diff --git a/.github/actions/quay/action.yml b/.github/actions/quay/action.yml
new file mode 100644
index 0000000..377b2ae
--- /dev/null
+++ b/.github/actions/quay/action.yml
@@ -0,0 +1,60 @@
+name: 'Run command in Quay image'
+description: 'Run a command in a quay.io container'
+inputs:
+  image:
+    description: 'Image'
+    required: true
+  options:
+    description: 'Options for the container'
+    required: false
+  run:
+    description: 'Args for the container entrypoint'
+    required: false
+  entrypoint:
+    description: 'Container entrypoint to use'
+    required: false
+runs:
+  using: 'composite'
+  steps:
+    - name: Log in to the Container registry
+      uses: docker/login-action@v3
+      with:
+        registry: quay.io
+        username: ${{ env.QUAY_USER }}
+        password: ${{ env.QUAY_PASSWORD }}
+    - name: Run command
+      shell: bash
+      run: |
+        export ENTRYPOINT='${{ inputs.entrypoint }}'
+        export COMMAND='${{ inputs.run }}'
+        export COMMAND_PREFIX=''
+
+        # Prep for a shell script.
+        if [[ $ENTRYPOINT == '' ]]; then
+          ENTRYPOINT='sh'
+          COMMAND_PREFIX='-c'
+          COMMAND=$(echo "$COMMAND" | sed -r '/^\s*$/d')
+          COMMAND=${COMMAND//$'\n'/ ; }
+        fi
+
+        # Strip newlines in options.
+        export OPTIONS='${{ inputs.options }}'
+        OPTIONS=${OPTIONS//$'\n'/ }
+
+        # Sanitize the image name.
+        export IMAGE='${{ inputs.image }}'
+        IMAGE=${IMAGE//$'\n'/}
+
+        # Prep the workspace.
+        export WORKSPACE='${{ github.workspace }}'
+        WORKSPACE=${WORKSPACE//$'\n'/}
+
+        # Prep volumes.
+        export VOLUMES="-v /var/run/docker.sock:/var/run/docker.sock -v $WORKSPACE:/workspace"
+
+        # Run the command in docker.
+ if [[ $COMMAND == *\;* ]]; then + docker run $VOLUMES $OPTIONS --entrypoint=$ENTRYPOINT $IMAGE $COMMAND_PREFIX "$COMMAND" + else + docker run $VOLUMES $OPTIONS --entrypoint=$ENTRYPOINT $IMAGE $COMMAND_PREFIX $COMMAND + fi diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..b596da0 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,166 @@ +name: CI +on: + pull_request: + branches: + - main + push: + branches: + - main + tags: + - v* + + # Allows to run this via the Actions tab + workflow_dispatch: + +env: + REGISTRY: quay.io + +jobs: + lint: + permissions: + # Required: allow read access to the content for analysis. + contents: read + # Optional: allow read access to pull request. Use with `only-new-issues` option. + pull-requests: read + # Optional: Allow write access to checks to allow the action to annotate code in the PR. + checks: write + name: Linter + runs-on: ubuntu-latest + timeout-minutes: 10 + steps: + - uses: actions/checkout@v5 + - uses: actions/setup-go@v5 + with: + go-version: "1.25.x" + cache: false + - name: golangci-lint + uses: golangci/golangci-lint-action@v8 + with: + # Require: The version of golangci-lint to use. + # When `install-mode` is `binary` (default) the value can be v1.2 or v1.2.3 or `latest` to use the latest version. + # When `install-mode` is `goinstall` the value can be v1.2.3, `latest`, or the hash of a commit. + version: v2.5.0 + + check-license: + name: License scan + runs-on: ubuntu-latest + timeout-minutes: 5 + env: + REPORT_FILE: gl-license-scanning-report.json + + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.25.x" + - name: License scanning + run: | + go install github.com/google/go-licenses@latest + go-licenses check . --disallowed_types=forbidden,restricted + - name: Generate license report + run: | + go-licenses report . 
> licenses.csv + - name: Save license scan report + uses: actions/upload-artifact@v4 + with: + name: license_scanning + path: licenses.csv + + check-go-releaser: + name: Go releaser check + runs-on: ubuntu-latest + timeout-minutes: 5 + + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.25.x" + - name: Run GoReleaser + uses: goreleaser/goreleaser-action@v5 + with: + # either 'goreleaser' (default) or 'goreleaser-pro' + distribution: goreleaser + # 'latest', 'nightly', or a semver + version: v1.25.1 + args: check + + test: + name: Tests + needs: + - lint + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.25.x" + - name: Run tests + run: go test -v ./... + + publish: + name: Publish the release + if: ${{ github.ref_type == 'tag' }} + needs: + - test + runs-on: ubuntu-latest + timeout-minutes: 15 + env: + TAG: ${{ github.ref_name }} + S3_BUCKET: "s3://cli-dl.stackstate.com/stackstate-backup-cli/" + QUAY_USER: ${{ secrets.QUAY_USER }} + QUAY_PASSWORD: ${{ secrets.QUAY_PASSWORD }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.25.x" + - name: Log in to the Container registry + uses: docker/login-action@v3 + if: github.event_name != 'pull_request' + with: + registry: quay.io + username: ${{ secrets.QUAY_USER }} + password: ${{ secrets.QUAY_PASSWORD }} + - name: Log in to the Container registry + uses: docker/login-action@v3 + if: github.event_name != 'pull_request' + with: + registry: docker.io + username: ${{ secrets.DOCKER_USER }} + password: ${{ secrets.DOCKER_PASSWORD }} + - name: Go releaser publish + uses: goreleaser/goreleaser-action@v5 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ 
secrets.AWS_DEFAULT_REGION }} + with: + # either 'goreleaser' (default) or 'goreleaser-pro' + distribution: goreleaser + # 'latest', 'nightly', or a semver + version: v1.25.1 + args: release + - name: Write latest version to file + run: mkdir -p dist && echo "${{ env.TAG }}" > dist/LATEST_VERSION + + - name: Authenticate with AWS + uses: aws-actions/configure-aws-credentials@v4 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: ${{ secrets.AWS_DEFAULT_REGION }} + + - name: Publish latest version to S3 + run: | + echo "aws s3 cp dist/LATEST_VERSION ${{ env.S3_BUCKET }}" + aws s3 cp dist/LATEST_VERSION ${{ env.S3_BUCKET }} + + - name: Publish installers to S3 + run: | + echo "aws s3 cp scripts/publish/installers/ ${{ env.S3_BUCKET }} --recursive" + aws s3 cp scripts/publish/installers/ ${{ env.S3_BUCKET }} --recursive diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..8b88e38 --- /dev/null +++ b/.gitignore @@ -0,0 +1,25 @@ +bin/ +dist/ +.idea/ +sts-backup +stackstate-backup-cli + +sts-toolbox.yaml +values.yaml + +.vscode/launch.json + +__debug_bin + +pkged.go +vendor/ +result + +*.swp + +.go/ +.gocache/ +release-notes.md +release-notes.json + +.localdev/ diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 0000000..63f93a9 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,79 @@ +version: "2" +run: + concurrency: 4 + issues-exit-code: 1 + tests: true +linters: + default: none + enable: + - copyloopvar + - dogsled + - dupl + - errcheck + - exhaustive + - funlen + - goconst + - gocritic + - gocyclo + - goprintffuncname + - govet + - ineffassign + - lll + - misspell + - mnd + - nakedret + - noctx + - nolintlint + - rowserrcheck + - staticcheck + - unconvert + - unparam + - unused + - whitespace + - gosec + - bodyclose + - depguard + - revive + settings: + depguard: + rules: + main: + list-mode: lax + allow: + - $gostd + - github.com/stackvista + 
funlen: + lines: 100 + statements: 60 + lll: + line-length: 250 + mnd: + checks: + - argument + - case + - condition + - return + nolintlint: + require-specific: true + allow-unused: true + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/.goreleaser.yml b/.goreleaser.yml new file mode 100644 index 0000000..740d362 --- /dev/null +++ b/.goreleaser.yml @@ -0,0 +1,67 @@ +project_name: stackstate-backup-cli +release: + github: + owner: stackvista + name: stackstate-backup-cli + name_template: '{{.Tag}}' +before: + hooks: + - go mod download +builds: +- id: stackstate-backup-cli + targets: + - darwin_amd64 + - darwin_arm64 + - linux_amd64 + - linux_arm64 + - windows_amd64 + main: . + ldflags: + - -s -w -X github.com/stackvista/stackstate-backup-cli/cmd/version/version.Version={{.Version}} + -X github.com/stackvista/stackstate-backup-cli/cmd/version/version.Commit={{.Commit}} + -X github.com/stackvista/stackstate-backup-cli/cmd/version/version.BuildDate={{.Date}} + binary: sts-backup + env: + - CGO_ENABLED=0 + - GO111MODULE=on +archives: +- id: stackstate-backup-cli + builds: + - stackstate-backup-cli + name_template: '{{ .ProjectName }}-{{ .Version }}.{{ .Os }}-{{- if eq .Arch "amd64" }}x86_64{{- else }}{{ .Arch }}{{ end }}' + format: tar.gz + format_overrides: + - goos: windows + format: zip +blobs: + - provider: s3 + bucket: cli-dl.stackstate.com + ids: + - stackstate-backup-cli +dockers: +- image_templates: + - "stackstate/stackstate-backup-cli:{{ .Version }}-x86_64" + ids: + - stackstate-backup-cli + use: buildx + dockerfile: docker/Dockerfile.goreleaser + build_flag_templates: + - "--platform=linux/amd64" +- image_templates: + - "stackstate/stackstate-backup-cli:{{ .Version }}-arm64v8" + ids: + - 
stackstate-backup-cli + use: buildx + goarch: arm64 + dockerfile: docker/Dockerfile.goreleaser + build_flag_templates: + - "--platform=linux/arm64/v8" +docker_manifests: +- name_template: stackstate/stackstate-backup-cli:{{ .Version }} + image_templates: + - stackstate/stackstate-backup-cli:{{ .Version }}-x86_64 + - stackstate/stackstate-backup-cli:{{ .Version }}-arm64v8 +- name_template: stackstate/stackstate-backup-cli:latest + image_templates: + - stackstate/stackstate-backup-cli:{{ .Version }}-x86_64 + - stackstate/stackstate-backup-cli:{{ .Version }}-arm64v8 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..967d4a3 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,16 @@ +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v3.2.0 + hooks: +# - id: trailing-whitespace +# - id: end-of-file-fixer + - id: check-yaml + - id: check-added-large-files +- repo: https://github.com/dnephin/pre-commit-golang + rev: v0.5.0 + hooks: +# - id: go-fmt + # NOTE: This is pretty slow, so it's disabled by default in favour of running lint during CI. + - id: golangci-lint diff --git a/README.md b/README.md new file mode 100644 index 0000000..6430add --- /dev/null +++ b/README.md @@ -0,0 +1,199 @@ +# StackState Backup CLI + +A command-line tool for managing backups and restores for SUSE Observability platform running on Kubernetes. + +## Overview + +This CLI tool replaces the legacy Bash-based backup/restore scripts with a single Go binary that can be run from an operator host. It uses Kubernetes port-forwarding to connect to services and automatically discovers configuration from ConfigMaps and Secrets. 
+ +**Current Support:** Elasticsearch snapshots and restores +**Planned:** VictoriaMetrics, ClickHouse, StackGraph, Configuration backups + +## Installation + +Download pre-built binaries from the [releases page](https://github.com/stackvista/stackstate-backup-cli/releases). + +### Building from Source + +```bash +go build -o sts-backup -ldflags '-s -w -X github.com/stackvista/stackstate-backup-cli/cmd/version.Version=0.0.1 -X github.com/stackvista/stackstate-backup-cli/cmd/version.Commit=abce -X github.com/stackvista/stackstate-backup-cli/cmd/version.Date=2025-10-15' +``` + +## Usage + +```bash +sts-backup [command] [subcommand] [flags] +``` + +### Global Flags + +- `--namespace` - Kubernetes namespace (required) +- `--kubeconfig` - Path to kubeconfig file (default: ~/.kube/config) +- `--configmap` - ConfigMap name containing backup configuration (default: suse-observability-backup-config) +- `--secret` - Secret name containing backup credentials (default: suse-observability-backup-config) +- `--output, -o` - Output format: table, json (default: table) +- `--quiet, -q` - Suppress operational messages +- `--debug` - Enable debug output + +## Commands + +### version + +Display version information. + +```bash +sts-backup version +``` + +### elasticsearch + +Manage Elasticsearch snapshots and restores. + +#### configure + +Configure Elasticsearch snapshot repository and SLM policy. + +```bash +sts-backup elasticsearch configure --namespace +``` + +#### list-indices + +List Elasticsearch indices. + +```bash +sts-backup elasticsearch list-indices --namespace +``` + +#### list-snapshots + +List available Elasticsearch snapshots. + +```bash +sts-backup elasticsearch list-snapshots --namespace +``` + +#### restore-snapshot + +Restore Elasticsearch snapshot. 
+ +```bash +sts-backup elasticsearch restore-snapshot --namespace --snapshot-name [flags] +``` + +**Flags:** +- `--snapshot-name` - Name of snapshot to restore (required) +- `--drop-all-indices` - Delete all existing indices before restore +- `--yes` - Skip confirmation prompt + +## Configuration + +The CLI uses configuration from Kubernetes ConfigMaps and Secrets with the following precedence: + +1. CLI flags (highest priority) +2. Environment variables (prefix: `BACKUP_TOOL_`) +3. Kubernetes Secret (overrides sensitive fields) +4. Kubernetes ConfigMap (base configuration) +5. Defaults (lowest priority) + +### Example Configuration + +Create a ConfigMap with the following structure: + +```yaml +elasticsearch: + snapshotRepository: + name: sts-backup + bucket: sts-elasticsearch-backup + endpoint: suse-observability-minio:9000 + basepath: "" + + slm: + name: auto-sts-backup + schedule: "0 0 3 * * ?" + snapshotTemplateName: "" + repository: sts-backup + indices: "sts*" + retentionExpireAfter: 30d + retentionMinCount: 5 + retentionMaxCount: 30 + + service: + name: suse-observability-elasticsearch-master-headless + port: 9200 + localPortForwardPort: 9200 + + restore: + repository: sts-backup + scaleDownLabelSelector: "observability.suse.com/scalable-during-es-restore=true" + indexPrefix: sts + datastreamIndexPrefix: .ds-sts_k8s_logs + datastreamName: sts_k8s_logs + indicesPattern: sts*,.ds-sts_k8s_logs* +``` + +Apply to Kubernetes: + +```bash +kubectl create configmap suse-observability-backup-config \ + --from-file=config=config.yaml \ + -n +``` + +For sensitive credentials, create a Secret with S3/Minio access keys: + +```bash +kubectl create secret generic suse-observability-backup-config \ + --from-literal=elasticsearch.snapshotRepository.accessKey= \ + --from-literal=elasticsearch.snapshotRepository.secretKey= \ + -n +``` + +See [internal/config/testdata/validConfigMapConfig.yaml](internal/config/testdata/validConfigMapConfig.yaml) for a complete example. 
+ +## Project Structure + +``` +. +├── cmd/ # CLI commands +│ ├── root.go # Root command and flag definitions +│ ├── version/ # Version command +│ └── elasticsearch/ # Elasticsearch subcommands +│ ├── configure.go # Configure snapshot repository +│ ├── list-indices.go # List indices +│ ├── list-snapshots.go # List snapshots +│ └── restore-snapshot.go # Restore snapshot +├── internal/ # Internal packages +│ ├── config/ # Configuration loading and validation +│ ├── elasticsearch/ # Elasticsearch client +│ ├── k8s/ # Kubernetes client utilities +│ ├── logger/ # Structured logging +│ └── output/ # Output formatting (table, JSON) +└── main.go # Entry point +``` + +## CI/CD + +This project uses GitHub Actions and GoReleaser for automated releases: + +1. Push a new tag (e.g., `v1.0.0`) +2. GitHub Actions automatically builds binaries for multiple platforms +3. GoReleaser creates a GitHub release and uploads artifacts to S3 + +## Development + +### Running Tests + +```bash +go test ./... +``` + +### Linting + +```bash +golangci-lint run --config=.golangci.yml ./... 
+``` + +## License + +Copyright (c) 2025 SUSE diff --git a/cmd/elasticsearch/configure.go b/cmd/elasticsearch/configure.go new file mode 100644 index 0000000..bc43e2b --- /dev/null +++ b/cmd/elasticsearch/configure.go @@ -0,0 +1,108 @@ +package elasticsearch + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + "github.com/stackvista/stackstate-backup-cli/cmd/portforward" + "github.com/stackvista/stackstate-backup-cli/internal/config" + "github.com/stackvista/stackstate-backup-cli/internal/elasticsearch" + "github.com/stackvista/stackstate-backup-cli/internal/k8s" + "github.com/stackvista/stackstate-backup-cli/internal/logger" +) + +func configureCmd(cliCtx *config.Context) *cobra.Command { + return &cobra.Command{ + Use: "configure", + Short: "Configure Elasticsearch snapshot repository and SLM policy", + Long: `Configure Elasticsearch snapshot repository and Snapshot Lifecycle Management (SLM) policy for automated backups.`, + Run: func(_ *cobra.Command, _ []string) { + if err := runConfigure(cliCtx); err != nil { + _, _ = fmt.Fprintf(os.Stderr, "error: %v\n", err) + os.Exit(1) + } + }, + } +} + +func runConfigure(cliCtx *config.Context) error { + // Create logger + log := logger.New(cliCtx.Config.Quiet, cliCtx.Config.Debug) + + // Create Kubernetes client + k8sClient, err := k8s.NewClient(cliCtx.Config.Kubeconfig, cliCtx.Config.Debug) + if err != nil { + return fmt.Errorf("failed to create Kubernetes client: %w", err) + } + + // Load configuration + cfg, err := config.LoadConfig(k8sClient.Clientset(), cliCtx.Config.Namespace, cliCtx.Config.ConfigMapName, cliCtx.Config.SecretName) + if err != nil { + return fmt.Errorf("failed to load configuration: %w", err) + } + + // Validate required configuration + if cfg.Elasticsearch.SnapshotRepository.AccessKey == "" || cfg.Elasticsearch.SnapshotRepository.SecretKey == "" { + return fmt.Errorf("accessKey and secretKey are required in the secret configuration") + } + + // Setup port-forward to Elasticsearch + 
serviceName := cfg.Elasticsearch.Service.Name + localPort := cfg.Elasticsearch.Service.LocalPortForwardPort + remotePort := cfg.Elasticsearch.Service.Port + + pf, err := portforward.SetupPortForward(k8sClient, cliCtx.Config.Namespace, serviceName, localPort, remotePort, log) + if err != nil { + return err + } + defer close(pf.StopChan) + + // Create Elasticsearch client + esClient, err := elasticsearch.NewClient(fmt.Sprintf("http://localhost:%d", pf.LocalPort)) + if err != nil { + return fmt.Errorf("failed to create Elasticsearch client: %w", err) + } + + // Configure snapshot repository + repo := cfg.Elasticsearch.SnapshotRepository + log.Infof("Configuring snapshot repository '%s' (bucket: %s)...", repo.Name, repo.Bucket) + + err = esClient.ConfigureSnapshotRepository( + repo.Name, + repo.Bucket, + repo.Endpoint, + repo.BasePath, + repo.AccessKey, + repo.SecretKey, + ) + if err != nil { + return fmt.Errorf("failed to configure snapshot repository: %w", err) + } + + log.Successf("Snapshot repository configured successfully") + + // Configure SLM policy + slm := cfg.Elasticsearch.SLM + log.Infof("Configuring SLM policy '%s'...", slm.Name) + + err = esClient.ConfigureSLMPolicy( + slm.Name, + slm.Schedule, + slm.SnapshotTemplateName, + slm.Repository, + slm.Indices, + slm.RetentionExpireAfter, + slm.RetentionMinCount, + slm.RetentionMaxCount, + ) + if err != nil { + return fmt.Errorf("failed to configure SLM policy: %w", err) + } + + log.Successf("SLM policy configured successfully") + log.Println() + log.Successf("Configuration completed successfully") + + return nil +} diff --git a/cmd/elasticsearch/configure_test.go b/cmd/elasticsearch/configure_test.go new file mode 100644 index 0000000..b79b303 --- /dev/null +++ b/cmd/elasticsearch/configure_test.go @@ -0,0 +1,394 @@ +package elasticsearch + +import ( + "context" + "fmt" + "testing" + + "github.com/stackvista/stackstate-backup-cli/internal/config" + 
"github.com/stackvista/stackstate-backup-cli/internal/elasticsearch" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" +) + +// mockESClientForConfigure is a mock for testing configure command +type mockESClientForConfigure struct { + configureRepoErr error + configureSLMErr error + repoConfigured bool + slmConfigured bool + lastRepoConfig map[string]string + lastSLMConfig map[string]interface{} +} + +func (m *mockESClientForConfigure) ConfigureSnapshotRepository(name, bucket, endpoint, basePath, accessKey, secretKey string) error { + if m.configureRepoErr != nil { + return m.configureRepoErr + } + m.repoConfigured = true + m.lastRepoConfig = map[string]string{ + "name": name, + "bucket": bucket, + "endpoint": endpoint, + "basePath": basePath, + "accessKey": accessKey, + "secretKey": secretKey, + } + return nil +} + +func (m *mockESClientForConfigure) ConfigureSLMPolicy(name, schedule, snapshotName, repository, indices, expireAfter string, minCount, maxCount int) error { + if m.configureSLMErr != nil { + return m.configureSLMErr + } + m.slmConfigured = true + m.lastSLMConfig = map[string]interface{}{ + "name": name, + "schedule": schedule, + "snapshotName": snapshotName, + "repository": repository, + "indices": indices, + "expireAfter": expireAfter, + "minCount": minCount, + "maxCount": maxCount, + } + return nil +} + +func (m *mockESClientForConfigure) ListSnapshots(_ string) ([]elasticsearch.Snapshot, error) { + return nil, fmt.Errorf("not implemented") +} + +func (m *mockESClientForConfigure) GetSnapshot(_, _ string) (*elasticsearch.Snapshot, error) { + return nil, fmt.Errorf("not implemented") +} + +func (m *mockESClientForConfigure) ListIndices(_ string) ([]string, error) { + return nil, fmt.Errorf("not implemented") +} + +func (m *mockESClientForConfigure) ListIndicesDetailed() ([]elasticsearch.IndexInfo, error) { + 
return nil, fmt.Errorf("not implemented") +} + +func (m *mockESClientForConfigure) DeleteIndex(_ string) error { + return fmt.Errorf("not implemented") +} + +func (m *mockESClientForConfigure) IndexExists(_ string) (bool, error) { + return false, fmt.Errorf("not implemented") +} + +func (m *mockESClientForConfigure) RestoreSnapshot(_, _, _ string, _ bool) error { + return fmt.Errorf("not implemented") +} + +func (m *mockESClientForConfigure) RolloverDatastream(_ string) error { + return fmt.Errorf("not implemented") +} + +// TestConfigureCmd_Unit tests the command structure +func TestConfigureCmd_Unit(t *testing.T) { + cliCtx := config.NewContext() + cliCtx.Config.Namespace = testNamespace + cliCtx.Config.ConfigMapName = testConfigMapName + cliCtx.Config.SecretName = testSecretName + + cmd := configureCmd(cliCtx) + + // Test command metadata + assert.Equal(t, "configure", cmd.Use) + assert.Equal(t, "Configure Elasticsearch snapshot repository and SLM policy", cmd.Short) + assert.NotEmpty(t, cmd.Long) + assert.NotNil(t, cmd.Run) +} + +// TestConfigureCmd_Integration tests the integration with Kubernetes client +// +//nolint:funlen +func TestConfigureCmd_Integration(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode") + } + + tests := []struct { + name string + configData string + secretData string + expectError bool + errorContains string + }{ + { + name: "successful configuration with complete data", + configData: ` +elasticsearch: + service: + name: elasticsearch-master + port: 9200 + localPortForwardPort: 9200 + restore: + scaleDownLabelSelector: app=test + indexPrefix: sts_ + datastreamIndexPrefix: sts_k8s_logs + datastreamName: sts_k8s_logs + indicesPattern: "sts_*" + repository: backup-repo + snapshotRepository: + name: backup-repo + bucket: backups + endpoint: minio:9000 + basepath: snapshots + accessKey: test-key + secretKey: test-secret + slm: + name: daily + schedule: "0 1 * * *" + snapshotTemplateName: "" + 
repository: backup-repo + indices: "sts_*" + retentionExpireAfter: 30d + retentionMinCount: 5 + retentionMaxCount: 50 +`, + secretData: "", + expectError: false, + }, + { + name: "missing credentials in config", + configData: ` +elasticsearch: + service: + name: elasticsearch-master + port: 9200 + localPortForwardPort: 9200 + restore: + scaleDownLabelSelector: app=test + indexPrefix: sts_ + datastreamIndexPrefix: sts_k8s_logs + datastreamName: sts_k8s_logs + indicesPattern: "sts_*" + repository: backup-repo + snapshotRepository: + name: backup-repo + bucket: backups + endpoint: minio:9000 + basepath: snapshots + accessKey: "" + secretKey: "" + slm: + name: daily + schedule: "0 1 * * *" + snapshotTemplateName: "" + repository: backup-repo + indices: "sts_*" + retentionExpireAfter: 30d + retentionMinCount: 5 + retentionMaxCount: 50 +`, + secretData: ` +elasticsearch: + snapshotRepository: + accessKey: secret-key + secretKey: secret-value +`, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeClient := fake.NewSimpleClientset() + + // Create ConfigMap + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: testConfigMapName, + Namespace: testNamespace, + }, + Data: map[string]string{ + "config": tt.configData, + }, + } + _, err := fakeClient.CoreV1().ConfigMaps(testNamespace).Create( + context.Background(), cm, metav1.CreateOptions{}, + ) + require.NoError(t, err) + + // Create Secret if provided + if tt.secretData != "" { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: testSecretName, + Namespace: testNamespace, + }, + Data: map[string][]byte{ + "config": []byte(tt.secretData), + }, + } + _, err := fakeClient.CoreV1().Secrets(testNamespace).Create( + context.Background(), secret, metav1.CreateOptions{}, + ) + require.NoError(t, err) + } + + // Test that config loading works + secretName := "" + if tt.secretData != "" { + secretName = testSecretName + } + cfg, err := 
config.LoadConfig(fakeClient, testNamespace, testConfigMapName, secretName) + + if tt.expectError { + assert.Error(t, err) + if tt.errorContains != "" { + assert.Contains(t, err.Error(), tt.errorContains) + } + } else { + require.NoError(t, err) + assert.NotNil(t, cfg) + assert.NotEmpty(t, cfg.Elasticsearch.SnapshotRepository.AccessKey) + assert.NotEmpty(t, cfg.Elasticsearch.SnapshotRepository.SecretKey) + } + }) + } +} + +// TestMockESClientForConfigure demonstrates mock usage for configure +func TestMockESClientForConfigure(t *testing.T) { + tests := []struct { + name string + configureRepoErr error + configureSLMErr error + expectRepoOK bool + expectSLMOK bool + }{ + { + name: "successful configuration", + configureRepoErr: nil, + configureSLMErr: nil, + expectRepoOK: true, + expectSLMOK: true, + }, + { + name: "repository configuration fails", + configureRepoErr: fmt.Errorf("repository creation failed"), + configureSLMErr: nil, + expectRepoOK: false, + expectSLMOK: false, + }, + { + name: "SLM configuration fails", + configureRepoErr: nil, + configureSLMErr: fmt.Errorf("SLM policy creation failed"), + expectRepoOK: true, + expectSLMOK: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockClient := &mockESClientForConfigure{ + configureRepoErr: tt.configureRepoErr, + configureSLMErr: tt.configureSLMErr, + } + + // Configure repository + err := mockClient.ConfigureSnapshotRepository( + "backup-repo", + "backup-bucket", + "minio:9000", + "snapshots", + "access-key", + "secret-key", + ) + + if tt.expectRepoOK { + assert.NoError(t, err) + assert.True(t, mockClient.repoConfigured) + assert.Equal(t, "backup-repo", mockClient.lastRepoConfig["name"]) + assert.Equal(t, "backup-bucket", mockClient.lastRepoConfig["bucket"]) + } else { + assert.Error(t, err) + return // Don't test SLM if repo failed + } + + // Configure SLM policy + err = mockClient.ConfigureSLMPolicy( + "daily-snapshots", + "0 1 * * *", + "", + "backup-repo", + 
"sts_*", + "30d", + 5, + 50, + ) + + if tt.expectSLMOK { + assert.NoError(t, err) + assert.True(t, mockClient.slmConfigured) + assert.Equal(t, "daily-snapshots", mockClient.lastSLMConfig["name"]) + assert.Equal(t, "0 1 * * *", mockClient.lastSLMConfig["schedule"]) + assert.Equal(t, 5, mockClient.lastSLMConfig["minCount"]) + assert.Equal(t, 50, mockClient.lastSLMConfig["maxCount"]) + } else { + assert.Error(t, err) + } + }) + } +} + +// TestConfigureValidation tests configuration validation +func TestConfigureValidation(t *testing.T) { + tests := []struct { + name string + accessKey string + secretKey string + expectError bool + errorContains string + }{ + { + name: "valid credentials", + accessKey: "test-key", + secretKey: "test-secret", + expectError: false, + }, + { + name: "missing access key", + accessKey: "", + secretKey: "test-secret", + expectError: true, + errorContains: "accessKey and secretKey are required", + }, + { + name: "missing secret key", + accessKey: "test-key", + secretKey: "", + expectError: true, + errorContains: "accessKey and secretKey are required", + }, + { + name: "missing both credentials", + accessKey: "", + secretKey: "", + expectError: true, + errorContains: "accessKey and secretKey are required", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Simulate validation logic from runConfigure + hasError := tt.accessKey == "" || tt.secretKey == "" + + if tt.expectError { + assert.True(t, hasError) + } else { + assert.False(t, hasError) + } + }) + } +} diff --git a/cmd/elasticsearch/elasticsearch.go b/cmd/elasticsearch/elasticsearch.go new file mode 100644 index 0000000..9fc8c52 --- /dev/null +++ b/cmd/elasticsearch/elasticsearch.go @@ -0,0 +1,20 @@ +package elasticsearch + +import ( + "github.com/spf13/cobra" + "github.com/stackvista/stackstate-backup-cli/internal/config" +) + +func Cmd(cliCtx *config.Context) *cobra.Command { + cmd := &cobra.Command{ + Use: "elasticsearch", + Short: "Elasticsearch backup 
and restore operations", + } + + cmd.AddCommand(listSnapshotsCmd(cliCtx)) + cmd.AddCommand(listIndicesCmd(cliCtx)) + cmd.AddCommand(restoreCmd(cliCtx)) + cmd.AddCommand(configureCmd(cliCtx)) + + return cmd +} diff --git a/cmd/elasticsearch/list-indices.go b/cmd/elasticsearch/list-indices.go new file mode 100644 index 0000000..4a4f2c2 --- /dev/null +++ b/cmd/elasticsearch/list-indices.go @@ -0,0 +1,101 @@ +package elasticsearch + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + "github.com/stackvista/stackstate-backup-cli/cmd/portforward" + "github.com/stackvista/stackstate-backup-cli/internal/config" + "github.com/stackvista/stackstate-backup-cli/internal/elasticsearch" + "github.com/stackvista/stackstate-backup-cli/internal/k8s" + "github.com/stackvista/stackstate-backup-cli/internal/logger" + "github.com/stackvista/stackstate-backup-cli/internal/output" +) + +func listIndicesCmd(cliCtx *config.Context) *cobra.Command { + return &cobra.Command{ + Use: "list-indices", + Short: "List Elasticsearch indices", + Run: func(_ *cobra.Command, _ []string) { + if err := runListIndices(cliCtx); err != nil { + _, _ = fmt.Fprintf(os.Stderr, "error: %v\n", err) + os.Exit(1) + } + }, + } +} + +func runListIndices(cliCtx *config.Context) error { + // Create logger + log := logger.New(cliCtx.Config.Quiet, cliCtx.Config.Debug) + + // Create Kubernetes client + k8sClient, err := k8s.NewClient(cliCtx.Config.Kubeconfig, cliCtx.Config.Debug) + if err != nil { + return fmt.Errorf("failed to create Kubernetes client: %w", err) + } + + // Load configuration + cfg, err := config.LoadConfig(k8sClient.Clientset(), cliCtx.Config.Namespace, cliCtx.Config.ConfigMapName, cliCtx.Config.SecretName) + if err != nil { + return fmt.Errorf("failed to load configuration: %w", err) + } + + // Setup port-forward to Elasticsearch + serviceName := cfg.Elasticsearch.Service.Name + localPort := cfg.Elasticsearch.Service.LocalPortForwardPort + remotePort := cfg.Elasticsearch.Service.Port + + pf, err := 
portforward.SetupPortForward(k8sClient, cliCtx.Config.Namespace, serviceName, localPort, remotePort, log) + if err != nil { + return err + } + defer close(pf.StopChan) + + // Create Elasticsearch client + esClient, err := elasticsearch.NewClient(fmt.Sprintf("http://localhost:%d", pf.LocalPort)) + if err != nil { + return fmt.Errorf("failed to create Elasticsearch client: %w", err) + } + + // List indices with cat API + log.Infof("Fetching Elasticsearch indices...") + + indices, err := esClient.ListIndicesDetailed() + if err != nil { + return fmt.Errorf("failed to list indices: %w", err) + } + + // Format and print indices + formatter := output.NewFormatter(cliCtx.Config.OutputFormat) + + if len(indices) == 0 { + formatter.PrintMessage("No indices found") + return nil + } + + table := output.Table{ + Headers: []string{"HEALTH", "STATUS", "INDEX", "UUID", "PRI", "REP", "DOCS.COUNT", "DOCS.DELETED", "STORE.SIZE", "PRI.STORE.SIZE", "DATASET.SIZE"}, + Rows: make([][]string, 0, len(indices)), + } + + for _, idx := range indices { + row := []string{ + idx.Health, + idx.Status, + idx.Index, + idx.UUID, + idx.Pri, + idx.Rep, + idx.DocsCount, + idx.DocsDeleted, + idx.StoreSize, + idx.PriStoreSize, + idx.DatasetSize, + } + table.Rows = append(table.Rows, row) + } + + return formatter.PrintTable(table) +} diff --git a/cmd/elasticsearch/list-snapshots.go b/cmd/elasticsearch/list-snapshots.go new file mode 100644 index 0000000..7bc932f --- /dev/null +++ b/cmd/elasticsearch/list-snapshots.go @@ -0,0 +1,101 @@ +package elasticsearch + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + "github.com/stackvista/stackstate-backup-cli/cmd/portforward" + "github.com/stackvista/stackstate-backup-cli/internal/config" + "github.com/stackvista/stackstate-backup-cli/internal/elasticsearch" + "github.com/stackvista/stackstate-backup-cli/internal/k8s" + "github.com/stackvista/stackstate-backup-cli/internal/logger" + "github.com/stackvista/stackstate-backup-cli/internal/output" +) + +func 
listSnapshotsCmd(cliCtx *config.Context) *cobra.Command { + return &cobra.Command{ + Use: "list-snapshots", + Short: "List available Elasticsearch snapshots", + Run: func(_ *cobra.Command, _ []string) { + if err := runListSnapshots(cliCtx); err != nil { + _, _ = fmt.Fprintf(os.Stderr, "error: %v\n", err) + os.Exit(1) + } + }, + } +} + +func runListSnapshots(cliCtx *config.Context) error { + // Create logger + log := logger.New(cliCtx.Config.Quiet, cliCtx.Config.Debug) + + // Create Kubernetes client + k8sClient, err := k8s.NewClient(cliCtx.Config.Kubeconfig, cliCtx.Config.Debug) + if err != nil { + return fmt.Errorf("failed to create Kubernetes client: %w", err) + } + + // Load configuration + cfg, err := config.LoadConfig(k8sClient.Clientset(), cliCtx.Config.Namespace, cliCtx.Config.ConfigMapName, cliCtx.Config.SecretName) + if err != nil { + return fmt.Errorf("failed to load configuration: %w", err) + } + + // Setup port-forward to Elasticsearch + serviceName := cfg.Elasticsearch.Service.Name + localPort := cfg.Elasticsearch.Service.LocalPortForwardPort + remotePort := cfg.Elasticsearch.Service.Port + + pf, err := portforward.SetupPortForward(k8sClient, cliCtx.Config.Namespace, serviceName, localPort, remotePort, log) + if err != nil { + return err + } + defer close(pf.StopChan) + + // Create Elasticsearch client + esClient, err := elasticsearch.NewClient(fmt.Sprintf("http://localhost:%d", pf.LocalPort)) + if err != nil { + return fmt.Errorf("failed to create Elasticsearch client: %w", err) + } + + // List snapshots + repository := cfg.Elasticsearch.Restore.Repository + log.Infof("Fetching snapshots from repository '%s'...", repository) + + snapshots, err := esClient.ListSnapshots(repository) + if err != nil { + return fmt.Errorf("failed to list snapshots: %w", err) + } + + // Format and print snapshots + formatter := output.NewFormatter(cliCtx.Config.OutputFormat) + + if len(snapshots) == 0 { + formatter.PrintMessage("No snapshots found") + return nil + } + + 
table := output.Table{ + Headers: []string{"SNAPSHOT", "STATE", "START TIME", "DURATION (ms)", "FAILURES"}, + Rows: make([][]string, 0, len(snapshots)), + } + + for _, snapshot := range snapshots { + failures := "0" + if len(snapshot.Failures) > 0 { + failures = fmt.Sprintf("%d", len(snapshot.Failures)) + } + + row := []string{ + snapshot.Snapshot, + snapshot.State, + snapshot.StartTime, + fmt.Sprintf("%d", snapshot.DurationInMillis), + failures, + } + table.Rows = append(table.Rows, row) + } + + return formatter.PrintTable(table) +} diff --git a/cmd/elasticsearch/list_indices_test.go b/cmd/elasticsearch/list_indices_test.go new file mode 100644 index 0000000..4257553 --- /dev/null +++ b/cmd/elasticsearch/list_indices_test.go @@ -0,0 +1,273 @@ +package elasticsearch + +import ( + "context" + "fmt" + "testing" + + "github.com/stackvista/stackstate-backup-cli/internal/config" + "github.com/stackvista/stackstate-backup-cli/internal/elasticsearch" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" +) + +// Note: Constants testConfigMapName and testNamespace are shared from list_snapshots_test.go + +// mockESClientForIndices is a mock for testing list-indices command +type mockESClientForIndices struct { + indices []string + indicesDetail []elasticsearch.IndexInfo + err error +} + +func (m *mockESClientForIndices) ListSnapshots(_ string) ([]elasticsearch.Snapshot, error) { + return nil, fmt.Errorf("not implemented") +} + +func (m *mockESClientForIndices) GetSnapshot(_, _ string) (*elasticsearch.Snapshot, error) { + return nil, fmt.Errorf("not implemented") +} + +func (m *mockESClientForIndices) ListIndices(_ string) ([]string, error) { + if m.err != nil { + return nil, m.err + } + return m.indices, nil +} + +func (m *mockESClientForIndices) ListIndicesDetailed() ([]elasticsearch.IndexInfo, error) { + if m.err != nil { + return 
nil, m.err + } + return m.indicesDetail, nil +} + +func (m *mockESClientForIndices) DeleteIndex(_ string) error { + return fmt.Errorf("not implemented") +} + +func (m *mockESClientForIndices) IndexExists(_ string) (bool, error) { + return false, fmt.Errorf("not implemented") +} + +func (m *mockESClientForIndices) RestoreSnapshot(_, _, _ string, _ bool) error { + return fmt.Errorf("not implemented") +} + +func (m *mockESClientForIndices) ConfigureSnapshotRepository(_, _, _, _, _, _ string) error { + return fmt.Errorf("not implemented") +} + +func (m *mockESClientForIndices) ConfigureSLMPolicy(_, _, _, _, _, _ string, _, _ int) error { + return fmt.Errorf("not implemented") +} + +func (m *mockESClientForIndices) RolloverDatastream(_ string) error { + return fmt.Errorf("not implemented") +} + +// TestListIndicesCmd_Unit tests the command structure +func TestListIndicesCmd_Unit(t *testing.T) { + cliCtx := config.NewContext() + cliCtx.Config.Namespace = testNamespace + cliCtx.Config.ConfigMapName = testConfigMapName + cliCtx.Config.OutputFormat = "table" + + cmd := listIndicesCmd(cliCtx) + + // Test command metadata + assert.Equal(t, "list-indices", cmd.Use) + assert.Equal(t, "List Elasticsearch indices", cmd.Short) + assert.NotNil(t, cmd.Run) +} + +// TestListIndicesCmd_Integration tests the integration with Kubernetes client +func TestListIndicesCmd_Integration(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode") + } + + // Create fake Kubernetes client + fakeClient := fake.NewSimpleClientset() + + // Create ConfigMap with valid config + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: testConfigMapName, + Namespace: testNamespace, + }, + Data: map[string]string{ + "config": ` +elasticsearch: + service: + name: elasticsearch-master + port: 9200 + localPortForwardPort: 9200 + restore: + scaleDownLabelSelector: app=test + indexPrefix: sts_ + datastreamIndexPrefix: sts_k8s_logs + datastreamName: sts_k8s_logs + 
indicesPattern: "sts_*" + repository: backup-repo + snapshotRepository: + name: backup-repo + bucket: backups + endpoint: minio:9000 + basepath: snapshots + accessKey: key + secretKey: secret + slm: + name: daily + schedule: "0 1 * * *" + snapshotTemplateName: "" + repository: backup-repo + indices: "sts_*" + retentionExpireAfter: 30d + retentionMinCount: 5 + retentionMaxCount: 50 +`, + }, + } + _, err := fakeClient.CoreV1().ConfigMaps(testNamespace).Create( + context.Background(), cm, metav1.CreateOptions{}, + ) + require.NoError(t, err) + + // Test that config loading works + cfg, err := config.LoadConfig(fakeClient, testNamespace, testConfigMapName, "") + require.NoError(t, err) + assert.Equal(t, "elasticsearch-master", cfg.Elasticsearch.Service.Name) + assert.Equal(t, 9200, cfg.Elasticsearch.Service.Port) +} + +// TestMockESClientForIndices demonstrates mock usage for indices +func TestMockESClientForIndices(t *testing.T) { + tests := []struct { + name string + mockIndices []elasticsearch.IndexInfo + mockErr error + expectError bool + expectedCount int + }{ + { + name: "successful list with multiple indices", + mockIndices: []elasticsearch.IndexInfo{ + { + Health: "green", + Status: "open", + Index: "sts_logs-2024-01", + UUID: "uuid1", + Pri: "1", + Rep: "1", + DocsCount: "1000", + DocsDeleted: "0", + StoreSize: "1mb", + PriStoreSize: "500kb", + DatasetSize: "1mb", + }, + { + Health: "yellow", + Status: "open", + Index: "sts_logs-2024-02", + UUID: "uuid2", + Pri: "1", + Rep: "1", + DocsCount: "2000", + DocsDeleted: "10", + StoreSize: "2mb", + PriStoreSize: "1mb", + DatasetSize: "2mb", + }, + }, + mockErr: nil, + expectError: false, + expectedCount: 2, + }, + { + name: "empty indices list", + mockIndices: []elasticsearch.IndexInfo{}, + mockErr: nil, + expectError: false, + expectedCount: 0, + }, + { + name: "error case", + mockIndices: nil, + mockErr: fmt.Errorf("failed to connect to elasticsearch"), + expectError: true, + expectedCount: 0, + }, + } + + for _, 
tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create mock client + mockClient := &mockESClientForIndices{ + indicesDetail: tt.mockIndices, + err: tt.mockErr, + } + + // Call the method + indices, err := mockClient.ListIndicesDetailed() + + // Assertions + if tt.expectError { + assert.Error(t, err) + assert.Nil(t, indices) + } else { + assert.NoError(t, err) + assert.Equal(t, tt.expectedCount, len(indices)) + for i, expected := range tt.mockIndices { + assert.Equal(t, expected.Index, indices[i].Index) + assert.Equal(t, expected.Health, indices[i].Health) + assert.Equal(t, expected.Status, indices[i].Status) + } + } + }) + } +} + +// TestMockESClientSimpleList tests the simple ListIndices method +func TestMockESClientSimpleList(t *testing.T) { + tests := []struct { + name string + mockIndices []string + mockErr error + expectError bool + }{ + { + name: "successful simple list", + mockIndices: []string{"index-1", "index-2", "index-3"}, + mockErr: nil, + expectError: false, + }, + { + name: "error case", + mockIndices: nil, + mockErr: fmt.Errorf("connection timeout"), + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockClient := &mockESClientForIndices{ + indices: tt.mockIndices, + err: tt.mockErr, + } + + indices, err := mockClient.ListIndices("*") + + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tt.mockIndices, indices) + } + }) + } +} diff --git a/cmd/elasticsearch/list_snapshots_test.go b/cmd/elasticsearch/list_snapshots_test.go new file mode 100644 index 0000000..e3b3ac4 --- /dev/null +++ b/cmd/elasticsearch/list_snapshots_test.go @@ -0,0 +1,213 @@ +package elasticsearch + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/stackvista/stackstate-backup-cli/internal/config" + "github.com/stackvista/stackstate-backup-cli/internal/elasticsearch" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 
"k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" +) + +const ( + testConfigMapName = "backup-config" + testNamespace = "test-ns" + testSecretName = "backup-secret" +) + +// mockESClient is a simple mock for testing commands +type mockESClient struct { + snapshots []elasticsearch.Snapshot + err error +} + +func (m *mockESClient) ListSnapshots(_ string) ([]elasticsearch.Snapshot, error) { + if m.err != nil { + return nil, m.err + } + return m.snapshots, nil +} + +func (m *mockESClient) GetSnapshot(_, _ string) (*elasticsearch.Snapshot, error) { + return nil, fmt.Errorf("not implemented") +} + +func (m *mockESClient) ListIndices(_ string) ([]string, error) { + return nil, fmt.Errorf("not implemented") +} + +func (m *mockESClient) ListIndicesDetailed() ([]elasticsearch.IndexInfo, error) { + return nil, fmt.Errorf("not implemented") +} + +func (m *mockESClient) DeleteIndex(_ string) error { + return fmt.Errorf("not implemented") +} + +func (m *mockESClient) IndexExists(_ string) (bool, error) { + return false, fmt.Errorf("not implemented") +} + +func (m *mockESClient) RestoreSnapshot(_, _, _ string, _ bool) error { + return fmt.Errorf("not implemented") +} + +func (m *mockESClient) ConfigureSnapshotRepository(_, _, _, _, _, _ string) error { + return fmt.Errorf("not implemented") +} + +func (m *mockESClient) ConfigureSLMPolicy(_, _, _, _, _, _ string, _, _ int) error { + return fmt.Errorf("not implemented") +} + +func (m *mockESClient) RolloverDatastream(_ string) error { + return fmt.Errorf("not implemented") +} + +// TestListSnapshotsCmd_Integration demonstrates an integration-style test +// This test uses real fake.Clientset to test the full command flow +func TestListSnapshotsCmd_Integration(t *testing.T) { + // Skip this test in short mode as it requires more setup + if testing.Short() { + t.Skip("skipping integration test in short mode") + } + + // Create fake Kubernetes client + fakeClient := 
fake.NewSimpleClientset() + + // Create ConfigMap with valid config + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: testConfigMapName, + Namespace: testNamespace, + }, + Data: map[string]string{ + "config": ` +elasticsearch: + service: + name: elasticsearch-master + port: 9200 + localPortForwardPort: 9200 + restore: + scaleDownLabelSelector: app=test + indexPrefix: sts_ + datastreamIndexPrefix: sts_k8s_logs + datastreamName: sts_k8s_logs + indicesPattern: "sts_*" + repository: backup-repo + snapshotRepository: + name: backup-repo + bucket: backups + endpoint: minio:9000 + basepath: snapshots + accessKey: key + secretKey: secret + slm: + name: daily + schedule: "0 1 * * *" + snapshotTemplateName: "" + repository: backup-repo + indices: "sts_*" + retentionExpireAfter: 30d + retentionMinCount: 5 + retentionMaxCount: 50 +`, + }, + } + _, err := fakeClient.CoreV1().ConfigMaps(testNamespace).Create( + context.Background(), cm, metav1.CreateOptions{}, + ) + require.NoError(t, err) + + // Test that config loading works + cfg, err := config.LoadConfig(fakeClient, testNamespace, testConfigMapName, "") + require.NoError(t, err) + assert.Equal(t, "backup-repo", cfg.Elasticsearch.Restore.Repository) + assert.Equal(t, "elasticsearch-master", cfg.Elasticsearch.Service.Name) +} + +// TestListSnapshotsCmd_Unit demonstrates a unit-style test +// This test focuses on the command structure and basic behavior +func TestListSnapshotsCmd_Unit(t *testing.T) { + cliCtx := config.NewContext() + cliCtx.Config.Namespace = testNamespace + cliCtx.Config.ConfigMapName = testConfigMapName + cliCtx.Config.OutputFormat = "table" + + cmd := listSnapshotsCmd(cliCtx) + + // Test command metadata + assert.Equal(t, "list-snapshots", cmd.Use) + assert.Equal(t, "List available Elasticsearch snapshots", cmd.Short) + assert.NotNil(t, cmd.Run) +} + +// TestMockESClient demonstrates how to use the mock client +func TestMockESClient(t *testing.T) { + tests := []struct { + name string + 
mockSnapshots []elasticsearch.Snapshot + mockErr error + expectError bool + }{ + { + name: "successful list", + mockSnapshots: []elasticsearch.Snapshot{ + { + Snapshot: "snapshot-1", + UUID: "uuid-1", + State: "SUCCESS", + StartTime: time.Now().Format(time.RFC3339), + DurationInMillis: 1000, + }, + { + Snapshot: "snapshot-2", + UUID: "uuid-2", + State: "SUCCESS", + StartTime: time.Now().Format(time.RFC3339), + DurationInMillis: 2000, + }, + }, + mockErr: nil, + expectError: false, + }, + { + name: "error case", + mockSnapshots: nil, + mockErr: fmt.Errorf("connection failed"), + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create mock client + mockClient := &mockESClient{ + snapshots: tt.mockSnapshots, + err: tt.mockErr, + } + + // Call the method + snapshots, err := mockClient.ListSnapshots("backup-repo") + + // Assertions + if tt.expectError { + assert.Error(t, err) + assert.Nil(t, snapshots) + } else { + assert.NoError(t, err) + assert.Equal(t, len(tt.mockSnapshots), len(snapshots)) + for i, expected := range tt.mockSnapshots { + assert.Equal(t, expected.Snapshot, snapshots[i].Snapshot) + assert.Equal(t, expected.State, snapshots[i].State) + } + } + }) + } +} diff --git a/cmd/elasticsearch/restore-snapshot.go b/cmd/elasticsearch/restore-snapshot.go new file mode 100644 index 0000000..a857616 --- /dev/null +++ b/cmd/elasticsearch/restore-snapshot.go @@ -0,0 +1,275 @@ +package elasticsearch + +import ( + "bufio" + "fmt" + "os" + "strings" + "time" + + "github.com/spf13/cobra" + "github.com/stackvista/stackstate-backup-cli/cmd/portforward" + "github.com/stackvista/stackstate-backup-cli/internal/config" + "github.com/stackvista/stackstate-backup-cli/internal/elasticsearch" + "github.com/stackvista/stackstate-backup-cli/internal/k8s" + "github.com/stackvista/stackstate-backup-cli/internal/logger" +) + +const ( + // defaultMaxIndexDeleteAttempts is the maximum number of attempts to verify index deletion + 
defaultMaxIndexDeleteAttempts = 30 + // defaultIndexDeleteRetryInterval is the time to wait between index deletion verification attempts + defaultIndexDeleteRetryInterval = 1 * time.Second +) + +// Restore command flags +var ( + snapshotName string + dropAllIndices bool + skipConfirmation bool +) + +func restoreCmd(cliCtx *config.Context) *cobra.Command { + cmd := &cobra.Command{ + Use: "restore-snapshot", + Short: "Restore Elasticsearch from a snapshot", + Long: `Restore Elasticsearch indices from a snapshot. Can optionally delete existing indices before restore.`, + Run: func(_ *cobra.Command, _ []string) { + if err := runRestore(cliCtx); err != nil { + _, _ = fmt.Fprintf(os.Stderr, "error: %v\n", err) + os.Exit(1) + } + }} + + cmd.Flags().StringVarP(&snapshotName, "snapshot-name", "s", "", "Snapshot name to restore (required)") + cmd.Flags().BoolVarP(&dropAllIndices, "drop-all-indices", "r", false, "Delete all existing STS indices before restore") + cmd.Flags().BoolVar(&skipConfirmation, "yes", false, "Skip confirmation prompt") + _ = cmd.MarkFlagRequired("snapshot-name") + return cmd +} + +func runRestore(cliCtx *config.Context) error { + // Create logger + log := logger.New(cliCtx.Config.Quiet, cliCtx.Config.Debug) + + // Create Kubernetes client + k8sClient, err := k8s.NewClient(cliCtx.Config.Kubeconfig, cliCtx.Config.Debug) + if err != nil { + return fmt.Errorf("failed to create Kubernetes client: %w", err) + } + + // Load configuration + cfg, err := config.LoadConfig(k8sClient.Clientset(), cliCtx.Config.Namespace, cliCtx.Config.ConfigMapName, cliCtx.Config.SecretName) + if err != nil { + return fmt.Errorf("failed to load configuration: %w", err) + } + + // Scale down deployments before restore + scaledDeployments, err := scaleDownDeployments(k8sClient, cliCtx.Config.Namespace, cfg.Elasticsearch.Restore.ScaleDownLabelSelector, log) + if err != nil { + return err + } + + // Ensure deployments are scaled back up on exit (even if restore fails) + defer func() { 
+ if len(scaledDeployments) > 0 { + log.Println() + log.Infof("Scaling up deployments back to original replica counts...") + if err := k8sClient.ScaleUpDeployments(cliCtx.Config.Namespace, scaledDeployments); err != nil { + log.Warningf("Failed to scale up deployments: %v", err) + } else { + log.Successf("Scaled up %d deployment(s) successfully:", len(scaledDeployments)) + for _, dep := range scaledDeployments { + log.Infof(" - %s (replicas: 0 -> %d)", dep.Name, dep.Replicas) + } + } + } + }() + + // Setup port-forward to Elasticsearch + serviceName := cfg.Elasticsearch.Service.Name + localPort := cfg.Elasticsearch.Service.LocalPortForwardPort + remotePort := cfg.Elasticsearch.Service.Port + + pf, err := portforward.SetupPortForward(k8sClient, cliCtx.Config.Namespace, serviceName, localPort, remotePort, log) + if err != nil { + return err + } + defer close(pf.StopChan) + + // Create Elasticsearch client + esClient, err := elasticsearch.NewClient(fmt.Sprintf("http://localhost:%d", pf.LocalPort)) + if err != nil { + return fmt.Errorf("failed to create Elasticsearch client: %w", err) + } + + repository := cfg.Elasticsearch.Restore.Repository + + // Get all indices and filter for STS indices + log.Infof("Fetching current Elasticsearch indices...") + allIndices, err := esClient.ListIndices("*") + if err != nil { + return fmt.Errorf("failed to list indices: %w", err) + } + + stsIndices := filterSTSIndices(allIndices, cfg.Elasticsearch.Restore.IndexPrefix, cfg.Elasticsearch.Restore.DatastreamIndexPrefix) + + if dropAllIndices { + log.Println() + if err := deleteIndices(esClient, stsIndices, cfg, log, skipConfirmation); err != nil { + return err + } + } + + // Restore snapshot + log.Println() + log.Infof("Restoring snapshot '%s' from repository '%s'", snapshotName, repository) + + // Get snapshot details to show indices + snapshot, err := esClient.GetSnapshot(repository, snapshotName) + if err != nil { + return fmt.Errorf("failed to get snapshot details: %w", err) + } + + 
log.Debugf("Indices pattern: %s", cfg.Elasticsearch.Restore.IndicesPattern) + + if len(snapshot.Indices) == 0 { + log.Warningf("Snapshot contains no indices") + } else { + log.Infof("Snapshot contains %d index(es)", len(snapshot.Indices)) + for _, index := range snapshot.Indices { + log.Debugf(" - %s", index) + } + } + + log.Infof("Starting restore - this may take several minutes...") + + if err := esClient.RestoreSnapshot(repository, snapshotName, cfg.Elasticsearch.Restore.IndicesPattern, true); err != nil { + return fmt.Errorf("failed to restore snapshot: %w", err) + } + + log.Println() + log.Successf("Restore completed successfully") + return nil +} + +// filterSTSIndices filters indices that match the configured STS prefixes +func filterSTSIndices(allIndices []string, indexPrefix, datastreamPrefix string) []string { + var stsIndices []string + for _, index := range allIndices { + if strings.HasPrefix(index, indexPrefix) || strings.HasPrefix(index, datastreamPrefix) { + stsIndices = append(stsIndices, index) + } + } + return stsIndices +} + +// confirmDeletion prompts the user to confirm index deletion +func confirmDeletion() error { + fmt.Print("\nAre you sure you want to delete these indices? 
(yes/no): ") + reader := bufio.NewReader(os.Stdin) + response, err := reader.ReadString('\n') + if err != nil { + return fmt.Errorf("failed to read confirmation: %w", err) + } + response = strings.TrimSpace(strings.ToLower(response)) + if response != "yes" && response != "y" { + return fmt.Errorf("restore cancelled by user") + } + return nil +} + +// hasDatastreamIndices checks if any indices belong to a datastream +func hasDatastreamIndices(indices []string, datastreamPrefix string) bool { + for _, index := range indices { + if strings.HasPrefix(index, datastreamPrefix+"-") { + return true + } + } + return false +} + +// deleteIndexWithVerification deletes an index and verifies it's gone +func deleteIndexWithVerification(esClient *elasticsearch.Client, index string, log *logger.Logger) error { + log.Infof(" Deleting index: %s", index) + if err := esClient.DeleteIndex(index); err != nil { + return fmt.Errorf("failed to delete index %s: %w", index, err) + } + + // Verify deletion with timeout + for attempt := 0; attempt < defaultMaxIndexDeleteAttempts; attempt++ { + exists, err := esClient.IndexExists(index) + if err != nil { + return fmt.Errorf("failed to check index existence: %w", err) + } + if !exists { + log.Debugf("Index successfully deleted: %s", index) + return nil + } + if attempt >= defaultMaxIndexDeleteAttempts-1 { + return fmt.Errorf("timeout waiting for index %s to be deleted", index) + } + time.Sleep(defaultIndexDeleteRetryInterval) + } + return nil +} + +// scaleDownDeployments scales down deployments matching the label selector +func scaleDownDeployments(k8sClient *k8s.Client, namespace, labelSelector string, log *logger.Logger) ([]k8s.DeploymentScale, error) { + log.Infof("Scaling down deployments (selector: %s)...", labelSelector) + + scaledDeployments, err := k8sClient.ScaleDownDeployments(namespace, labelSelector) + if err != nil { + return nil, fmt.Errorf("failed to scale down deployments: %w", err) + } + + if len(scaledDeployments) == 0 { + 
log.Infof("No deployments found to scale down") + } else { + log.Successf("Scaled down %d deployment(s):", len(scaledDeployments)) + for _, dep := range scaledDeployments { + log.Infof(" - %s (replicas: %d -> 0)", dep.Name, dep.Replicas) + } + } + + return scaledDeployments, nil +} + +// deleteIndices handles the deletion of all STS indices including datastream rollover +func deleteIndices(esClient *elasticsearch.Client, stsIndices []string, cfg *config.Config, log *logger.Logger, skipConfirm bool) error { + if len(stsIndices) == 0 { + log.Infof("No STS indices found to delete") + return nil + } + + log.Infof("Found %d STS index(es) to delete", len(stsIndices)) + for _, index := range stsIndices { + log.Debugf(" - %s", index) + } + + // Confirmation prompt + if !skipConfirm { + if err := confirmDeletion(); err != nil { + return err + } + } + + // Check for datastream and rollover if needed + if hasDatastreamIndices(stsIndices, cfg.Elasticsearch.Restore.DatastreamIndexPrefix) { + log.Infof("Rolling over datastream '%s'...", cfg.Elasticsearch.Restore.DatastreamName) + if err := esClient.RolloverDatastream(cfg.Elasticsearch.Restore.DatastreamName); err != nil { + return fmt.Errorf("failed to rollover datastream: %w", err) + } + log.Successf("Datastream rolled over successfully") + } + + // Delete all indices + log.Infof("Deleting %d index(es)...", len(stsIndices)) + for _, index := range stsIndices { + if err := deleteIndexWithVerification(esClient, index, log); err != nil { + return err + } + } + log.Successf("All indices deleted successfully") + return nil +} diff --git a/cmd/elasticsearch/restore_snapshot_test.go b/cmd/elasticsearch/restore_snapshot_test.go new file mode 100644 index 0000000..761cc86 --- /dev/null +++ b/cmd/elasticsearch/restore_snapshot_test.go @@ -0,0 +1,431 @@ +package elasticsearch + +import ( + "fmt" + "testing" + "time" + + "github.com/stackvista/stackstate-backup-cli/internal/config" + 
"github.com/stackvista/stackstate-backup-cli/internal/elasticsearch" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// mockESClientForRestore is a mock for testing restore command +type mockESClientForRestore struct { + indices []string + snapshot *elasticsearch.Snapshot + deleteErr error + indexExistsMap map[string]bool + restoreErr error + getSnapshotErr error + rolloverErr error + deletedIndices []string + restoredSnapshot string + rolledOverDS string +} + +func (m *mockESClientForRestore) ListIndices(_ string) ([]string, error) { + return m.indices, nil +} + +func (m *mockESClientForRestore) GetSnapshot(_, _ string) (*elasticsearch.Snapshot, error) { + if m.getSnapshotErr != nil { + return nil, m.getSnapshotErr + } + return m.snapshot, nil +} + +func (m *mockESClientForRestore) DeleteIndex(index string) error { + if m.deleteErr != nil { + return m.deleteErr + } + m.deletedIndices = append(m.deletedIndices, index) + // Update exists map + if m.indexExistsMap == nil { + m.indexExistsMap = make(map[string]bool) + } + m.indexExistsMap[index] = false + return nil +} + +func (m *mockESClientForRestore) IndexExists(index string) (bool, error) { + if m.indexExistsMap == nil { + return false, nil + } + exists, ok := m.indexExistsMap[index] + if !ok { + return false, nil + } + return exists, nil +} + +func (m *mockESClientForRestore) RestoreSnapshot(_, snapshotName, _ string, _ bool) error { + if m.restoreErr != nil { + return m.restoreErr + } + m.restoredSnapshot = snapshotName + return nil +} + +func (m *mockESClientForRestore) RolloverDatastream(datastreamName string) error { + if m.rolloverErr != nil { + return m.rolloverErr + } + m.rolledOverDS = datastreamName + return nil +} + +func (m *mockESClientForRestore) ListSnapshots(_ string) ([]elasticsearch.Snapshot, error) { + return nil, fmt.Errorf("not implemented") +} + +func (m *mockESClientForRestore) ListIndicesDetailed() ([]elasticsearch.IndexInfo, error) { + return nil, 
fmt.Errorf("not implemented") +} + +func (m *mockESClientForRestore) ConfigureSnapshotRepository(_, _, _, _, _, _ string) error { + return fmt.Errorf("not implemented") +} + +func (m *mockESClientForRestore) ConfigureSLMPolicy(_, _, _, _, _, _ string, _, _ int) error { + return fmt.Errorf("not implemented") +} + +// TestRestoreCmd_Unit tests the command structure +func TestRestoreCmd_Unit(t *testing.T) { + cliCtx := config.NewContext() + cmd := restoreCmd(cliCtx) + + // Test command metadata + assert.Equal(t, "restore-snapshot", cmd.Use) + assert.Equal(t, "Restore Elasticsearch from a snapshot", cmd.Short) + assert.NotEmpty(t, cmd.Long) + assert.NotNil(t, cmd.Run) + + // Test flags + snapshotFlag := cmd.Flags().Lookup("snapshot-name") + require.NotNil(t, snapshotFlag) + assert.Equal(t, "s", snapshotFlag.Shorthand) + + dropFlag := cmd.Flags().Lookup("drop-all-indices") + require.NotNil(t, dropFlag) + assert.Equal(t, "r", dropFlag.Shorthand) + + yesFlag := cmd.Flags().Lookup("yes") + require.NotNil(t, yesFlag) +} + +// TestFilterSTSIndices tests the index filtering logic +func TestFilterSTSIndices(t *testing.T) { + tests := []struct { + name string + allIndices []string + indexPrefix string + datastreamPrefix string + expectedCount int + expectedIndices []string + }{ + { + name: "filter STS indices only", + allIndices: []string{ + "sts_topology", + "sts_metrics", + "sts_k8s_logs-000001", + "other_index", + ".kibana", + }, + indexPrefix: "sts_", + datastreamPrefix: "sts_k8s_logs", + expectedCount: 3, + expectedIndices: []string{"sts_topology", "sts_metrics", "sts_k8s_logs-000001"}, + }, + { + name: "no STS indices", + allIndices: []string{ + "other_index", + ".kibana", + "system_logs", + }, + indexPrefix: "sts_", + datastreamPrefix: "sts_k8s_logs", + expectedCount: 0, + expectedIndices: []string{}, + }, + { + name: "empty index list", + allIndices: []string{}, + indexPrefix: "sts_", + datastreamPrefix: "sts_k8s_logs", + expectedCount: 0, + expectedIndices: []string{}, 
+ }, + { + name: "only datastream indices", + allIndices: []string{ + "sts_k8s_logs-000001", + "sts_k8s_logs-000002", + "other_index", + }, + indexPrefix: "sts_", + datastreamPrefix: "sts_k8s_logs", + expectedCount: 2, + expectedIndices: []string{"sts_k8s_logs-000001", "sts_k8s_logs-000002"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := filterSTSIndices(tt.allIndices, tt.indexPrefix, tt.datastreamPrefix) + assert.Equal(t, tt.expectedCount, len(result)) + + if tt.expectedCount > 0 { + for _, expected := range tt.expectedIndices { + assert.Contains(t, result, expected) + } + } + }) + } +} + +// TestHasDatastreamIndices tests datastream detection +func TestHasDatastreamIndices(t *testing.T) { + tests := []struct { + name string + indices []string + datastreamPrefix string + expected bool + }{ + { + name: "has datastream indices", + indices: []string{ + "sts_topology", + "sts_k8s_logs-000001", + "sts_metrics", + }, + datastreamPrefix: "sts_k8s_logs", + expected: true, + }, + { + name: "no datastream indices", + indices: []string{ + "sts_topology", + "sts_metrics", + }, + datastreamPrefix: "sts_k8s_logs", + expected: false, + }, + { + name: "empty indices list", + indices: []string{}, + datastreamPrefix: "sts_k8s_logs", + expected: false, + }, + { + name: "datastream prefix without dash", + indices: []string{ + "sts_k8s_logs", + "sts_topology", + }, + datastreamPrefix: "sts_k8s_logs", + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := hasDatastreamIndices(tt.indices, tt.datastreamPrefix) + assert.Equal(t, tt.expected, result) + }) + } +} + +// TestMockESClientForRestore demonstrates mock usage for restore +// +//nolint:funlen +func TestMockESClientForRestore(t *testing.T) { + tests := []struct { + name string + initialIndices []string + indicesToDelete []string + deleteErr error + restoreErr error + rolloverErr error + expectDeletedCount int + expectRestoreOK bool + 
expectRolloverOK bool + }{ + { + name: "successful restore with index deletion", + initialIndices: []string{"sts_topology", "sts_metrics"}, + indicesToDelete: []string{"sts_topology", "sts_metrics"}, + deleteErr: nil, + restoreErr: nil, + expectDeletedCount: 2, + expectRestoreOK: true, + }, + { + name: "restore without deletion", + initialIndices: []string{}, + indicesToDelete: []string{}, + deleteErr: nil, + restoreErr: nil, + expectDeletedCount: 0, + expectRestoreOK: true, + }, + { + name: "index deletion fails", + initialIndices: []string{"sts_topology"}, + indicesToDelete: []string{"sts_topology"}, + deleteErr: fmt.Errorf("deletion failed"), + restoreErr: nil, + expectDeletedCount: 0, + expectRestoreOK: false, + }, + { + name: "restore fails", + initialIndices: []string{}, + indicesToDelete: []string{}, + deleteErr: nil, + restoreErr: fmt.Errorf("restore failed"), + expectDeletedCount: 0, + expectRestoreOK: false, + }, + { + name: "successful rollover", + initialIndices: []string{"sts_k8s_logs-000001"}, + indicesToDelete: []string{}, + rolloverErr: nil, + expectDeletedCount: 0, + expectRolloverOK: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockClient := &mockESClientForRestore{ + indices: tt.initialIndices, + deleteErr: tt.deleteErr, + restoreErr: tt.restoreErr, + rolloverErr: tt.rolloverErr, + indexExistsMap: make(map[string]bool), + snapshot: &elasticsearch.Snapshot{ + Snapshot: "test-snapshot", + State: "SUCCESS", + Indices: []string{"sts_topology", "sts_metrics"}, + Repository: "backup-repo", + }, + } + + // Initialize index existence + for _, idx := range tt.initialIndices { + mockClient.indexExistsMap[idx] = true + } + + // Test deletion + for _, idx := range tt.indicesToDelete { + err := mockClient.DeleteIndex(idx) + if tt.deleteErr != nil { + assert.Error(t, err) + return + } + assert.NoError(t, err) + } + + assert.Equal(t, tt.expectDeletedCount, len(mockClient.deletedIndices)) + + // Test rollover if 
applicable + if tt.expectRolloverOK { + err := mockClient.RolloverDatastream("sts_k8s_logs") + assert.NoError(t, err) + assert.Equal(t, "sts_k8s_logs", mockClient.rolledOverDS) + } + + // Test restore + err := mockClient.RestoreSnapshot("backup-repo", "test-snapshot", "sts_*", true) + if tt.expectRestoreOK { + assert.NoError(t, err) + assert.Equal(t, "test-snapshot", mockClient.restoredSnapshot) + } else if tt.restoreErr != nil { + assert.Error(t, err) + } + }) + } +} + +// TestDeleteIndexWithVerification tests index deletion with verification +func TestDeleteIndexWithVerification(t *testing.T) { + tests := []struct { + name string + indexName string + deleteErr error + indexExistsAfter bool + expectError bool + }{ + { + name: "successful deletion", + indexName: "sts_test", + deleteErr: nil, + indexExistsAfter: false, + expectError: false, + }, + { + name: "deletion fails", + indexName: "sts_test", + deleteErr: fmt.Errorf("deletion error"), + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockClient := &mockESClientForRestore{ + deleteErr: tt.deleteErr, + indexExistsMap: map[string]bool{tt.indexName: true}, + } + + // Simulate the deletion + err := mockClient.DeleteIndex(tt.indexName) + + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + // Verify index was marked as deleted + exists, _ := mockClient.IndexExists(tt.indexName) + assert.False(t, exists) + } + }) + } +} + +// TestRestoreSnapshot_Integration tests snapshot info retrieval +func TestRestoreSnapshot_Integration(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode") + } + + mockClient := &mockESClientForRestore{ + snapshot: &elasticsearch.Snapshot{ + Snapshot: "backup-2024-01-01", + UUID: "test-uuid", + Repository: "backup-repo", + State: "SUCCESS", + StartTime: time.Now().Add(-1 * time.Hour).Format(time.RFC3339), + Indices: []string{"sts_topology", "sts_metrics", "sts_traces"}, + }, + } + 
+ snapshot, err := mockClient.GetSnapshot("backup-repo", "backup-2024-01-01") + require.NoError(t, err) + assert.NotNil(t, snapshot) + assert.Equal(t, "backup-2024-01-01", snapshot.Snapshot) + assert.Equal(t, "SUCCESS", snapshot.State) + assert.Equal(t, 3, len(snapshot.Indices)) +} + +// TestRestoreConstants tests the restore command constants +func TestRestoreConstants(t *testing.T) { + assert.Equal(t, 30, defaultMaxIndexDeleteAttempts) + assert.Equal(t, 1*time.Second, defaultIndexDeleteRetryInterval) +} diff --git a/cmd/portforward/portforward.go b/cmd/portforward/portforward.go new file mode 100644 index 0000000..19830bb --- /dev/null +++ b/cmd/portforward/portforward.go @@ -0,0 +1,45 @@ +package portforward + +import ( + "fmt" + + "github.com/stackvista/stackstate-backup-cli/internal/k8s" + "github.com/stackvista/stackstate-backup-cli/internal/logger" +) + +// Conn contains the channels needed to manage a port-forward connection +type Conn struct { + StopChan chan struct{} + ReadyChan <-chan struct{} + LocalPort int +} + +// SetupPortForward establishes a port-forward to a Kubernetes service and waits for it to be ready. +// It returns a Conn containing the stop and ready channels, plus the local port. +// The caller is responsible for closing the StopChan when done. 
+func SetupPortForward( + k8sClient *k8s.Client, + namespace string, + serviceName string, + localPort int, + remotePort int, + log *logger.Logger, +) (*Conn, error) { + log.Infof("Setting up port-forward to %s:%d in namespace %s...", serviceName, remotePort, namespace) + + stopChan, readyChan, err := k8sClient.PortForwardService(namespace, serviceName, localPort, remotePort) + if err != nil { + return nil, fmt.Errorf("failed to setup port-forward: %w", err) + } + + // Wait for port-forward to be ready + <-readyChan + + log.Successf("Port-forward established successfully") + + return &Conn{ + StopChan: stopChan, + ReadyChan: readyChan, + LocalPort: localPort, + }, nil +} diff --git a/cmd/portforward/portforward_test.go b/cmd/portforward/portforward_test.go new file mode 100644 index 0000000..f6f10ae --- /dev/null +++ b/cmd/portforward/portforward_test.go @@ -0,0 +1,123 @@ +package portforward + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" + + "github.com/stackvista/stackstate-backup-cli/internal/k8s" + "github.com/stackvista/stackstate-backup-cli/internal/logger" +) + +func TestSetupPortForward_ServiceNotFound(t *testing.T) { + fakeClientset := fake.NewSimpleClientset() + client := k8s.NewTestClient(fakeClientset) + log := logger.New(true, false) + + _, err := SetupPortForward(client, "default", "nonexistent-service", 8080, 9200, log) + if err == nil { + t.Fatal("expected error for nonexistent service, got nil") + } +} + +func TestSetupPortForward_NoPodsFound(t *testing.T) { + fakeClientset := fake.NewSimpleClientset( + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + "app": "test", + }, + }, + }, + ) + client := k8s.NewTestClient(fakeClientset) + log := logger.New(true, false) + + _, err := SetupPortForward(client, "default", "test-service", 8080, 9200, log) 
+ if err == nil { + t.Fatal("expected error for service with no pods, got nil") + } +} + +func TestSetupPortForward_NoRunningPods(t *testing.T) { + fakeClientset := fake.NewSimpleClientset( + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + "app": "test", + }, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + Labels: map[string]string{ + "app": "test", + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodPending, + }, + }, + ) + client := k8s.NewTestClient(fakeClientset) + log := logger.New(true, false) + + _, err := SetupPortForward(client, "default", "test-service", 8080, 9200, log) + if err == nil { + t.Fatal("expected error for service with no running pods, got nil") + } +} + +func TestConn_Structure(t *testing.T) { + stopChan := make(chan struct{}) + readyChan := make(chan struct{}) + localPort := 8080 + + result := &Conn{ + StopChan: stopChan, + ReadyChan: readyChan, + LocalPort: localPort, + } + + if result.StopChan == nil { + t.Error("expected StopChan to be set") + } + if result.ReadyChan == nil { + t.Error("expected ReadyChan to be set") + } + if result.LocalPort != localPort { + t.Errorf("expected LocalPort to be %d, got %d", localPort, result.LocalPort) + } +} + +func TestConn_ChannelCleanup(t *testing.T) { + stopChan := make(chan struct{}) + readyChan := make(chan struct{}) + + result := &Conn{ + StopChan: stopChan, + ReadyChan: readyChan, + LocalPort: 8080, + } + + close(result.StopChan) + + select { + case <-result.StopChan: + // Successfully received from closed channel + default: + t.Error("expected StopChan to be closed") + } +} diff --git a/cmd/root.go b/cmd/root.go new file mode 100644 index 0000000..1893ac5 --- /dev/null +++ b/cmd/root.go @@ -0,0 +1,51 @@ +package cmd + +import ( + "os" + + "github.com/spf13/cobra" + 
"github.com/stackvista/stackstate-backup-cli/cmd/elasticsearch" + "github.com/stackvista/stackstate-backup-cli/cmd/version" + "github.com/stackvista/stackstate-backup-cli/internal/config" +) + +var ( + cliCtx *config.Context +) + +// addBackupConfigFlags adds configuration flags needed for backup/restore operations +// to commands that interact with data services (Elasticsearch, etc.) +func addBackupConfigFlags(cmd *cobra.Command) { + cmd.PersistentFlags().StringVar(&cliCtx.Config.Namespace, "namespace", "", "Kubernetes namespace (required)") + cmd.PersistentFlags().StringVar(&cliCtx.Config.Kubeconfig, "kubeconfig", "", "Path to kubeconfig file (default: ~/.kube/config)") + cmd.PersistentFlags().BoolVar(&cliCtx.Config.Debug, "debug", false, "Enable debug output") + cmd.PersistentFlags().BoolVarP(&cliCtx.Config.Quiet, "quiet", "q", false, "Suppress operational messages (only show errors and data output)") + cmd.PersistentFlags().StringVar(&cliCtx.Config.ConfigMapName, "configmap", "suse-observability-backup-config", "ConfigMap name containing backup configuration") + cmd.PersistentFlags().StringVar(&cliCtx.Config.SecretName, "secret", "suse-observability-backup-config", "Secret name containing backup configuration") + cmd.PersistentFlags().StringVarP(&cliCtx.Config.OutputFormat, "output", "o", "table", "Output format (table, json)") + _ = cmd.MarkPersistentFlagRequired("namespace") +} + +func init() { + cliCtx = config.NewContext() + + // Add backup config flags to commands that need them + esCmd := elasticsearch.Cmd(cliCtx) + addBackupConfigFlags(esCmd) + rootCmd.AddCommand(esCmd) + + // Add commands that don't need backup config flags + rootCmd.AddCommand(version.Cmd()) +} + +var rootCmd = &cobra.Command{ + Use: "sts-backup", + Short: "Backup and restore tool for SUSE Observability platform", + Long: `A CLI tool for managing backups and restores for SUSE Observability platform running on Kubernetes.`, +} + +func Execute() { + if err := rootCmd.Execute(); err != 
nil { + os.Exit(1) + } +} diff --git a/cmd/version/version.go b/cmd/version/version.go new file mode 100644 index 0000000..8a123db --- /dev/null +++ b/cmd/version/version.go @@ -0,0 +1,27 @@ +package version + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +var ( + Version string + Commit string + Date string +) + +func Cmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "version", + Short: "Display the version number", + Run: func(_ *cobra.Command, _ []string) { + fmt.Printf("Version: %s\n", Version) + fmt.Printf("Commit: %s\n", Commit) + fmt.Printf("Date built: %s\n", Date) + }, + } + + return cmd +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 0000000..f1658f0 --- /dev/null +++ b/flake.nix @@ -0,0 +1,111 @@ +{ + description = "StackState CLI"; + + nixConfig.bash-prompt = "STS CLI 2 $ "; + + inputs = { + nixpkgs.url = "github:nixos/nixpkgs"; + flake-utils.url = "github:numtide/flake-utils"; + }; + + outputs = { self, nixpkgs, flake-utils }: + flake-utils.lib.eachDefaultSystem (system: + let + pkgs = import nixpkgs { inherit system; overlays = [ ]; }; + pkgs-linux = import nixpkgs { system = "x86_64-linux"; overlays = [ ]; }; + + # Dependencies used for both development and CI/CD + sharedDeps = pkgs: (with pkgs; [ + bash + go_1_19 + gotools + diffutils # Required for golangci-lint + golangci-lint + openapi-generator-cli + ]); + + # Dependencies used only by CI/CD + ciDeps = pkgs: (with pkgs; [ + git + cacert + gcc + coreutils-full + goreleaser + awscli + docker + ]); + + darwinDevShellExtraDeps = pkgs: pkgs.lib.optionals pkgs.stdenv.isDarwin (with pkgs.darwin.apple_sdk_11_0; [ + Libsystem + IOKit + ]); + in { + + devShells = { + dev = pkgs.mkShell { + buildInputs = sharedDeps(pkgs) ++ darwinDevShellExtraDeps(pkgs); + }; + + ci = pkgs.mkShell { + buildInputs = sharedDeps(pkgs) ++ ciDeps(pkgs); + }; + }; + + devShell = self.devShells."${system}".dev; + + packages = { + sts = pkgs.buildGo119Module { + pname = "sts"; + version = "2.0.0"; + + 
src = ./.; + + # This hash locks the dependencies of this package. + # Change it to the provided when the go dependencies change. + # See https://www.tweag.io/blog/2021-03-04-gomod2nix/ for details. + # + # NOTE In case if your build fails due to incosistency in vendor modules + # Comment out the real hash and uncomment the fake one then on next `nix build .` run + # you will get a new real hash which can be used here. + # + # vendorSha256 = pkgs.lib.fakeSha256; + vendorSha256 = "sha256-aXTDHT1N+4Qpkuxb8vvBvP2VPyS5ofCgX6XFhJ5smUQ="; + + postInstall = '' + mv $out/bin/stackstate-cli2 $out/bin/sts + ''; + }; + + ci-image = pkgs.dockerTools.buildImage { + name = "stackstate-cli2-ci"; + tag = "latest"; + created = "now"; + + contents = sharedDeps(pkgs-linux) ++ ciDeps(pkgs-linux); + + config = { + Env = [ + "GIT_SSL_CAINFO=/etc/ssl/certs/ca-bundle.crt" + "SSL_CERT_FILE=/etc/ssl/certs/ca-bundle.crt" + ]; + + # Required to make golangci-lint work. + Volumes = { + "/tmp" = {}; + }; + }; + }; + + default = self.packages."${system}".sts; + }; + + apps = { + sts = { + type = "app"; + program = "${self.packages."${system}".sts}/bin/sts"; + }; + + default = self.apps."${system}".sts; + }; + }); +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..f66215c --- /dev/null +++ b/go.mod @@ -0,0 +1,70 @@ +module github.com/stackvista/stackstate-backup-cli + +go 1.25.2 + +require ( + dario.cat/mergo v1.0.2 + github.com/elastic/go-elasticsearch/v8 v8.19.0 + github.com/go-playground/validator/v10 v10.28.0 + github.com/spf13/cobra v1.10.1 + github.com/stretchr/testify v1.11.1 + gopkg.in/yaml.v3 v3.0.1 + k8s.io/api v0.34.1 + k8s.io/apimachinery v0.34.1 + k8s.io/client-go v0.34.1 +) + +require ( + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/elastic/elastic-transport-go/v8 v8.7.0 // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/gabriel-vasile/mimetype 
v1.4.10 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/gnostic-models v0.7.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/leodido/go-urn v1.4.0 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/moby/spdystream v0.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/spf13/pflag v1.0.9 // indirect + github.com/x448/float16 v0.8.4 // indirect + go.opentelemetry.io/otel v1.28.0 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel/trace v1.28.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/crypto v0.42.0 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/sys v0.36.0 // indirect + golang.org/x/term v0.35.0 // indirect + golang.org/x/text v0.29.0 // indirect + golang.org/x/time v0.9.0 // indirect + google.golang.org/protobuf v1.36.5 // indirect + 
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect + k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..6447f57 --- /dev/null +++ b/go.sum @@ -0,0 +1,202 @@ +dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= +dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/elastic/elastic-transport-go/v8 v8.7.0 h1:OgTneVuXP2uip4BA658Xi6Hfw+PeIOod2rY3GVMGoVE= +github.com/elastic/elastic-transport-go/v8 v8.7.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= +github.com/elastic/go-elasticsearch/v8 v8.19.0 h1:VmfBLNRORY7RZL+9hTxBD97ehl9H8Nxf2QigDh6HuMU= +github.com/elastic/go-elasticsearch/v8 v8.19.0/go.mod h1:F3j9e+BubmKvzvLjNui/1++nJuJxbkhHefbaT0kFKGY= +github.com/emicklei/go-restful/v3 
v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0= +github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= 
+github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.28.0 h1:Q7ibns33JjyW48gHkuFT91qX48KG0ktULL6FgHdG688= +github.com/go-playground/validator/v10 v10.28.0/go.mod h1:GoI6I1SjPBh9p7ykNE/yj3fFYbyDOpwMn5KXd+m2hUU= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= +github.com/inconshreveable/mousetrap v1.1.0 
h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= +github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd 
h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= +github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= +github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/cobra v1.10.1 
h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= 
+go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI= +golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= 
+golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= +golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ= +golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= +golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.36.0 
h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM= +k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk= +k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4= +k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/client-go v0.34.1 
h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY= +k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/internal/config/config.go b/internal/config/config.go new file mode 100644 index 0000000..d196400 --- /dev/null +++ b/internal/config/config.go @@ -0,0 +1,139 @@ +// Package config provides configuration management for the backup CLI tool. +// It supports loading configuration from Kubernetes ConfigMaps and Secrets +// with a merge strategy that allows ConfigMap to be overridden by Secret. 
+package config + +import ( + "context" + "fmt" + + "dario.cat/mergo" + "github.com/go-playground/validator/v10" + "gopkg.in/yaml.v3" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +// Config represents the merged configuration from ConfigMap and Secret +type Config struct { + Elasticsearch ElasticsearchConfig `yaml:"elasticsearch" validate:"required"` +} + +// ElasticsearchConfig holds Elasticsearch-specific configuration +type ElasticsearchConfig struct { + Service ServiceConfig `yaml:"service" validate:"required"` + Restore RestoreConfig `yaml:"restore" validate:"required"` + SnapshotRepository SnapshotRepositoryConfig `yaml:"snapshotRepository" validate:"required"` + SLM SLMConfig `yaml:"slm" validate:"required"` +} + +// RestoreConfig holds restore-specific configuration +type RestoreConfig struct { + ScaleDownLabelSelector string `yaml:"scaleDownLabelSelector" validate:"required"` + IndexPrefix string `yaml:"indexPrefix" validate:"required"` + DatastreamIndexPrefix string `yaml:"datastreamIndexPrefix" validate:"required"` + DatastreamName string `yaml:"datastreamName" validate:"required"` + IndicesPattern string `yaml:"indicesPattern" validate:"required"` + Repository string `yaml:"repository" validate:"required"` +} + +// SnapshotRepositoryConfig holds snapshot repository configuration +type SnapshotRepositoryConfig struct { + Name string `yaml:"name" validate:"required"` + Bucket string `yaml:"bucket" validate:"required"` + Endpoint string `yaml:"endpoint" validate:"required"` + BasePath string `yaml:"basepath"` + AccessKey string `yaml:"accessKey" validate:"required"` // From secret + SecretKey string `yaml:"secretKey" validate:"required"` // From secret +} + +// SLMConfig holds Snapshot Lifecycle Management configuration +type SLMConfig struct { + Name string `yaml:"name" validate:"required"` + Schedule string `yaml:"schedule" validate:"required"` + SnapshotTemplateName string `yaml:"snapshotTemplateName" 
validate:"required"` + Repository string `yaml:"repository" validate:"required"` + Indices string `yaml:"indices" validate:"required"` + RetentionExpireAfter string `yaml:"retentionExpireAfter" validate:"required"` + RetentionMinCount int `yaml:"retentionMinCount" validate:"required,min=1"` + RetentionMaxCount int `yaml:"retentionMaxCount" validate:"required,min=1"` +} + +// ServiceConfig holds service connection details +type ServiceConfig struct { + Name string `yaml:"name" validate:"required"` + Port int `yaml:"port" validate:"required,min=1,max=65535"` + LocalPortForwardPort int `yaml:"localPortForwardPort" validate:"required,min=1,max=65535"` +} + +// LoadConfig loads and merges configuration from ConfigMap and Secret +// ConfigMap provides base configuration, Secret overrides it +// All required fields must be present after merging, validated with validator +func LoadConfig(clientset kubernetes.Interface, namespace, configMapName, secretName string) (*Config, error) { + ctx := context.Background() + config := &Config{} + + // Load ConfigMap if it exists + if configMapName != "" { + cm, err := clientset.CoreV1().ConfigMaps(namespace).Get(ctx, configMapName, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to get ConfigMap '%s': %w", configMapName, err) + } + + if configData, ok := cm.Data["config"]; ok { + if err := yaml.Unmarshal([]byte(configData), config); err != nil { + return nil, fmt.Errorf("failed to parse ConfigMap config: %w", err) + } + } else { + return nil, fmt.Errorf("ConfigMap '%s' does not contain 'config' key", configMapName) + } + } + + // Load Secret if it exists (overrides ConfigMap) + if secretName != "" { + secret, err := clientset.CoreV1().Secrets(namespace).Get(ctx, secretName, metav1.GetOptions{}) + if err != nil { + // Secret is optional - only used for overrides + fmt.Printf("Warning: Secret '%s' not found, using ConfigMap only\n", secretName) + } else { + if configData, ok := secret.Data["config"]; ok { + var 
secretConfig Config + if err := yaml.Unmarshal(configData, &secretConfig); err != nil { + return nil, fmt.Errorf("failed to parse Secret config: %w", err) + } + // Merge Secret config into base config (non-zero values override) + if err := mergo.Merge(config, secretConfig, mergo.WithOverride); err != nil { + return nil, fmt.Errorf("failed to merge Secret config: %w", err) + } + } + } + } + + // Validate the merged configuration + validate := validator.New() + if err := validate.Struct(config); err != nil { + return nil, fmt.Errorf("configuration validation failed: %w", err) + } + + return config, nil +} + +type Context struct { + Config *CLIConfig +} + +type CLIConfig struct { + Namespace string + Kubeconfig string + Debug bool + Quiet bool + ConfigMapName string + SecretName string + OutputFormat string // table, json +} + +func NewContext() *Context { + return &Context{ + Config: &CLIConfig{}, + } +} diff --git a/internal/config/config_test.go b/internal/config/config_test.go new file mode 100644 index 0000000..544a1d3 --- /dev/null +++ b/internal/config/config_test.go @@ -0,0 +1,482 @@ +package config + +import ( + "context" + "os" + "path/filepath" + "testing" + + "github.com/go-playground/validator/v10" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" +) + +const invalidConfigYAML = ` +elasticsearch: + service: + name: "" + port: 0 +` + +// loadTestData loads test configuration from testdata files +func loadTestData(t *testing.T, filename string) string { + t.Helper() + data, err := os.ReadFile(filepath.Join("testdata", filename)) + require.NoError(t, err, "failed to read test data file: %s", filename) + return string(data) +} + +func TestLoadConfig_FromConfigMapOnly(t *testing.T) { + fakeClient := fake.NewSimpleClientset() + validConfigYAML := loadTestData(t, "validConfigMapOnly.yaml") + + // Create ConfigMap + cm := 
&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "backup-config", + Namespace: "test-ns", + }, + Data: map[string]string{ + "config": validConfigYAML, + }, + } + _, err := fakeClient.CoreV1().ConfigMaps("test-ns").Create( + context.Background(), cm, metav1.CreateOptions{}, + ) + require.NoError(t, err) + + // Load config + config, err := LoadConfig(fakeClient, "test-ns", "backup-config", "") + + // Assertions + require.NoError(t, err) + assert.NotNil(t, config) + assert.Equal(t, "suse-observability-elasticsearch-master-headless", config.Elasticsearch.Service.Name) + assert.Equal(t, 9200, config.Elasticsearch.Service.Port) + assert.Equal(t, "sts-backup", config.Elasticsearch.SnapshotRepository.Name) + assert.Equal(t, "configmap-access-key", config.Elasticsearch.SnapshotRepository.AccessKey) + assert.Equal(t, "configmap-secret-key", config.Elasticsearch.SnapshotRepository.SecretKey) +} + +func TestLoadConfig_CompleteConfiguration(t *testing.T) { + fakeClient := fake.NewSimpleClientset() + validConfigYAML := loadTestData(t, "validConfigMapConfig.yaml") + secretOverrideYAML := loadTestData(t, "validSecretConfig.yaml") + + // Create ConfigMap with non-sensitive configuration + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "backup-config", + Namespace: "test-ns", + }, + Data: map[string]string{ + "config": validConfigYAML, + }, + } + _, err := fakeClient.CoreV1().ConfigMaps("test-ns").Create( + context.Background(), cm, metav1.CreateOptions{}, + ) + require.NoError(t, err) + + // Create Secret with sensitive credentials + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "backup-secret", + Namespace: "test-ns", + }, + Data: map[string][]byte{ + "config": []byte(secretOverrideYAML), + }, + } + _, err = fakeClient.CoreV1().Secrets("test-ns").Create( + context.Background(), secret, metav1.CreateOptions{}, + ) + require.NoError(t, err) + + // Load config - production pattern: ConfigMap + Secret + config, err := 
LoadConfig(fakeClient, "test-ns", "backup-config", "backup-secret") + + // Comprehensive assertions + require.NoError(t, err) + assert.NotNil(t, config) + + // Service config + assert.Equal(t, "suse-observability-elasticsearch-master-headless", config.Elasticsearch.Service.Name) + assert.Equal(t, 9200, config.Elasticsearch.Service.Port) + assert.Equal(t, 9200, config.Elasticsearch.Service.LocalPortForwardPort) + + // Restore config + assert.Equal(t, "observability.suse.com/scalable-during-es-restore=true", config.Elasticsearch.Restore.ScaleDownLabelSelector) + assert.Equal(t, "sts", config.Elasticsearch.Restore.IndexPrefix) + assert.Equal(t, ".ds-sts_k8s_logs", config.Elasticsearch.Restore.DatastreamIndexPrefix) + assert.Equal(t, "sts_k8s_logs", config.Elasticsearch.Restore.DatastreamName) + assert.Equal(t, "sts*,.ds-sts_k8s_logs*", config.Elasticsearch.Restore.IndicesPattern) + assert.Equal(t, "sts-backup", config.Elasticsearch.Restore.Repository) + + // Snapshot repository config + assert.Equal(t, "sts-backup", config.Elasticsearch.SnapshotRepository.Name) + assert.Equal(t, "sts-elasticsearch-backup", config.Elasticsearch.SnapshotRepository.Bucket) + assert.Equal(t, "suse-observability-minio:9000", config.Elasticsearch.SnapshotRepository.Endpoint) + assert.Equal(t, "", config.Elasticsearch.SnapshotRepository.BasePath) + // Credentials come from Secret + assert.Equal(t, "secret-access-key", config.Elasticsearch.SnapshotRepository.AccessKey) + assert.Equal(t, "secret-secret-key", config.Elasticsearch.SnapshotRepository.SecretKey) + + // SLM config + assert.Equal(t, "auto-sts-backup", config.Elasticsearch.SLM.Name) + assert.Equal(t, "0 0 3 * * ?", config.Elasticsearch.SLM.Schedule) + assert.Equal(t, "", config.Elasticsearch.SLM.SnapshotTemplateName) + assert.Equal(t, "sts-backup", config.Elasticsearch.SLM.Repository) + assert.Equal(t, "sts*", config.Elasticsearch.SLM.Indices) + assert.Equal(t, "30d", config.Elasticsearch.SLM.RetentionExpireAfter) + assert.Equal(t, 
5, config.Elasticsearch.SLM.RetentionMinCount) + assert.Equal(t, 30, config.Elasticsearch.SLM.RetentionMaxCount) +} + +func TestLoadConfig_WithSecretOverride(t *testing.T) { + fakeClient := fake.NewSimpleClientset() + validConfigYAML := loadTestData(t, "validConfigMapOnly.yaml") + secretOverrideYAML := loadTestData(t, "validSecretConfig.yaml") + + // Create ConfigMap with credentials + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "backup-config", + Namespace: "test-ns", + }, + Data: map[string]string{ + "config": validConfigYAML, + }, + } + _, err := fakeClient.CoreV1().ConfigMaps("test-ns").Create( + context.Background(), cm, metav1.CreateOptions{}, + ) + require.NoError(t, err) + + // Create Secret with different credentials + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "backup-secret", + Namespace: "test-ns", + }, + Data: map[string][]byte{ + "config": []byte(secretOverrideYAML), + }, + } + _, err = fakeClient.CoreV1().Secrets("test-ns").Create( + context.Background(), secret, metav1.CreateOptions{}, + ) + require.NoError(t, err) + + // Load config + config, err := LoadConfig(fakeClient, "test-ns", "backup-config", "backup-secret") + + // Assertions - Secret should override ConfigMap credentials + require.NoError(t, err) + assert.NotNil(t, config) + assert.Equal(t, "suse-observability-elasticsearch-master-headless", config.Elasticsearch.Service.Name) + // Verify Secret overrides ConfigMap: secret-access-key overrides configmap-access-key + assert.Equal(t, "secret-access-key", config.Elasticsearch.SnapshotRepository.AccessKey) + assert.Equal(t, "secret-secret-key", config.Elasticsearch.SnapshotRepository.SecretKey) +} + +func TestLoadConfig_ConfigMapNotFound(t *testing.T) { + fakeClient := fake.NewSimpleClientset() + + // Try to load non-existent ConfigMap + config, err := LoadConfig(fakeClient, "test-ns", "nonexistent", "") + + // Assertions + assert.Error(t, err) + assert.Nil(t, config) + assert.Contains(t, 
err.Error(), "failed to get ConfigMap") +} + +func TestLoadConfig_ConfigMapMissingConfigKey(t *testing.T) { + fakeClient := fake.NewSimpleClientset() + validConfigYAML := loadTestData(t, "validConfigMapOnly.yaml") + + // Create ConfigMap without 'config' key + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "backup-config", + Namespace: "test-ns", + }, + Data: map[string]string{ + "wrong-key": validConfigYAML, + }, + } + _, err := fakeClient.CoreV1().ConfigMaps("test-ns").Create( + context.Background(), cm, metav1.CreateOptions{}, + ) + require.NoError(t, err) + + // Load config + config, err := LoadConfig(fakeClient, "test-ns", "backup-config", "") + + // Assertions + assert.Error(t, err) + assert.Nil(t, config) + assert.Contains(t, err.Error(), "does not contain 'config' key") +} + +func TestLoadConfig_InvalidYAML(t *testing.T) { + fakeClient := fake.NewSimpleClientset() + + // Create ConfigMap with invalid YAML + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "backup-config", + Namespace: "test-ns", + }, + Data: map[string]string{ + "config": "invalid: yaml: content: [unclosed", + }, + } + _, err := fakeClient.CoreV1().ConfigMaps("test-ns").Create( + context.Background(), cm, metav1.CreateOptions{}, + ) + require.NoError(t, err) + + // Load config + config, err := LoadConfig(fakeClient, "test-ns", "backup-config", "") + + // Assertions + assert.Error(t, err) + assert.Nil(t, config) + assert.Contains(t, err.Error(), "failed to parse ConfigMap config") +} + +func TestLoadConfig_ValidationFails(t *testing.T) { + fakeClient := fake.NewSimpleClientset() + + // Create ConfigMap with invalid config (missing required fields) + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "backup-config", + Namespace: "test-ns", + }, + Data: map[string]string{ + "config": invalidConfigYAML, + }, + } + _, err := fakeClient.CoreV1().ConfigMaps("test-ns").Create( + context.Background(), cm, metav1.CreateOptions{}, + ) + 
require.NoError(t, err) + + // Load config + config, err := LoadConfig(fakeClient, "test-ns", "backup-config", "") + + // Assertions + assert.Error(t, err) + assert.Nil(t, config) + assert.Contains(t, err.Error(), "configuration validation failed") +} + +func TestLoadConfig_SecretNotFoundWarning(t *testing.T) { + fakeClient := fake.NewSimpleClientset() + validConfigYAML := loadTestData(t, "validConfigMapOnly.yaml") + + // Create only ConfigMap + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "backup-config", + Namespace: "test-ns", + }, + Data: map[string]string{ + "config": validConfigYAML, + }, + } + _, err := fakeClient.CoreV1().ConfigMaps("test-ns").Create( + context.Background(), cm, metav1.CreateOptions{}, + ) + require.NoError(t, err) + + // Load config with non-existent secret (should succeed with warning) + config, err := LoadConfig(fakeClient, "test-ns", "backup-config", "nonexistent-secret") + + // Assertions - should succeed as secret is optional + require.NoError(t, err) + assert.NotNil(t, config) + assert.Equal(t, "suse-observability-elasticsearch-master-headless", config.Elasticsearch.Service.Name) +} + +func TestLoadConfig_EmptyConfigMapName(t *testing.T) { + fakeClient := fake.NewSimpleClientset() + + // Try to load with empty ConfigMap name + config, err := LoadConfig(fakeClient, "test-ns", "", "") + + // Should fail - ConfigMap is required + assert.Error(t, err) + assert.Nil(t, config) +} + +func TestNewContext(t *testing.T) { + ctx := NewContext() + + assert.NotNil(t, ctx) + assert.NotNil(t, ctx.Config) + assert.Equal(t, "", ctx.Config.Namespace) + assert.Equal(t, "", ctx.Config.Kubeconfig) + assert.False(t, ctx.Config.Debug) + assert.False(t, ctx.Config.Quiet) + assert.Equal(t, "", ctx.Config.ConfigMapName) + assert.Equal(t, "", ctx.Config.SecretName) + assert.Equal(t, "", ctx.Config.OutputFormat) +} + +func TestCLIConfig_Defaults(t *testing.T) { + config := &CLIConfig{} + + // Verify zero values + assert.Equal(t, "", 
config.Namespace) + assert.Equal(t, "", config.Kubeconfig) + assert.False(t, config.Debug) + assert.False(t, config.Quiet) + assert.Equal(t, "", config.ConfigMapName) + assert.Equal(t, "", config.SecretName) + assert.Equal(t, "", config.OutputFormat) +} + +//nolint:funlen +func TestConfig_StructValidation(t *testing.T) { + tests := []struct { + name string + config *Config + expectError bool + }{ + { + name: "valid config", + config: &Config{ + Elasticsearch: ElasticsearchConfig{ + Service: ServiceConfig{ + Name: "es-master", + Port: 9200, + LocalPortForwardPort: 9200, + }, + Restore: RestoreConfig{ + ScaleDownLabelSelector: "app=test", + IndexPrefix: "sts_", + DatastreamIndexPrefix: "sts_k8s", + DatastreamName: "sts_k8s", + IndicesPattern: "*", + Repository: "repo", + }, + SnapshotRepository: SnapshotRepositoryConfig{ + Name: "repo", + Bucket: "bucket", + Endpoint: "endpoint", + AccessKey: "key", + SecretKey: "secret", + }, + SLM: SLMConfig{ + Name: "slm", + Schedule: "0 0 * * *", + SnapshotTemplateName: "snap", + Repository: "repo", + Indices: "*", + RetentionExpireAfter: "30d", + RetentionMinCount: 1, + RetentionMaxCount: 10, + }, + }, + }, + expectError: false, + }, + { + name: "invalid port number", + config: &Config{ + Elasticsearch: ElasticsearchConfig{ + Service: ServiceConfig{ + Name: "es-master", + Port: 0, // Invalid + LocalPortForwardPort: 9200, + }, + Restore: RestoreConfig{ + ScaleDownLabelSelector: "app=test", + IndexPrefix: "sts_", + DatastreamIndexPrefix: "sts_k8s", + DatastreamName: "sts_k8s", + IndicesPattern: "*", + Repository: "repo", + }, + SnapshotRepository: SnapshotRepositoryConfig{ + Name: "repo", + Bucket: "bucket", + Endpoint: "endpoint", + AccessKey: "key", + SecretKey: "secret", + }, + SLM: SLMConfig{ + Name: "slm", + Schedule: "0 0 * * *", + SnapshotTemplateName: "snap", + Repository: "repo", + Indices: "*", + RetentionExpireAfter: "30d", + RetentionMinCount: 1, + RetentionMaxCount: 10, + }, + }, + }, + expectError: true, + }, + { + 
name: "invalid retention count", + config: &Config{ + Elasticsearch: ElasticsearchConfig{ + Service: ServiceConfig{ + Name: "es-master", + Port: 9200, + LocalPortForwardPort: 9200, + }, + Restore: RestoreConfig{ + ScaleDownLabelSelector: "app=test", + IndexPrefix: "sts_", + DatastreamIndexPrefix: "sts_k8s", + DatastreamName: "sts_k8s", + IndicesPattern: "*", + Repository: "repo", + }, + SnapshotRepository: SnapshotRepositoryConfig{ + Name: "repo", + Bucket: "bucket", + Endpoint: "endpoint", + AccessKey: "key", + SecretKey: "secret", + }, + SLM: SLMConfig{ + Name: "slm", + Schedule: "0 0 * * *", + SnapshotTemplateName: "snap", + Repository: "repo", + Indices: "*", + RetentionExpireAfter: "30d", + RetentionMinCount: 0, // Invalid - must be >= 1 + RetentionMaxCount: 10, + }, + }, + }, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Use validator directly to test struct validation + validate := validator.New() + err := validate.Struct(tt.config) + + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/internal/config/testdata/validConfigMapConfig.yaml b/internal/config/testdata/validConfigMapConfig.yaml new file mode 100644 index 0000000..f538995 --- /dev/null +++ b/internal/config/testdata/validConfigMapConfig.yaml @@ -0,0 +1,62 @@ +# Valid ConfigMap Configuration for StackState Backup CLI +# This file contains the main configuration for Elasticsearch backup and restore operations. +# It is typically stored in a Kubernetes ConfigMap. 
+ +elasticsearch: + # Snapshot repository configuration for S3-compatible storage (Minio) + snapshotRepository: + # Name of the Elasticsearch snapshot repository + name: sts-backup + # S3 bucket name where snapshots will be stored + bucket: sts-elasticsearch-backup + # Minio/S3 endpoint (hostname:port) + endpoint: suse-observability-minio:9000 + # Base path within the bucket for snapshots (empty string for root) + basepath: "" + + # Snapshot Lifecycle Management (SLM) policy configuration + # SLM automates snapshot creation on a schedule + slm: + # Name of the SLM policy + name: auto-sts-backup + # Cron schedule for automatic snapshots (Quartz format: "second minute hour day month weekday") + # Example: "0 0 3 * * ?" = daily at 3:00 AM + schedule: "0 0 3 * * ?" + # Template for snapshot names (supports Elasticsearch date math) + # Example: "" creates snapshots like "sts-backup-20240115-0300" + snapshotTemplateName: "" + # Repository to store snapshots (must match snapshotRepository.name) + repository: sts-backup + # Indices pattern to include in snapshots (glob pattern) + indices: "sts*" + # Retention policy: delete snapshots older than this duration (e.g., 30d, 7d, 90d) + retentionExpireAfter: 30d + # Retention policy: minimum number of snapshots to keep (even if expired) + retentionMinCount: 5 + # Retention policy: maximum number of snapshots to keep + retentionMaxCount: 30 + + # Elasticsearch service connection details + service: + # Name of the Elasticsearch service in Kubernetes + name: suse-observability-elasticsearch-master-headless + # Port number for Elasticsearch HTTP API + port: 9200 + # Local port to use for port-forwarding (can be same as port) + localPortForwardPort: 9200 + + # Restore operation configuration + restore: + # Snapshot repository to restore from (must match snapshotRepository.name) + repository: sts-backup + # Kubernetes label selector for deployments to scale down during restore + # Example: 
"observability.suse.com/scalable-during-es-restore=true" + scaleDownLabelSelector: "observability.suse.com/scalable-during-es-restore=true" + # Prefix for regular indices to filter during restore operations + indexPrefix: sts + # Prefix for datastream indices (datastreams use pattern: .ds-{name}-{generation}) + datastreamIndexPrefix: .ds-sts_k8s_logs + # Name of the datastream (used for rollover operations) + datastreamName: sts_k8s_logs + # Pattern for indices to restore from snapshot (comma-separated glob patterns) + indicesPattern: sts*,.ds-sts_k8s_logs* diff --git a/internal/config/testdata/validConfigMapOnly.yaml b/internal/config/testdata/validConfigMapOnly.yaml new file mode 100644 index 0000000..a7b3415 --- /dev/null +++ b/internal/config/testdata/validConfigMapOnly.yaml @@ -0,0 +1,69 @@ +# Valid ConfigMap-Only Configuration for StackState Backup CLI +# This file contains a complete configuration including credentials in the ConfigMap. +# Use this for tests that don't involve Secret overrides. +# In production, credentials should always be stored in Secrets, not ConfigMaps. 
+ +elasticsearch: + # Snapshot repository configuration for S3-compatible storage (Minio) + snapshotRepository: + # Name of the Elasticsearch snapshot repository + name: sts-backup + # S3 bucket name where snapshots will be stored + bucket: sts-elasticsearch-backup + # Minio/S3 endpoint (hostname:port) + endpoint: suse-observability-minio:9000 + # Base path within the bucket for snapshots (empty string for root) + basepath: "" + # Access key for S3/Minio - included here for testing only + # In production, use Secret overrides instead + accessKey: configmap-access-key + # Secret key for S3/Minio - included here for testing only + # In production, use Secret overrides instead + secretKey: configmap-secret-key + + # Snapshot Lifecycle Management (SLM) policy configuration + # SLM automates snapshot creation on a schedule + slm: + # Name of the SLM policy + name: auto-sts-backup + # Cron schedule for automatic snapshots (Quartz format: "second minute hour day month weekday") + # Example: "0 0 3 * * ?" = daily at 3:00 AM + schedule: "0 0 3 * * ?" 
+ # Template for snapshot names (supports Elasticsearch date math) + # Example: "" creates snapshots like "sts-backup-20240115-0300" + snapshotTemplateName: "" + # Repository to store snapshots (must match snapshotRepository.name) + repository: sts-backup + # Indices pattern to include in snapshots (glob pattern) + indices: "sts*" + # Retention policy: delete snapshots older than this duration (e.g., 30d, 7d, 90d) + retentionExpireAfter: 30d + # Retention policy: minimum number of snapshots to keep (even if expired) + retentionMinCount: 5 + # Retention policy: maximum number of snapshots to keep + retentionMaxCount: 30 + + # Elasticsearch service connection details + service: + # Name of the Elasticsearch service in Kubernetes + name: suse-observability-elasticsearch-master-headless + # Port number for Elasticsearch HTTP API + port: 9200 + # Local port to use for port-forwarding (can be same as port) + localPortForwardPort: 9200 + + # Restore operation configuration + restore: + # Snapshot repository to restore from (must match snapshotRepository.name) + repository: sts-backup + # Kubernetes label selector for deployments to scale down during restore + # Example: "observability.suse.com/scalable-during-es-restore=true" + scaleDownLabelSelector: "observability.suse.com/scalable-during-es-restore=true" + # Prefix for regular indices to filter during restore operations + indexPrefix: sts + # Prefix for datastream indices (datastreams use pattern: .ds-{name}-{generation}) + datastreamIndexPrefix: .ds-sts_k8s_logs + # Name of the datastream (used for rollover operations) + datastreamName: sts_k8s_logs + # Pattern for indices to restore from snapshot (comma-separated glob patterns) + indicesPattern: sts*,.ds-sts_k8s_logs* \ No newline at end of file diff --git a/internal/config/testdata/validSecretConfig.yaml b/internal/config/testdata/validSecretConfig.yaml new file mode 100644 index 0000000..a41c8c0 --- /dev/null +++ b/internal/config/testdata/validSecretConfig.yaml @@ 
-0,0 +1,15 @@ +# Valid Secret Configuration for StackState Backup CLI +# This file contains sensitive credentials for S3/Minio access. +# It is typically stored in a Kubernetes Secret and overrides values from the ConfigMap. +# +# Only the fields specified here will override the ConfigMap values. +# All other configuration remains unchanged. + +elasticsearch: + snapshotRepository: + # S3/Minio access key (overrides ConfigMap value if present) + # This credential is used to authenticate with the S3-compatible storage + accessKey: secret-access-key + # S3/Minio secret key (overrides ConfigMap value if present) + # Keep this value secure - it should never be committed to ConfigMaps + secretKey: secret-secret-key diff --git a/internal/elasticsearch/client.go b/internal/elasticsearch/client.go new file mode 100644 index 0000000..74b20f3 --- /dev/null +++ b/internal/elasticsearch/client.go @@ -0,0 +1,350 @@ +// Package elasticsearch provides a client for interacting with Elasticsearch +// including snapshot management, index operations, and SLM policy configuration. 
+package elasticsearch + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "strings" + + "github.com/elastic/go-elasticsearch/v8" +) + +// Client represents an Elasticsearch client +type Client struct { + es *elasticsearch.Client +} + +// IndexInfo represents detailed information about an Elasticsearch index +type IndexInfo struct { + Health string `json:"health"` + Status string `json:"status"` + Index string `json:"index"` + UUID string `json:"uuid"` + Pri string `json:"pri"` + Rep string `json:"rep"` + DocsCount string `json:"docs.count"` + DocsDeleted string `json:"docs.deleted"` + StoreSize string `json:"store.size"` + PriStoreSize string `json:"pri.store.size"` + DatasetSize string `json:"dataset.size"` +} + +// Snapshot represents an Elasticsearch snapshot +type Snapshot struct { + Snapshot string `json:"snapshot"` + UUID string `json:"uuid"` + Repository string `json:"repository"` + State string `json:"state"` + StartTime string `json:"start_time"` + StartTimeMillis int64 `json:"start_time_in_millis"` + EndTime string `json:"end_time"` + EndTimeMillis int64 `json:"end_time_in_millis"` + DurationInMillis int64 `json:"duration_in_millis"` + Indices []string `json:"indices"` + Failures []string `json:"failures"` + Shards struct { + Total int `json:"total"` + Failed int `json:"failed"` + Successful int `json:"successful"` + } `json:"shards"` +} + +// SnapshotsResponse represents the response from Elasticsearch snapshots API +type SnapshotsResponse struct { + Snapshots []Snapshot `json:"snapshots"` + Total int `json:"total"` + Remaining int `json:"remaining"` +} + +// NewClient creates a new Elasticsearch client +func NewClient(baseURL string) (*Client, error) { + cfg := elasticsearch.Config{ + Addresses: []string{baseURL}, + } + + es, err := elasticsearch.NewClient(cfg) + if err != nil { + return nil, fmt.Errorf("failed to create Elasticsearch client: %w", err) + } + + return &Client{ + es: es, + }, nil +} + +// ListSnapshots retrieves all 
snapshots from a repository +func (c *Client) ListSnapshots(repository string) ([]Snapshot, error) { + res, err := c.es.Snapshot.Get( + repository, + []string{"_all"}, + c.es.Snapshot.Get.WithContext(context.Background()), + ) + if err != nil { + return nil, fmt.Errorf("failed to get snapshots: %w", err) + } + defer res.Body.Close() + + if res.IsError() { + return nil, fmt.Errorf("elasticsearch returned error: %s", res.String()) + } + + var snapshotsResp SnapshotsResponse + if err := json.NewDecoder(res.Body).Decode(&snapshotsResp); err != nil { + return nil, fmt.Errorf("failed to decode response: %w", err) + } + + return snapshotsResp.Snapshots, nil +} + +// GetSnapshot retrieves details of a specific snapshot including its indices +func (c *Client) GetSnapshot(repository, snapshotName string) (*Snapshot, error) { + res, err := c.es.Snapshot.Get( + repository, + []string{snapshotName}, + c.es.Snapshot.Get.WithContext(context.Background()), + ) + if err != nil { + return nil, fmt.Errorf("failed to get snapshot: %w", err) + } + defer res.Body.Close() + + if res.IsError() { + return nil, fmt.Errorf("elasticsearch returned error: %s", res.String()) + } + + var snapshotsResp SnapshotsResponse + if err := json.NewDecoder(res.Body).Decode(&snapshotsResp); err != nil { + return nil, fmt.Errorf("failed to decode response: %w", err) + } + + if len(snapshotsResp.Snapshots) == 0 { + return nil, fmt.Errorf("snapshot %s not found", snapshotName) + } + + return &snapshotsResp.Snapshots[0], nil +} + +// ListIndices retrieves all indices matching a pattern +func (c *Client) ListIndices(pattern string) ([]string, error) { + res, err := c.es.Cat.Indices( + c.es.Cat.Indices.WithContext(context.Background()), + c.es.Cat.Indices.WithIndex(pattern), + c.es.Cat.Indices.WithH("index"), + c.es.Cat.Indices.WithFormat("json"), + ) + if err != nil { + return nil, fmt.Errorf("failed to list indices: %w", err) + } + defer res.Body.Close() + + if res.IsError() { + return nil, 
fmt.Errorf("elasticsearch returned error: %s", res.String()) + } + + var indices []struct { + Index string `json:"index"` + } + if err := json.NewDecoder(res.Body).Decode(&indices); err != nil { + return nil, fmt.Errorf("failed to decode response: %w", err) + } + + result := make([]string, len(indices)) + for i, idx := range indices { + result[i] = idx.Index + } + + return result, nil +} + +// ListIndicesDetailed retrieves detailed information about all indices +func (c *Client) ListIndicesDetailed() ([]IndexInfo, error) { + res, err := c.es.Cat.Indices( + c.es.Cat.Indices.WithContext(context.Background()), + c.es.Cat.Indices.WithH("health,status,index,uuid,pri,rep,docs.count,docs.deleted,store.size,pri.store.size,dataset.size"), + c.es.Cat.Indices.WithFormat("json"), + ) + if err != nil { + return nil, fmt.Errorf("failed to list indices: %w", err) + } + defer res.Body.Close() + + if res.IsError() { + return nil, fmt.Errorf("elasticsearch returned error: %s", res.String()) + } + + var indices []IndexInfo + if err := json.NewDecoder(res.Body).Decode(&indices); err != nil { + return nil, fmt.Errorf("failed to decode response: %w", err) + } + + return indices, nil +} + +// DeleteIndex deletes a specific index +func (c *Client) DeleteIndex(index string) error { + res, err := c.es.Indices.Delete( + []string{index}, + c.es.Indices.Delete.WithContext(context.Background()), + ) + if err != nil { + return fmt.Errorf("failed to delete index: %w", err) + } + defer res.Body.Close() + + if res.IsError() { + return fmt.Errorf("elasticsearch returned error: %s", res.String()) + } + + return nil +} + +// IndexExists checks if an index exists +func (c *Client) IndexExists(index string) (bool, error) { + res, err := c.es.Indices.Exists( + []string{index}, + c.es.Indices.Exists.WithContext(context.Background()), + ) + if err != nil { + return false, fmt.Errorf("failed to check index existence: %w", err) + } + defer res.Body.Close() + + if res.StatusCode == http.StatusNotFound { + 
return false, nil + } + + if res.IsError() { + return false, fmt.Errorf("elasticsearch returned error: %s", res.String()) + } + + return true, nil +} + +// RolloverDatastream performs a rollover on a datastream +func (c *Client) RolloverDatastream(datastreamName string) error { + res, err := c.es.Indices.Rollover( + datastreamName, + c.es.Indices.Rollover.WithContext(context.Background()), + ) + if err != nil { + return fmt.Errorf("failed to rollover datastream: %w", err) + } + defer res.Body.Close() + + if res.IsError() { + return fmt.Errorf("elasticsearch returned error: %s", res.String()) + } + + return nil +} + +// ConfigureSnapshotRepository configures an S3 snapshot repository +func (c *Client) ConfigureSnapshotRepository(name, bucket, endpoint, basePath, accessKey, secretKey string) error { + body := map[string]interface{}{ + "type": "s3", + "settings": map[string]interface{}{ + "bucket": bucket, + "region": "minio", + "endpoint": endpoint, + "base_path": basePath, + "protocol": "http", + "access_key": accessKey, + "secret_key": secretKey, + "path_style_access": "true", + }, + } + + bodyJSON, err := json.Marshal(body) + if err != nil { + return fmt.Errorf("failed to marshal request body: %w", err) + } + + res, err := c.es.Snapshot.CreateRepository( + name, + strings.NewReader(string(bodyJSON)), + c.es.Snapshot.CreateRepository.WithContext(context.Background()), + ) + if err != nil { + return fmt.Errorf("failed to create snapshot repository: %w", err) + } + defer res.Body.Close() + + if res.IsError() { + return fmt.Errorf("elasticsearch returned error: %s", res.String()) + } + + return nil +} + +// ConfigureSLMPolicy configures a Snapshot Lifecycle Management policy +func (c *Client) ConfigureSLMPolicy(name, schedule, snapshotName, repository, indices, expireAfter string, minCount, maxCount int) error { + body := map[string]interface{}{ + "schedule": schedule, + "name": snapshotName, + "repository": repository, + "config": map[string]interface{}{ + "indices": 
indices, + "ignore_unavailable": false, + "include_global_state": false, + }, + "retention": map[string]interface{}{ + "expire_after": expireAfter, + "min_count": minCount, + "max_count": maxCount, + }, + } + + bodyJSON, err := json.Marshal(body) + if err != nil { + return fmt.Errorf("failed to marshal request body: %w", err) + } + + res, err := c.es.SlmPutLifecycle( + name, + c.es.SlmPutLifecycle.WithContext(context.Background()), + c.es.SlmPutLifecycle.WithBody(strings.NewReader(string(bodyJSON))), + ) + if err != nil { + return fmt.Errorf("failed to create SLM policy: %w", err) + } + defer res.Body.Close() + + if res.IsError() { + return fmt.Errorf("elasticsearch returned error: %s", res.String()) + } + + return nil +} + +// RestoreSnapshot restores a snapshot from a repository +func (c *Client) RestoreSnapshot(repository, snapshotName, indicesPattern string, waitForCompletion bool) error { + body := map[string]interface{}{ + "indices": indicesPattern, + } + + bodyJSON, err := json.Marshal(body) + if err != nil { + return fmt.Errorf("failed to marshal request body: %w", err) + } + + res, err := c.es.Snapshot.Restore( + repository, + snapshotName, + c.es.Snapshot.Restore.WithContext(context.Background()), + c.es.Snapshot.Restore.WithBody(strings.NewReader(string(bodyJSON))), + c.es.Snapshot.Restore.WithWaitForCompletion(waitForCompletion), + ) + if err != nil { + return fmt.Errorf("failed to restore snapshot: %w", err) + } + defer res.Body.Close() + + if res.IsError() { + return fmt.Errorf("elasticsearch returned error: %s", res.String()) + } + + return nil +} diff --git a/internal/elasticsearch/client_test.go b/internal/elasticsearch/client_test.go new file mode 100644 index 0000000..a347721 --- /dev/null +++ b/internal/elasticsearch/client_test.go @@ -0,0 +1,424 @@ +package elasticsearch + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// mockESServer creates a test 
HTTP server with Elasticsearch headers +func mockESServer(handler http.HandlerFunc) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Add Elasticsearch headers for client validation + w.Header().Set("X-Elastic-Product", "Elasticsearch") + handler(w, r) + })) +} + +func TestClient_ListSnapshots(t *testing.T) { + tests := []struct { + name string + repository string + responseBody string + responseStatus int + expectedCount int + expectError bool + }{ + { + name: "successful list with multiple snapshots", + repository: "test-repo", + responseStatus: http.StatusOK, + responseBody: `{ + "snapshots": [ + { + "snapshot": "snapshot-2024-01-01", + "uuid": "uuid-1", + "repository": "test-repo", + "state": "SUCCESS" + }, + { + "snapshot": "snapshot-2024-01-02", + "uuid": "uuid-2", + "repository": "test-repo", + "state": "SUCCESS" + } + ], + "total": 2, + "remaining": 0 + }`, + expectedCount: 2, + expectError: false, + }, + { + name: "empty snapshot list", + repository: "empty-repo", + responseStatus: http.StatusOK, + responseBody: `{ + "snapshots": [], + "total": 0, + "remaining": 0 + }`, + expectedCount: 0, + expectError: false, + }, + { + name: "elasticsearch returns error", + repository: "bad-repo", + responseStatus: http.StatusNotFound, + responseBody: `{"error": "repository not found"}`, + expectedCount: 0, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create test server + server := mockESServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Verify request path + expectedPath := "/_snapshot/" + tt.repository + "/_all" + assert.Equal(t, expectedPath, r.URL.Path) + assert.Equal(t, http.MethodGet, r.Method) + + w.WriteHeader(tt.responseStatus) + _, _ = w.Write([]byte(tt.responseBody)) + })) + defer server.Close() + + // Create client + client, err := NewClient(server.URL) + require.NoError(t, err) + + // Execute test + snapshots, 
err := client.ListSnapshots(tt.repository) + + // Assertions + if tt.expectError { + assert.Error(t, err) + return + } + + require.NoError(t, err) + assert.Equal(t, tt.expectedCount, len(snapshots)) + + // Verify snapshot details if any + if tt.expectedCount > 0 { + assert.Equal(t, "snapshot-2024-01-01", snapshots[0].Snapshot) + assert.Equal(t, tt.repository, snapshots[0].Repository) + } + }) + } +} + +func TestClient_GetSnapshot(t *testing.T) { + tests := []struct { + name string + repository string + snapshotName string + responseBody string + responseStatus int + expectError bool + }{ + { + name: "successful get snapshot", + repository: "test-repo", + snapshotName: "snapshot-2024-01-01", + responseStatus: http.StatusOK, + responseBody: `{ + "snapshots": [ + { + "snapshot": "snapshot-2024-01-01", + "uuid": "uuid-1", + "repository": "test-repo", + "state": "SUCCESS", + "indices": ["index-1", "index-2"] + } + ] + }`, + expectError: false, + }, + { + name: "snapshot not found", + repository: "test-repo", + snapshotName: "nonexistent", + responseStatus: http.StatusOK, + responseBody: `{ + "snapshots": [] + }`, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create test server + server := mockESServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + expectedPath := "/_snapshot/" + tt.repository + "/" + tt.snapshotName + assert.Equal(t, expectedPath, r.URL.Path) + + w.WriteHeader(tt.responseStatus) + _, _ = w.Write([]byte(tt.responseBody)) + })) + defer server.Close() + + // Create client + client, err := NewClient(server.URL) + require.NoError(t, err) + + // Execute test + snapshot, err := client.GetSnapshot(tt.repository, tt.snapshotName) + + // Assertions + if tt.expectError { + assert.Error(t, err) + return + } + + require.NoError(t, err) + assert.NotNil(t, snapshot) + assert.Equal(t, tt.snapshotName, snapshot.Snapshot) + assert.Equal(t, tt.repository, snapshot.Repository) + }) + } +} + +func 
TestClient_ListIndices(t *testing.T) { + tests := []struct { + name string + pattern string + responseBody string + responseStatus int + expectedCount int + expectError bool + }{ + { + name: "list all indices", + pattern: "*", + responseStatus: http.StatusOK, + responseBody: `[ + {"index": "index-1"}, + {"index": "index-2"}, + {"index": "index-3"} + ]`, + expectedCount: 3, + expectError: false, + }, + { + name: "list specific pattern", + pattern: "logs-*", + responseStatus: http.StatusOK, + responseBody: `[ + {"index": "logs-2024-01"}, + {"index": "logs-2024-02"} + ]`, + expectedCount: 2, + expectError: false, + }, + { + name: "no indices found", + pattern: "nonexistent-*", + responseStatus: http.StatusOK, + responseBody: `[]`, + expectedCount: 0, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create test server + server := mockESServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "/_cat/indices/"+tt.pattern, r.URL.Path) + assert.Equal(t, "json", r.URL.Query().Get("format")) + assert.Equal(t, "index", r.URL.Query().Get("h")) + + w.WriteHeader(tt.responseStatus) + _, _ = w.Write([]byte(tt.responseBody)) + })) + defer server.Close() + + // Create client + client, err := NewClient(server.URL) + require.NoError(t, err) + + // Execute test + indices, err := client.ListIndices(tt.pattern) + + // Assertions + if tt.expectError { + assert.Error(t, err) + return + } + + require.NoError(t, err) + assert.Equal(t, tt.expectedCount, len(indices)) + }) + } +} + +func TestClient_DeleteIndex(t *testing.T) { + tests := []struct { + name string + index string + responseStatus int + expectError bool + }{ + { + name: "successful delete", + index: "test-index", + responseStatus: http.StatusOK, + expectError: false, + }, + { + name: "index not found", + index: "nonexistent", + responseStatus: http.StatusNotFound, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, 
func(t *testing.T) { + // Create test server + server := mockESServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "/"+tt.index, r.URL.Path) + assert.Equal(t, http.MethodDelete, r.Method) + + w.WriteHeader(tt.responseStatus) + })) + defer server.Close() + + // Create client + client, err := NewClient(server.URL) + require.NoError(t, err) + + // Execute test + err = client.DeleteIndex(tt.index) + + // Assertions + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestClient_IndexExists(t *testing.T) { + tests := []struct { + name string + index string + responseStatus int + expectedExists bool + }{ + { + name: "index exists", + index: "existing-index", + responseStatus: http.StatusOK, + expectedExists: true, + }, + { + name: "index does not exist", + index: "nonexistent-index", + responseStatus: http.StatusNotFound, + expectedExists: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create test server + server := mockESServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "/"+tt.index, r.URL.Path) + assert.Equal(t, http.MethodHead, r.Method) + + w.WriteHeader(tt.responseStatus) + })) + defer server.Close() + + // Create client + client, err := NewClient(server.URL) + require.NoError(t, err) + + // Execute test + exists, err := client.IndexExists(tt.index) + + // Assertions + require.NoError(t, err) + assert.Equal(t, tt.expectedExists, exists) + }) + } +} + +func TestClient_RestoreSnapshot(t *testing.T) { + tests := []struct { + name string + repository string + snapshotName string + indicesPattern string + waitForCompletion bool + responseStatus int + expectError bool + }{ + { + name: "successful restore", + repository: "test-repo", + snapshotName: "snapshot-2024-01-01", + indicesPattern: "*", + waitForCompletion: true, + responseStatus: http.StatusOK, + expectError: false, + }, + { + name: "snapshot not 
found", + repository: "test-repo", + snapshotName: "nonexistent", + indicesPattern: "*", + waitForCompletion: false, + responseStatus: http.StatusNotFound, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create test server + server := mockESServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + expectedPath := "/_snapshot/" + tt.repository + "/" + tt.snapshotName + "/_restore" + assert.Equal(t, expectedPath, r.URL.Path) + assert.Equal(t, http.MethodPost, r.Method) + + if tt.waitForCompletion { + assert.Equal(t, "true", r.URL.Query().Get("wait_for_completion")) + } + + w.WriteHeader(tt.responseStatus) + })) + defer server.Close() + + // Create client + client, err := NewClient(server.URL) + require.NoError(t, err) + + // Execute test + err = client.RestoreSnapshot(tt.repository, tt.snapshotName, tt.indicesPattern, tt.waitForCompletion) + + // Assertions + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestNewClient(t *testing.T) { + client, err := NewClient("http://localhost:9200") + require.NoError(t, err) + assert.NotNil(t, client) +} diff --git a/internal/elasticsearch/interface.go b/internal/elasticsearch/interface.go new file mode 100644 index 0000000..40ecdea --- /dev/null +++ b/internal/elasticsearch/interface.go @@ -0,0 +1,26 @@ +package elasticsearch + +// Interface defines the contract for Elasticsearch client operations +// This interface allows for easy mocking in tests +type Interface interface { + // Snapshot operations + ListSnapshots(repository string) ([]Snapshot, error) + GetSnapshot(repository, snapshotName string) (*Snapshot, error) + RestoreSnapshot(repository, snapshotName, indicesPattern string, waitForCompletion bool) error + + // Index operations + ListIndices(pattern string) ([]string, error) + ListIndicesDetailed() ([]IndexInfo, error) + DeleteIndex(index string) error + IndexExists(index string) (bool, error) + 
+ // Datastream operations + RolloverDatastream(datastreamName string) error + + // Repository and SLM operations + ConfigureSnapshotRepository(name, bucket, endpoint, basePath, accessKey, secretKey string) error + ConfigureSLMPolicy(name, schedule, snapshotName, repository, indices, expireAfter string, minCount, maxCount int) error +} + +// Ensure *Client implements Interface +var _ Interface = (*Client)(nil) diff --git a/internal/k8s/client.go b/internal/k8s/client.go new file mode 100644 index 0000000..6e72ebf --- /dev/null +++ b/internal/k8s/client.go @@ -0,0 +1,231 @@ +// Package k8s provides Kubernetes client functionality including +// port-forwarding, deployment scaling, and service discovery. +package k8s + +import ( + "context" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/portforward" + "k8s.io/client-go/transport/spdy" +) + +// Client wraps the Kubernetes clientset +type Client struct { + clientset kubernetes.Interface + restConfig *rest.Config + debug bool +} + +// Clientset returns the underlying Kubernetes clientset +func (c *Client) Clientset() kubernetes.Interface { + return c.clientset +} + +// NewClient creates a new Kubernetes client +func NewClient(kubeconfigPath string, debug bool) (*Client, error) { + if kubeconfigPath == "" { + // Use default kubeconfig location + home, err := os.UserHomeDir() + if err != nil { + return nil, fmt.Errorf("failed to get home directory: %w", err) + } + kubeconfigPath = filepath.Join(home, ".kube", "config") + } + + config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath) + if err != nil { + return nil, fmt.Errorf("failed to build config: %w", err) + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, fmt.Errorf("failed to create clientset: %w", err) + 
} + + return &Client{ + clientset: clientset, + restConfig: config, + debug: debug, + }, nil +} + +// PortForwardService creates a port-forward to a Kubernetes service +func (c *Client) PortForwardService(namespace, serviceName string, localPort, remotePort int) (chan struct{}, chan struct{}, error) { + ctx := context.Background() + + // Get service to find pods + svc, err := c.clientset.CoreV1().Services(namespace).Get(ctx, serviceName, metav1.GetOptions{}) + if err != nil { + return nil, nil, fmt.Errorf("failed to get service: %w", err) + } + + // Find pod matching service selector + podList, err := c.clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ + LabelSelector: metav1.FormatLabelSelector(&metav1.LabelSelector{ + MatchLabels: svc.Spec.Selector, + }), + }) + if err != nil { + return nil, nil, fmt.Errorf("failed to list pods: %w", err) + } + + if len(podList.Items) == 0 { + return nil, nil, fmt.Errorf("no pods found for service %s", serviceName) + } + + // Find a running pod + var targetPod *corev1.Pod + for i := range podList.Items { + if podList.Items[i].Status.Phase == corev1.PodRunning { + targetPod = &podList.Items[i] + break + } + } + + if targetPod == nil { + return nil, nil, fmt.Errorf("no running pods found for service %s", serviceName) + } + // Setup port-forward + return c.PortForwardPod(namespace, targetPod.Name, localPort, remotePort) +} + +// PortForwardPod creates a port-forward to a specific pod +func (c *Client) PortForwardPod(namespace, podName string, localPort, remotePort int) (chan struct{}, chan struct{}, error) { + path := fmt.Sprintf("/api/v1/namespaces/%s/pods/%s/portforward", namespace, podName) + hostIP := c.restConfig.Host + url, err := url.Parse(hostIP) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse host: %w", err) + } + url.Path = path + + transport, upgrader, err := spdy.RoundTripperFor(c.restConfig) + if err != nil { + return nil, nil, fmt.Errorf("failed to create round tripper: %w", err) + } + + 
dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, http.MethodPost, url)
+
+	stopChan := make(chan struct{}, 1)
+	readyChan := make(chan struct{})
+
+	ports := []string{fmt.Sprintf("%d:%d", localPort, remotePort)}
+
+	// Use discard writers if debug is disabled to suppress port-forward output
+	outWriter := io.Discard
+	errWriter := io.Discard
+	if c.debug {
+		outWriter = os.Stdout
+		errWriter = os.Stderr
+	}
+
+	fw, err := portforward.New(dialer, ports, stopChan, readyChan, outWriter, errWriter)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to create port forwarder: %w", err)
+	}
+
+	go func() {
+		if err := fw.ForwardPorts(); err != nil {
+			if c.debug {
+				fmt.Fprintf(os.Stderr, "Port forward error: %v\n", err)
+			}
+		}
+	}()
+
+	return stopChan, readyChan, nil
+}
+
+// DeploymentScale holds the name and original replica count of a deployment
+type DeploymentScale struct {
+	Name     string
+	Replicas int32
+}
+
+// ScaleDownDeployments scales down deployments matching a label selector to 0 replicas
+// It returns a slice of DeploymentScale recording each deployment's original replica count
+func (c *Client) ScaleDownDeployments(namespace, labelSelector string) ([]DeploymentScale, error) {
+	ctx := context.Background()
+
+	// List deployments matching the label selector
+	deployments, err := c.clientset.AppsV1().Deployments(namespace).List(ctx, metav1.ListOptions{
+		LabelSelector: labelSelector,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to list deployments: %w", err)
+	}
+
+	if len(deployments.Items) == 0 {
+		return []DeploymentScale{}, nil
+	}
+
+	var scaledDeployments []DeploymentScale
+
+	// Scale down each deployment
+	for _, deployment := range deployments.Items {
+		originalReplicas := int32(0)
+		if deployment.Spec.Replicas != nil {
+			originalReplicas = *deployment.Spec.Replicas
+		}
+
+		// Store original replica count
+		scaledDeployments = append(scaledDeployments, DeploymentScale{
+			Name:     deployment.Name,
+			Replicas: originalReplicas,
+		})
+
+ // Scale to 0 if not already at 0 + if originalReplicas > 0 { + replicas := int32(0) + deployment.Spec.Replicas = &replicas + + _, err := c.clientset.AppsV1().Deployments(namespace).Update(ctx, &deployment, metav1.UpdateOptions{}) + if err != nil { + return scaledDeployments, fmt.Errorf("failed to scale down deployment %s: %w", deployment.Name, err) + } + } + } + + return scaledDeployments, nil +} + +// ScaleUpDeployments restores deployments to their original replica counts +func (c *Client) ScaleUpDeployments(namespace string, deploymentScales []DeploymentScale) error { + ctx := context.Background() + + for _, scale := range deploymentScales { + deployment, err := c.clientset.AppsV1().Deployments(namespace).Get(ctx, scale.Name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get deployment %s: %w", scale.Name, err) + } + + deployment.Spec.Replicas = &scale.Replicas + + _, err = c.clientset.AppsV1().Deployments(namespace).Update(ctx, deployment, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to scale up deployment %s: %w", scale.Name, err) + } + } + + return nil +} + +// NewTestClient creates a k8s Client for testing with a fake clientset. +// This function is exported so it can be used in other package tests. 
+func NewTestClient(clientset kubernetes.Interface) *Client { + return &Client{ + clientset: clientset, + restConfig: nil, + debug: false, + } +} diff --git a/internal/k8s/client_test.go b/internal/k8s/client_test.go new file mode 100644 index 0000000..6868624 --- /dev/null +++ b/internal/k8s/client_test.go @@ -0,0 +1,328 @@ +package k8s + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" +) + +func TestClient_ScaleDownDeployments(t *testing.T) { + tests := []struct { + name string + namespace string + labelSelector string + deployments []appsv1.Deployment + expectedScales []DeploymentScale + expectError bool + }{ + { + name: "scale down multiple deployments", + namespace: "test-ns", + labelSelector: "app=test", + deployments: []appsv1.Deployment{ + createDeployment("deploy1", "test-ns", map[string]string{"app": "test"}, 3), + createDeployment("deploy2", "test-ns", map[string]string{"app": "test"}, 5), + }, + expectedScales: []DeploymentScale{ + {Name: "deploy1", Replicas: 3}, + {Name: "deploy2", Replicas: 5}, + }, + expectError: false, + }, + { + name: "scale down deployment with zero replicas", + namespace: "test-ns", + labelSelector: "app=test", + deployments: []appsv1.Deployment{ + createDeployment("deploy1", "test-ns", map[string]string{"app": "test"}, 0), + }, + expectedScales: []DeploymentScale{ + {Name: "deploy1", Replicas: 0}, + }, + expectError: false, + }, + { + name: "no deployments matching selector", + namespace: "test-ns", + labelSelector: "app=nonexistent", + deployments: []appsv1.Deployment{}, + expectedScales: []DeploymentScale{}, + expectError: false, + }, + { + name: "deployments with different labels not selected", + namespace: "test-ns", + labelSelector: "app=test", + deployments: []appsv1.Deployment{ + createDeployment("deploy1", 
"test-ns", map[string]string{"app": "test"}, 3), + createDeployment("deploy2", "test-ns", map[string]string{"app": "other"}, 2), + }, + expectedScales: []DeploymentScale{ + {Name: "deploy1", Replicas: 3}, + }, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create fake clientset with test deployments + fakeClient := fake.NewSimpleClientset() + for _, deploy := range tt.deployments { + _, err := fakeClient.AppsV1().Deployments(tt.namespace).Create( + context.Background(), &deploy, metav1.CreateOptions{}, + ) + require.NoError(t, err) + } + + // Create our client wrapper + client := &Client{ + clientset: fakeClient, + } + + // Execute scale down + scales, err := client.ScaleDownDeployments(tt.namespace, tt.labelSelector) + + // Assertions + if tt.expectError { + assert.Error(t, err) + return + } + + require.NoError(t, err) + assert.Equal(t, len(tt.expectedScales), len(scales)) + + // Verify each scaled deployment + for i, expectedScale := range tt.expectedScales { + assert.Equal(t, expectedScale.Name, scales[i].Name) + assert.Equal(t, expectedScale.Replicas, scales[i].Replicas) + + // Verify the deployment was actually scaled to 0 + deploy, err := fakeClient.AppsV1().Deployments(tt.namespace).Get( + context.Background(), expectedScale.Name, metav1.GetOptions{}, + ) + require.NoError(t, err) + if expectedScale.Replicas > 0 { + assert.Equal(t, int32(0), *deploy.Spec.Replicas, "deployment should be scaled to 0") + } + } + }) + } +} + +func TestClient_ScaleUpDeployments(t *testing.T) { + tests := []struct { + name string + namespace string + initialReplicas int32 + scaleToReplicas int32 + deploymentName string + expectError bool + }{ + { + name: "scale up from zero to three", + namespace: "test-ns", + initialReplicas: 0, + scaleToReplicas: 3, + deploymentName: "test-deploy", + expectError: false, + }, + { + name: "scale up from two to five", + namespace: "test-ns", + initialReplicas: 2, + scaleToReplicas: 5, + 
deploymentName: "test-deploy", + expectError: false, + }, + { + name: "restore to zero replicas", + namespace: "test-ns", + initialReplicas: 3, + scaleToReplicas: 0, + deploymentName: "test-deploy", + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create fake clientset with deployment at initial scale + fakeClient := fake.NewSimpleClientset() + deploy := createDeployment(tt.deploymentName, tt.namespace, map[string]string{"app": "test"}, tt.initialReplicas) + _, err := fakeClient.AppsV1().Deployments(tt.namespace).Create( + context.Background(), &deploy, metav1.CreateOptions{}, + ) + require.NoError(t, err) + + // Create our client wrapper + client := &Client{ + clientset: fakeClient, + } + + // Execute scale up + scales := []DeploymentScale{ + {Name: tt.deploymentName, Replicas: tt.scaleToReplicas}, + } + err = client.ScaleUpDeployments(tt.namespace, scales) + + // Assertions + if tt.expectError { + assert.Error(t, err) + return + } + + require.NoError(t, err) + + // Verify the deployment was scaled to expected replicas + updatedDeploy, err := fakeClient.AppsV1().Deployments(tt.namespace).Get( + context.Background(), tt.deploymentName, metav1.GetOptions{}, + ) + require.NoError(t, err) + assert.Equal(t, tt.scaleToReplicas, *updatedDeploy.Spec.Replicas) + }) + } +} + +func TestClient_ScaleUpDeployments_NonExistent(t *testing.T) { + fakeClient := fake.NewSimpleClientset() + client := &Client{ + clientset: fakeClient, + } + + scales := []DeploymentScale{ + {Name: "nonexistent-deploy", Replicas: 3}, + } + err := client.ScaleUpDeployments("test-ns", scales) + + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to get deployment") +} + +func TestClient_Clientset(t *testing.T) { + fakeClient := fake.NewSimpleClientset() + client := &Client{ + clientset: fakeClient, + } + + clientset := client.Clientset() + assert.NotNil(t, clientset) + assert.Equal(t, fakeClient, clientset) +} + +func 
TestClient_PortForwardService_ServiceNotFound(t *testing.T) { + fakeClient := fake.NewSimpleClientset() + client := &Client{ + clientset: fakeClient, + } + + _, _, err := client.PortForwardService("test-ns", "nonexistent-svc", 8080, 9200) + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to get service") +} + +func TestClient_PortForwardService_NoPodsFound(t *testing.T) { + fakeClient := fake.NewSimpleClientset() + + // Create a service without any matching pods + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-svc", + Namespace: "test-ns", + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{"app": "test"}, + }, + } + _, err := fakeClient.CoreV1().Services("test-ns").Create( + context.Background(), svc, metav1.CreateOptions{}, + ) + require.NoError(t, err) + + client := &Client{ + clientset: fakeClient, + } + + _, _, err = client.PortForwardService("test-ns", "test-svc", 8080, 9200) + assert.Error(t, err) + assert.Contains(t, err.Error(), "no pods found for service") +} + +func TestClient_PortForwardService_NoRunningPods(t *testing.T) { + fakeClient := fake.NewSimpleClientset() + + // Create a service + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-svc", + Namespace: "test-ns", + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{"app": "test"}, + }, + } + _, err := fakeClient.CoreV1().Services("test-ns").Create( + context.Background(), svc, metav1.CreateOptions{}, + ) + require.NoError(t, err) + + // Create a pod in Pending state + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-ns", + Labels: map[string]string{"app": "test"}, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodPending, + }, + } + _, err = fakeClient.CoreV1().Pods("test-ns").Create( + context.Background(), pod, metav1.CreateOptions{}, + ) + require.NoError(t, err) + + client := &Client{ + clientset: fakeClient, + } + + _, _, err = client.PortForwardService("test-ns", 
"test-svc", 8080, 9200) + assert.Error(t, err) + assert.Contains(t, err.Error(), "no running pods found for service") +} + +// Helper function to create a deployment for testing +func createDeployment(name, namespace string, labels map[string]string, replicas int32) appsv1.Deployment { + return appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: labels, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + Image: "test:latest", + }, + }, + }, + }, + }, + } +} diff --git a/internal/k8s/interface.go b/internal/k8s/interface.go new file mode 100644 index 0000000..679d9af --- /dev/null +++ b/internal/k8s/interface.go @@ -0,0 +1,21 @@ +package k8s + +import "k8s.io/client-go/kubernetes" + +// Interface defines the contract for Kubernetes client operations +// This interface allows for easy mocking in tests +type Interface interface { + // Clientset returns the underlying Kubernetes clientset + // Useful for direct API access when needed + Clientset() kubernetes.Interface + + // Port forwarding operations + PortForwardService(namespace, serviceName string, localPort, remotePort int) (stopChan chan struct{}, readyChan chan struct{}, err error) + + // Deployment scaling operations + ScaleDownDeployments(namespace, labelSelector string) ([]DeploymentScale, error) + ScaleUpDeployments(namespace string, deployments []DeploymentScale) error +} + +// Ensure *Client implements Interface +var _ Interface = (*Client)(nil) diff --git a/internal/logger/logger.go b/internal/logger/logger.go new file mode 100644 index 0000000..5230f34 --- /dev/null +++ b/internal/logger/logger.go @@ -0,0 +1,63 @@ +package logger + +import ( + "fmt" + "io" + "os" +) + +// Logger handles operational logging to 
stderr, keeping stdout clean for data output +type Logger struct { + writer io.Writer + quiet bool + debug bool +} + +// New creates a new logger that writes to stderr +func New(quiet, debug bool) *Logger { + return &Logger{ + writer: os.Stderr, + quiet: quiet, + debug: debug, + } +} + +// Infof logs an informational message +func (l *Logger) Infof(format string, args ...interface{}) { + if !l.quiet { + _, _ = fmt.Fprintf(l.writer, format+"\n", args...) + } +} + +// Successf logs a success message +func (l *Logger) Successf(format string, args ...interface{}) { + if !l.quiet { + _, _ = fmt.Fprintf(l.writer, "✓ "+format+"\n", args...) + } +} + +// Warningf logs a warning message +func (l *Logger) Warningf(format string, args ...interface{}) { + if !l.quiet { + _, _ = fmt.Fprintf(l.writer, "Warning: "+format+"\n", args...) + } +} + +// Errorf logs an error message (always shown, even in quiet mode) +func (l *Logger) Errorf(format string, args ...interface{}) { + _, _ = fmt.Fprintf(l.writer, "Error: "+format+"\n", args...) +} + +// Debug logs a debug message (only shown when debug mode is enabled) +func (l *Logger) Debugf(format string, args ...interface{}) { + if l.debug { + _, _ = fmt.Fprintf(l.writer, "DEBUG: "+format+"\n", args...) 
+ } +} + +// Println prints a blank line (for spacing) +func (l *Logger) Println() { + if !l.quiet { + _, _ = fmt.Fprintln(l.writer) + } +} diff --git a/internal/logger/logger_test.go b/internal/logger/logger_test.go new file mode 100644 index 0000000..ce5be9b --- /dev/null +++ b/internal/logger/logger_test.go @@ -0,0 +1,335 @@ +package logger + +import ( + "bytes" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNew(t *testing.T) { + tests := []struct { + name string + quiet bool + debug bool + }{ + { + name: "normal mode", + quiet: false, + debug: false, + }, + { + name: "quiet mode", + quiet: true, + debug: false, + }, + { + name: "debug mode", + quiet: false, + debug: true, + }, + { + name: "quiet and debug mode", + quiet: true, + debug: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + logger := New(tt.quiet, tt.debug) + assert.NotNil(t, logger) + assert.Equal(t, tt.quiet, logger.quiet) + assert.Equal(t, tt.debug, logger.debug) + assert.NotNil(t, logger.writer) + }) + } +} + +func TestLogger_Infof(t *testing.T) { + tests := []struct { + name string + quiet bool + message string + args []interface{} + expectedOutput string + shouldOutput bool + }{ + { + name: "info message in normal mode", + quiet: false, + message: "Processing %s", + args: []interface{}{"test"}, + expectedOutput: "Processing test\n", + shouldOutput: true, + }, + { + name: "info message in quiet mode", + quiet: true, + message: "Processing %s", + args: []interface{}{"test"}, + expectedOutput: "", + shouldOutput: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + buf := &bytes.Buffer{} + logger := &Logger{ + writer: buf, + quiet: tt.quiet, + } + + logger.Infof(tt.message, tt.args...) 
+ + if tt.shouldOutput { + assert.Equal(t, tt.expectedOutput, buf.String()) + } else { + assert.Empty(t, buf.String()) + } + }) + } +} + +func TestLogger_Successf(t *testing.T) { + tests := []struct { + name string + quiet bool + message string + args []interface{} + shouldOutput bool + containsSymbol bool + }{ + { + name: "success message in normal mode", + quiet: false, + message: "Completed %s", + args: []interface{}{"task"}, + shouldOutput: true, + containsSymbol: true, + }, + { + name: "success message in quiet mode", + quiet: true, + message: "Completed %s", + args: []interface{}{"task"}, + shouldOutput: false, + containsSymbol: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + buf := &bytes.Buffer{} + logger := &Logger{ + writer: buf, + quiet: tt.quiet, + } + + logger.Successf(tt.message, tt.args...) + + if tt.shouldOutput { + output := buf.String() + assert.Contains(t, output, "Completed task") + if tt.containsSymbol { + assert.Contains(t, output, "✓") + } + } else { + assert.Empty(t, buf.String()) + } + }) + } +} + +//nolint:dupl // Test functions are intentionally similar for consistency +func TestLogger_Warningf(t *testing.T) { + tests := []struct { + name string + quiet bool + message string + args []interface{} + shouldOutput bool + }{ + { + name: "warning in normal mode", + quiet: false, + message: "Deprecated %s", + args: []interface{}{"feature"}, + shouldOutput: true, + }, + { + name: "warning in quiet mode", + quiet: true, + message: "Deprecated %s", + args: []interface{}{"feature"}, + shouldOutput: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + buf := &bytes.Buffer{} + logger := &Logger{ + writer: buf, + quiet: tt.quiet, + } + + logger.Warningf(tt.message, tt.args...) 
+ + if tt.shouldOutput { + output := buf.String() + assert.Contains(t, output, "Warning:") + assert.Contains(t, output, "Deprecated feature") + } else { + assert.Empty(t, buf.String()) + } + }) + } +} + +func TestLogger_Errorf(t *testing.T) { + tests := []struct { + name string + quiet bool + message string + args []interface{} + }{ + { + name: "error in normal mode", + quiet: false, + message: "Failed to %s", + args: []interface{}{"connect"}, + }, + { + name: "error in quiet mode (still outputs)", + quiet: true, + message: "Failed to %s", + args: []interface{}{"connect"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + buf := &bytes.Buffer{} + logger := &Logger{ + writer: buf, + quiet: tt.quiet, + } + + logger.Errorf(tt.message, tt.args...) + + // Errors always output, regardless of quiet mode + output := buf.String() + assert.Contains(t, output, "Error:") + assert.Contains(t, output, "Failed to connect") + }) + } +} + +//nolint:dupl // Test functions are intentionally similar for consistency +func TestLogger_Debugf(t *testing.T) { + tests := []struct { + name string + debug bool + message string + args []interface{} + shouldOutput bool + }{ + { + name: "debug message with debug enabled", + debug: true, + message: "Debug info: %s", + args: []interface{}{"details"}, + shouldOutput: true, + }, + { + name: "debug message with debug disabled", + debug: false, + message: "Debug info: %s", + args: []interface{}{"details"}, + shouldOutput: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + buf := &bytes.Buffer{} + logger := &Logger{ + writer: buf, + debug: tt.debug, + } + + logger.Debugf(tt.message, tt.args...) 
+ + if tt.shouldOutput { + output := buf.String() + assert.Contains(t, output, "DEBUG:") + assert.Contains(t, output, "Debug info: details") + } else { + assert.Empty(t, buf.String()) + } + }) + } +} + +func TestLogger_Println(t *testing.T) { + tests := []struct { + name string + quiet bool + shouldOutput bool + }{ + { + name: "blank line in normal mode", + quiet: false, + shouldOutput: true, + }, + { + name: "blank line in quiet mode", + quiet: true, + shouldOutput: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + buf := &bytes.Buffer{} + logger := &Logger{ + writer: buf, + quiet: tt.quiet, + } + + logger.Println() + + if tt.shouldOutput { + assert.Equal(t, "\n", buf.String()) + } else { + assert.Empty(t, buf.String()) + } + }) + } +} + +func TestLogger_MultipleCalls(t *testing.T) { + buf := &bytes.Buffer{} + logger := &Logger{ + writer: buf, + quiet: false, + debug: true, + } + + logger.Infof("Starting process") + logger.Debugf("Debug details") + logger.Successf("Process completed") + logger.Warningf("Cleanup recommended") + + output := buf.String() + lines := strings.Split(strings.TrimSpace(output), "\n") + assert.Len(t, lines, 4) + + assert.Contains(t, output, "Starting process") + assert.Contains(t, output, "DEBUG: Debug details") + assert.Contains(t, output, "✓ Process completed") + assert.Contains(t, output, "Warning: Cleanup recommended") +} diff --git a/internal/output/formatter.go b/internal/output/formatter.go new file mode 100644 index 0000000..c8a136c --- /dev/null +++ b/internal/output/formatter.go @@ -0,0 +1,119 @@ +package output + +import ( + "encoding/json" + "fmt" + "io" + "os" + "strings" + "text/tabwriter" +) + +// Format represents supported output formats +type Format string + +const ( + FormatTable Format = "table" + FormatJSON Format = "json" + + // tabwriterPadding is the padding between columns in table output + tabwriterPadding = 2 +) + +// Formatter handles output formatting for list commands +type 
Formatter struct { + writer io.Writer + format Format +} + +// NewFormatter creates a new output formatter +// Defaults to table format if invalid format provided +func NewFormatter(format string) *Formatter { + f := Format(format) + if f != FormatTable && f != FormatJSON { + f = FormatTable + } + return &Formatter{ + writer: os.Stdout, + format: f, + } +} + +// Table represents a table with headers and rows +type Table struct { + Headers []string + Rows [][]string +} + +// PrintTable prints data in the configured format (table or json) +func (f *Formatter) PrintTable(table Table) error { + if len(table.Rows) == 0 { + if f.format == FormatTable { + fmt.Fprintln(f.writer, "No data found") + } else { + // For JSON, output empty array + return f.printJSON([]map[string]string{}) + } + return nil + } + + switch f.format { + case FormatJSON: + return f.printJSON(tableToMaps(table)) + case FormatTable: + return f.printTable(table) + default: + return f.printTable(table) + } +} + +// printTable prints data in table format using tabwriter +func (f *Formatter) printTable(table Table) error { + w := tabwriter.NewWriter(f.writer, 0, 0, tabwriterPadding, ' ', 0) + + // Print header + fmt.Fprintln(w, strings.Join(table.Headers, "\t")) + + // Print rows + for _, row := range table.Rows { + fmt.Fprintln(w, strings.Join(row, "\t")) + } + + return w.Flush() +} + +// printJSON prints data in JSON format +func (f *Formatter) printJSON(data interface{}) error { + encoder := json.NewEncoder(f.writer) + encoder.SetIndent("", " ") + return encoder.Encode(data) +} + +// tableToMaps converts a Table to a slice of maps for JSON output +func tableToMaps(table Table) []map[string]string { + result := make([]map[string]string, 0, len(table.Rows)) + for _, row := range table.Rows { + item := make(map[string]string) + for i, header := range table.Headers { + if i < len(row) { + item[header] = row[i] + } + } + result = append(result, item) + } + return result +} + +// PrintMessage prints a simple 
message (only in table format, ignored in JSON) +func (f *Formatter) PrintMessage(message string) { + if f.format == FormatTable { + fmt.Fprintln(f.writer, message) + } +} + +// PrintError prints an error message (only in table format, ignored in JSON) +func (f *Formatter) PrintError(err error) { + if f.format == FormatTable { + fmt.Fprintf(f.writer, "Errorf: %v\n", err) + } +} diff --git a/internal/output/formatter_test.go b/internal/output/formatter_test.go new file mode 100644 index 0000000..ab2e869 --- /dev/null +++ b/internal/output/formatter_test.go @@ -0,0 +1,379 @@ +package output + +import ( + "bytes" + "encoding/json" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewFormatter(t *testing.T) { + tests := []struct { + name string + format string + expectedFormat Format + }{ + { + name: "table format", + format: "table", + expectedFormat: FormatTable, + }, + { + name: "json format", + format: "json", + expectedFormat: FormatJSON, + }, + { + name: "invalid format defaults to table", + format: "invalid", + expectedFormat: FormatTable, + }, + { + name: "empty format defaults to table", + format: "", + expectedFormat: FormatTable, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + formatter := NewFormatter(tt.format) + assert.NotNil(t, formatter) + assert.Equal(t, tt.expectedFormat, formatter.format) + assert.NotNil(t, formatter.writer) + }) + } +} + +func TestFormatter_PrintTable_TableFormat(t *testing.T) { + tests := []struct { + name string + table Table + expectedContains []string + }{ + { + name: "table with data", + table: Table{ + Headers: []string{"NAME", "STATUS", "COUNT"}, + Rows: [][]string{ + {"snapshot-1", "SUCCESS", "100"}, + {"snapshot-2", "PARTIAL", "50"}, + }, + }, + expectedContains: []string{"NAME", "STATUS", "COUNT", "snapshot-1", "SUCCESS", "100", "snapshot-2", "PARTIAL", "50"}, + }, + { + name: "table with single row", + table: Table{ + 
Headers: []string{"ID", "VALUE"}, + Rows: [][]string{ + {"1", "test"}, + }, + }, + expectedContains: []string{"ID", "VALUE", "1", "test"}, + }, + { + name: "empty table", + table: Table{ + Headers: []string{"NAME", "STATUS"}, + Rows: [][]string{}, + }, + expectedContains: []string{"No data found"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + buf := &bytes.Buffer{} + formatter := &Formatter{ + writer: buf, + format: FormatTable, + } + + err := formatter.PrintTable(tt.table) + require.NoError(t, err) + + output := buf.String() + for _, expected := range tt.expectedContains { + assert.Contains(t, output, expected) + } + }) + } +} + +func TestFormatter_PrintTable_JSONFormat(t *testing.T) { + tests := []struct { + name string + table Table + expectedLen int + validateFirst map[string]string + }{ + { + name: "json with data", + table: Table{ + Headers: []string{"NAME", "STATUS"}, + Rows: [][]string{ + {"snapshot-1", "SUCCESS"}, + {"snapshot-2", "PARTIAL"}, + }, + }, + expectedLen: 2, + validateFirst: map[string]string{ + "NAME": "snapshot-1", + "STATUS": "SUCCESS", + }, + }, + { + name: "json with single row", + table: Table{ + Headers: []string{"ID", "VALUE"}, + Rows: [][]string{ + {"1", "test"}, + }, + }, + expectedLen: 1, + validateFirst: map[string]string{ + "ID": "1", + "VALUE": "test", + }, + }, + { + name: "empty json", + table: Table{ + Headers: []string{"NAME"}, + Rows: [][]string{}, + }, + expectedLen: 0, + validateFirst: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + buf := &bytes.Buffer{} + formatter := &Formatter{ + writer: buf, + format: FormatJSON, + } + + err := formatter.PrintTable(tt.table) + require.NoError(t, err) + + var result []map[string]string + err = json.Unmarshal(buf.Bytes(), &result) + require.NoError(t, err) + + assert.Len(t, result, tt.expectedLen) + + if tt.validateFirst != nil && len(result) > 0 { + for key, expectedValue := range tt.validateFirst { + 
assert.Equal(t, expectedValue, result[0][key]) + } + } + }) + } +} + +func TestFormatter_PrintMessage(t *testing.T) { + tests := []struct { + name string + format Format + message string + shouldOutput bool + }{ + { + name: "message in table format", + format: FormatTable, + message: "Operation completed successfully", + shouldOutput: true, + }, + { + name: "message in json format (ignored)", + format: FormatJSON, + message: "Operation completed successfully", + shouldOutput: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + buf := &bytes.Buffer{} + formatter := &Formatter{ + writer: buf, + format: tt.format, + } + + formatter.PrintMessage(tt.message) + + if tt.shouldOutput { + assert.Contains(t, buf.String(), tt.message) + } else { + assert.Empty(t, buf.String()) + } + }) + } +} + +func TestFormatter_PrintError(t *testing.T) { + tests := []struct { + name string + format Format + err error + shouldOutput bool + }{ + { + name: "error in table format", + format: FormatTable, + err: assert.AnError, + shouldOutput: true, + }, + { + name: "error in json format (ignored)", + format: FormatJSON, + err: assert.AnError, + shouldOutput: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + buf := &bytes.Buffer{} + formatter := &Formatter{ + writer: buf, + format: tt.format, + } + + formatter.PrintError(tt.err) + + if tt.shouldOutput { + output := buf.String() + assert.Contains(t, output, "Errorf:") + assert.Contains(t, output, tt.err.Error()) + } else { + assert.Empty(t, buf.String()) + } + }) + } +} + +func TestTableToMaps(t *testing.T) { + tests := []struct { + name string + table Table + expected []map[string]string + }{ + { + name: "standard table", + table: Table{ + Headers: []string{"NAME", "STATUS"}, + Rows: [][]string{ + {"test1", "active"}, + {"test2", "inactive"}, + }, + }, + expected: []map[string]string{ + {"NAME": "test1", "STATUS": "active"}, + {"NAME": "test2", "STATUS": "inactive"}, + }, + 
}, + { + name: "table with mismatched row length", + table: Table{ + Headers: []string{"NAME", "STATUS", "COUNT"}, + Rows: [][]string{ + {"test1", "active"}, + {"test2", "inactive", "5"}, + }, + }, + expected: []map[string]string{ + {"NAME": "test1", "STATUS": "active"}, + {"NAME": "test2", "STATUS": "inactive", "COUNT": "5"}, + }, + }, + { + name: "empty table", + table: Table{ + Headers: []string{"NAME"}, + Rows: [][]string{}, + }, + expected: []map[string]string{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := tableToMaps(tt.table) + assert.Equal(t, len(tt.expected), len(result)) + + for i, expectedMap := range tt.expected { + assert.Equal(t, expectedMap, result[i]) + } + }) + } +} + +func TestFormatter_TableAlignment(t *testing.T) { + buf := &bytes.Buffer{} + formatter := &Formatter{ + writer: buf, + format: FormatTable, + } + + table := Table{ + Headers: []string{"SHORT", "VERY_LONG_HEADER", "MED"}, + Rows: [][]string{ + {"a", "value1", "x"}, + {"bb", "value2", "yy"}, + {"ccc", "value3", "zzz"}, + }, + } + + err := formatter.PrintTable(table) + require.NoError(t, err) + + output := buf.String() + lines := strings.Split(strings.TrimSpace(output), "\n") + + // Should have header + 3 data rows + assert.Len(t, lines, 4) + + // Check that columns are aligned (each line should have proper spacing) + for _, line := range lines { + assert.NotEmpty(t, line) + // Should contain multiple columns separated by spaces + parts := strings.Fields(line) + assert.GreaterOrEqual(t, len(parts), 3) + } +} + +func TestFormatter_JSONIndentation(t *testing.T) { + buf := &bytes.Buffer{} + formatter := &Formatter{ + writer: buf, + format: FormatJSON, + } + + table := Table{ + Headers: []string{"NAME", "VALUE"}, + Rows: [][]string{ + {"test", "data"}, + }, + } + + err := formatter.PrintTable(table) + require.NoError(t, err) + + output := buf.String() + + // JSON should be indented (contains newlines and spaces) + assert.Contains(t, output, 
"\n") + assert.Contains(t, output, " ") + + // Should be valid JSON + var result []map[string]string + err = json.Unmarshal(buf.Bytes(), &result) + require.NoError(t, err) +} diff --git a/main.go b/main.go new file mode 100644 index 0000000..c1330c6 --- /dev/null +++ b/main.go @@ -0,0 +1,9 @@ +package main + +import ( + "github.com/stackvista/stackstate-backup-cli/cmd" +) + +func main() { + cmd.Execute() +}