From 282406b9a8828a3713568cfb6fa3b9fc79a31c4f Mon Sep 17 00:00:00 2001
From: ProbstenHias
Date: Mon, 6 May 2024 12:32:41 +0200
Subject: [PATCH] feat: add Anexia webhook

This commit introduces the Anexia webhook as a webhook provider for the
ExternalDNS project.
---
 .conform.yaml                                    |  29 +
 .github/dependabot.yml                           |  14 +
 .github/workflows/pull_request.yml               | 150 +++
 .gitignore                                       |   7 +
 .golangci.yml                                    |  20 +
 .goreleaser.yml                                  | 120 ++++
 Dockerfile                                       |   6 +
 LICENSE                                          | 201 +++++++
 Makefile                                         |  23 +
 .../init/configuration/configuration.go          |  29 +
 cmd/webhook/init/dnsprovider/dnsprovider.go      |  52 ++
 .../init/dnsprovider/dnsprovider_test.go         |  61 ++
 cmd/webhook/init/logging/log.go                  |  41 ++
 cmd/webhook/init/server/server.go                |  66 +++
 cmd/webhook/init/server/server_test.go           | 545 ++++++++++++++++++
 cmd/webhook/main.go                              |  43 ++
 go.mod                                           |  37 ++
 go.sum                                           | 130 +++++
 internal/anexia/configuration.go                 |  22 +
 internal/anexia/endpoint_helpers.go              |  28 +
 internal/anexia/provider.go                      | 334 +++++++++++
 internal/anexia/provider_test.go                 | 537 +++++++++++++++++
 pkg/webhook/mediatype.go                         |  41 ++
 pkg/webhook/webhook.go                           | 228 ++++++++
 24 files changed, 2764 insertions(+)
 create mode 100644 .conform.yaml
 create mode 100644 .github/dependabot.yml
 create mode 100644 .github/workflows/pull_request.yml
 create mode 100644 .gitignore
 create mode 100644 .golangci.yml
 create mode 100644 .goreleaser.yml
 create mode 100644 Dockerfile
 create mode 100644 LICENSE
 create mode 100644 Makefile
 create mode 100644 cmd/webhook/init/configuration/configuration.go
 create mode 100644 cmd/webhook/init/dnsprovider/dnsprovider.go
 create mode 100644 cmd/webhook/init/dnsprovider/dnsprovider_test.go
 create mode 100644 cmd/webhook/init/logging/log.go
 create mode 100644 cmd/webhook/init/server/server.go
 create mode 100644 cmd/webhook/init/server/server_test.go
 create mode 100644 cmd/webhook/main.go
 create mode 100644 go.mod
 create mode 100644 go.sum
 create mode 100644 internal/anexia/configuration.go
 create mode 100644 internal/anexia/endpoint_helpers.go
 create mode 100644 internal/anexia/provider.go
 create mode 100644 internal/anexia/provider_test.go
 create mode 100644 pkg/webhook/mediatype.go
 create mode 100644 pkg/webhook/webhook.go

diff --git a/.conform.yaml b/.conform.yaml
new file mode 100644
index 0000000..32c3059
--- /dev/null
+++ b/.conform.yaml
@@ -0,0 +1,29 @@
+---
+policies:
+  - type: commit
+    spec:
+      header:
+        length: 200
+        imperative: true
+        case: lower
+        invalidLastCharacters: .
+ body: + required: false + dco: false + gpg: false + spellcheck: + locale: US + maximumOfOneCommit: false + conventional: + types: + - refactor + - perf + - chore + - test + - docs + - no_type + scopes: + - release + - deps + - ci + descriptionLength: 100 diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..07bc535 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,14 @@ +version: 2 +updates: + - package-ecosystem: gomod + directory: / + schedule: + interval: daily + - package-ecosystem: github-actions + directory: / + schedule: + interval: daily + - package-ecosystem: docker + directory: / + schedule: + interval: daily diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml new file mode 100644 index 0000000..324fad5 --- /dev/null +++ b/.github/workflows/pull_request.yml @@ -0,0 +1,150 @@ +--- +name: Verify + +on: + push: + branches: + - main + pull_request: + workflow_dispatch: + +permissions: + contents: read + pull-requests: read + packages: read + id-token: write + +env: + GO111MODULE: "on" + +jobs: + conform: + runs-on: ubuntu-latest + name: Conform + + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - uses: siderolabs/conform@v0.1.0-alpha.27 + with: + token: ${{ secrets.GITHUB_TOKEN }} + + lint_provider: + runs-on: ubuntu-latest + name: Lint Provider + strategy: + max-parallel: 4 + matrix: + go-version: [1.22.x] + golangci-lint-version: [v1.54.2] + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Install Go + uses: actions/setup-go@v5 + with: + go-version: "${{ matrix.go-version }}" + + - name: Lint Provider + uses: golangci/golangci-lint-action@v5 + with: + version: "${{ matrix.golangci-lint-version }}" + args: -c .golangci.yml + + - name: Vet Provider + run: | + go vet ./... 
+ + test: + runs-on: ubuntu-latest + name: Unit Tests + strategy: + max-parallel: 4 + matrix: + go-version: [1.22.x] + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Install Go + uses: actions/setup-go@v5 + with: + go-version: "${{ matrix.go-version }}" + + - name: Run Tests + run: | + make test + + - run: | + go install github.com/mattn/goveralls@latest + goveralls -coverprofile=covprofile -service=github + env: + COVERALLS_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + dockerfile_lint: + runs-on: ubuntu-latest + name: Lint Dockerfile + + steps: + - name: Checkout repository + uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 + with: + fetch-depth: 0 + + - name: Lint Dockerfile + uses: hadolint/hadolint-action@v3.1.0 + with: + dockerfile: Dockerfile + + build: + runs-on: ubuntu-latest + name: Build Provider and Container + needs: + - lint_provider + - dockerfile_lint + - test + strategy: + max-parallel: 4 + matrix: + go-version: [1.22.x] + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Install Go + uses: actions/setup-go@v5 + with: + go-version: "${{ matrix.go-version }}" + + - name: Install cosign + uses: sigstore/cosign-installer@v3.5.0 + - name: Download Syft + uses: anchore/sbom-action/download-syft@v0.15.11 + + - name: Setup QEMU + uses: docker/setup-qemu-action@v3 + - name: Setup Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Release via GoReleaser + uses: goreleaser/goreleaser-action@v5 + with: + args: -p 3 release --snapshot --clean --skip=publish --timeout 60m0s + version: latest + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + CI_COMMIT_TIMESTAMP: ${{ github.event.pull_request.updated_at }} + CI_COMMIT_SHA: ${{ github.sha }} + CI_COMMIT_TAG: ${{ github.sha }}-dev diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..bbb1e23 --- /dev/null +++ b/.gitignore @@ -0,0 +1,7 @@ +.idea/ +.vscode/ +build/ +dist/ +reports/ + +covprofile diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 0000000..3294567 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,20 @@ +linters: + enable: + - errcheck + - revive + - govet + - ineffassign + - lll + - gosimple + - staticcheck + - unused + - misspell + - nakedret + - unconvert + - unused + enable-all: false +linters-settings: + lll: + line-length: 250 +run: + timeout: 10m diff --git a/.goreleaser.yml b/.goreleaser.yml new file mode 100644 index 0000000..8a5135b --- /dev/null +++ b/.goreleaser.yml @@ -0,0 +1,120 @@ +--- +archives: + - id: archive + name_template: "{{ .Binary }}-{{ .Tag }}-{{ .Os }}-{{ .Arch }}" +builds: + - binary: external-dns-anexia-webhook + env: + - CGO_ENABLED=0 + - GO111MODULE=on + goarch: + - amd64 + - arm64 + - arm + goos: + - darwin + - windows + - linux + goarm: + - "7" + ldflags: + - -X 'main.Version={{ .Tag }}' + - -X 'main.Gitsha={{ .ShortCommit }}' + main: ./cmd/webhook +source: + enabled: true +signs: + - cmd: cosign + env: + - COSIGN_EXPERIMENTAL=1 + certificate: "${artifact}.pem" + args: + - sign-blob + - --yes + - "--output-certificate=${certificate}" + - "--bundle=${signature}" + - "${artifact}" + artifacts: all + output: true +sboms: + - artifacts: archive + - id: source + artifacts: source +dockers: + - use: buildx + goos: linux + goarch: amd64 + image_templates: + - ghcr.io/probstenhias/external-dns-anexia-webhook:latest-amd64 + - ghcr.io/probstenhias/external-dns-anexia-webhook:{{ .Env.CI_COMMIT_SHA }}-amd64 + - 
ghcr.io/probstenhias/external-dns-anexia-webhook:{{ .Env.CI_COMMIT_TAG }}-amd64
+    build_flag_templates:
+      - --pull
+      - --platform=linux/amd64
+      - --build-arg=CI_COMMIT_TIMESTAMP="{{ .Env.CI_COMMIT_TIMESTAMP }}"
+      - --build-arg=CI_COMMIT_SHA="{{ .Env.CI_COMMIT_SHA }}"
+      - --build-arg=CI_COMMIT_TAG="{{ .Env.CI_COMMIT_TAG }}"
+  - use: buildx
+    goos: linux
+    goarch: arm64
+    image_templates:
+      - ghcr.io/probstenhias/external-dns-anexia-webhook:latest-arm64
+      - ghcr.io/probstenhias/external-dns-anexia-webhook:{{ .Env.CI_COMMIT_SHA }}-arm64
+      - ghcr.io/probstenhias/external-dns-anexia-webhook:{{ .Env.CI_COMMIT_TAG }}-arm64
+    build_flag_templates:
+      - --pull
+      - --platform=linux/arm64
+      - --build-arg=CI_COMMIT_TIMESTAMP="{{ .Env.CI_COMMIT_TIMESTAMP }}"
+      - --build-arg=CI_COMMIT_SHA="{{ .Env.CI_COMMIT_SHA }}"
+      - --build-arg=CI_COMMIT_TAG="{{ .Env.CI_COMMIT_TAG }}"
+  - use: buildx
+    goos: linux
+    goarch: arm
+    goarm: "7"
+    image_templates:
+      - ghcr.io/probstenhias/external-dns-anexia-webhook:latest-armv7
+      - ghcr.io/probstenhias/external-dns-anexia-webhook:{{ .Env.CI_COMMIT_SHA }}-armv7
+      - ghcr.io/probstenhias/external-dns-anexia-webhook:{{ .Env.CI_COMMIT_TAG }}-armv7
+    build_flag_templates:
+      - --pull
+      - --platform=linux/arm/v7
+      - --build-arg=CI_COMMIT_TIMESTAMP="{{ .Env.CI_COMMIT_TIMESTAMP }}"
+      - --build-arg=CI_COMMIT_SHA="{{ .Env.CI_COMMIT_SHA }}"
+      - --build-arg=CI_COMMIT_TAG="{{ .Env.CI_COMMIT_TAG }}"
+docker_manifests:
+  - name_template: ghcr.io/probstenhias/external-dns-anexia-webhook:latest
+    image_templates:
+      - ghcr.io/probstenhias/external-dns-anexia-webhook:latest-amd64
+      - ghcr.io/probstenhias/external-dns-anexia-webhook:latest-arm64
+      - ghcr.io/probstenhias/external-dns-anexia-webhook:latest-armv7
+  - name_template: ghcr.io/probstenhias/external-dns-anexia-webhook:{{ .Env.CI_COMMIT_SHA }}
+    image_templates:
+      - ghcr.io/probstenhias/external-dns-anexia-webhook:{{ .Env.CI_COMMIT_SHA }}-amd64
+      - ghcr.io/probstenhias/external-dns-anexia-webhook:{{ .Env.CI_COMMIT_SHA }}-arm64
+      - ghcr.io/probstenhias/external-dns-anexia-webhook:{{ .Env.CI_COMMIT_SHA }}-armv7
+  - name_template: ghcr.io/probstenhias/external-dns-anexia-webhook:{{ .Env.CI_COMMIT_TAG }}
+    image_templates:
+      - ghcr.io/probstenhias/external-dns-anexia-webhook:{{ .Env.CI_COMMIT_TAG }}-amd64
+      - ghcr.io/probstenhias/external-dns-anexia-webhook:{{ .Env.CI_COMMIT_TAG }}-arm64
+      - ghcr.io/probstenhias/external-dns-anexia-webhook:{{ .Env.CI_COMMIT_TAG }}-armv7
+changelog:
+  skip: true
+  use: github
+  filters:
+    exclude:
+      - "^docs"
+      - "^chore"
+  groups:
+    - title: "New Features"
+      regexp: "^.*feat[(\\w)]*:+.*$"
+      order: 0
+    - title: "Bugfixes"
+      regexp: "^.*fix[(\\w)]*:+.*$"
+      order: 10
+    - title: Other Work
+      order: 999
+release:
+  disable: false
+  prerelease: auto
+snapshot:
+  name_template: "{{ .Tag }}-SNAPSHOT"
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..72f6087
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,6 @@
+FROM gcr.io/distroless/static-debian12:nonroot
+
+USER 20000:20000
+COPY --chmod=555 external-dns-anexia-webhook /opt/external-dns-anexia-webhook/app
+
+ENTRYPOINT ["/opt/external-dns-anexia-webhook/app"]
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..f2b596e
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2023 IONOS SE + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..7b5dca3 --- /dev/null +++ b/Makefile @@ -0,0 +1,23 @@ +ARTIFACT_NAME := external-dns-anexia-webhook + +TESTPARALLELISM := 4 + +WORKING_DIR := $(shell pwd) + +.PHONY: lint +lint:: + golangci-lint run -c .golangci.yml + go vet ./... + +.PHONY: clean +clean:: + rm -rf $(WORKING_DIR)/bin + +.PHONY: build +build:: + go build -o $(WORKING_DIR)/bin/${ARTIFACT_NAME} ./cmd/webhook + chmod +x $(WORKING_DIR)/bin/${ARTIFACT_NAME} + +.PHONY: test +test:: + go test -v -tags=all -parallel ${TESTPARALLELISM} -timeout 2h -covermode atomic -coverprofile=covprofile ./... diff --git a/cmd/webhook/init/configuration/configuration.go b/cmd/webhook/init/configuration/configuration.go new file mode 100644 index 0000000..b4376c7 --- /dev/null +++ b/cmd/webhook/init/configuration/configuration.go @@ -0,0 +1,29 @@ +package configuration + +import ( + "time" + + "github.com/caarlos0/env/v11" + log "github.com/sirupsen/logrus" +) + +// Config struct for configuration environmental variables +type Config struct { + ServerHost string `env:"SERVER_HOST" envDefault:"localhost"` + ServerPort int `env:"SERVER_PORT" envDefault:"8888"` + ServerReadTimeout time.Duration `env:"SERVER_READ_TIMEOUT"` + ServerWriteTimeout time.Duration `env:"SERVER_WRITE_TIMEOUT"` + DomainFilter []string `env:"DOMAIN_FILTER" envDefault:""` + ExcludeDomains []string `env:"EXCLUDE_DOMAIN_FILTER" envDefault:""` + RegexDomainFilter string `env:"REGEXP_DOMAIN_FILTER" envDefault:""` + RegexDomainExclusion string `env:"REGEXP_DOMAIN_FILTER_EXCLUSION" envDefault:""` +} + +// Init sets up configuration by reading set environmental variables +func Init() Config { + cfg := Config{} + if err := env.Parse(&cfg); err != nil { + log.Fatalf("Error reading configuration from environment: %v", err) + } + return cfg +} diff --git a/cmd/webhook/init/dnsprovider/dnsprovider.go b/cmd/webhook/init/dnsprovider/dnsprovider.go new file mode 100644 index 0000000..69ce22d --- /dev/null +++ b/cmd/webhook/init/dnsprovider/dnsprovider.go @@ -0,0 +1,52 @@ +package dnsprovider + +import ( + "fmt" + "regexp" + "strings" + + "github.com/probstenhias/external-dns-anexia-webhook/cmd/webhook/init/configuration" + "github.com/probstenhias/external-dns-anexia-webhook/internal/anexia" + "github.com/caarlos0/env/v11" + "sigs.k8s.io/external-dns/endpoint" + "sigs.k8s.io/external-dns/provider" + + log "github.com/sirupsen/logrus" +) + +func Init(config configuration.Config) (provider.Provider, error) { + var domainFilter endpoint.DomainFilter + createMsg := "Creating anexia provider with " + + if config.RegexDomainFilter != "" { + createMsg += fmt.Sprintf("regexp domain filter: '%s', ", config.RegexDomainFilter) + if config.RegexDomainExclusion != "" { + createMsg += fmt.Sprintf("with exclusion: '%s', ", config.RegexDomainExclusion) + } + domainFilter = endpoint.NewRegexDomainFilter( + regexp.MustCompile(config.RegexDomainFilter), + regexp.MustCompile(config.RegexDomainExclusion), + ) + } else { + if config.DomainFilter != nil && len(config.DomainFilter) > 0 { + createMsg += fmt.Sprintf("domain filter: '%s', ", strings.Join(config.DomainFilter, ",")) + } + if config.ExcludeDomains != nil && len(config.ExcludeDomains) > 0 { + createMsg += fmt.Sprintf("exclude domain filter: '%s', ", strings.Join(config.ExcludeDomains, ",")) + } + domainFilter = endpoint.NewDomainFilterWithExclusions(config.DomainFilter, config.ExcludeDomains) + } + + createMsg = strings.TrimSuffix(createMsg, ", ") + if strings.HasSuffix(createMsg, "with ") 
{ + createMsg += "no kind of domain filters" + } + log.Info(createMsg) + + anexiaConfig := anexia.Configuration{} + if err := env.Parse(&anexiaConfig); err != nil { + return nil, fmt.Errorf("reading anexia configuration failed: %v", err) + } + + return anexia.NewProvider(&anexiaConfig, domainFilter) +} diff --git a/cmd/webhook/init/dnsprovider/dnsprovider_test.go b/cmd/webhook/init/dnsprovider/dnsprovider_test.go new file mode 100644 index 0000000..f0dc485 --- /dev/null +++ b/cmd/webhook/init/dnsprovider/dnsprovider_test.go @@ -0,0 +1,61 @@ +package dnsprovider + +import ( + "testing" + + "github.com/probstenhias/external-dns-anexia-webhook/cmd/webhook/init/configuration" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +func TestInit(t *testing.T) { + log.SetLevel(log.DebugLevel) + + cases := []struct { + name string + config configuration.Config + env map[string]string + expectedError string + }{ + { + name: "minimal config for anexia provider", + config: configuration.Config{}, + env: map[string]string{ + "ANEXIA_API_TOKEN": "token", + }, + }, + { + name: "domain filter config for anexia provider", + config: configuration.Config{ + DomainFilter: []string{"domain.com"}, + ExcludeDomains: []string{"sub.domain.com"}, + }, + env: map[string]string{ + "ANEXIA_API_TOKEN": "token", + }, + }, + { + name: "empty configuration", + config: configuration.Config{}, + expectedError: "reading anexia configuration failed: env: environment variable \"ANEXIA_API_TOKEN\" should not be empty", + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + for k, v := range tc.env { + t.Setenv(k, v) + } + + dnsProvider, err := Init(tc.config) + + if tc.expectedError != "" { + assert.EqualError(t, err, tc.expectedError, "expecting error") + return + } + + assert.NoErrorf(t, err, "error creating provider") + assert.NotNil(t, dnsProvider) + }) + } +} diff --git a/cmd/webhook/init/logging/log.go b/cmd/webhook/init/logging/log.go new file mode 100644 index 0000000..cedc547 --- /dev/null +++ b/cmd/webhook/init/logging/log.go @@ -0,0 +1,41 @@ +package logging + +import ( + "os" + "strconv" + + log "github.com/sirupsen/logrus" +) + +func Init() { + setLogLevel() + setLogFormat() +} + +func setLogFormat() { + format := os.Getenv("LOG_FORMAT") + if format == "json" { + log.SetFormatter(&log.JSONFormatter{}) + } else { + log.SetFormatter(&log.TextFormatter{}) + } +} + +func setLogLevel() { + level := os.Getenv("LOG_LEVEL") + if level == "" { + log.SetLevel(log.InfoLevel) + } else { + if levelInt, err := strconv.Atoi(level); err == nil { + log.SetLevel(log.Level(uint32(levelInt))) + } else { + levelInt, err := log.ParseLevel(level) + if err != nil { + log.SetLevel(log.InfoLevel) + log.Errorf("Invalid log level '%s', defaulting to info", level) + } else { + log.SetLevel(levelInt) + } + } + } +} diff --git a/cmd/webhook/init/server/server.go b/cmd/webhook/init/server/server.go new file mode 100644 index 0000000..3080c22 --- /dev/null +++ b/cmd/webhook/init/server/server.go @@ -0,0 +1,66 @@ +package server + +import ( + "context" + "errors" + "fmt" + "net/http" + "os" + "os/signal" + "syscall" + "time" + + "github.com/go-chi/chi/v5" + + log "github.com/sirupsen/logrus" + + "github.com/probstenhias/external-dns-anexia-webhook/cmd/webhook/init/configuration" + + "github.com/probstenhias/external-dns-anexia-webhook/pkg/webhook" +) + +// Init server initialization function +// The server will respond to the following endpoints: +// - / (GET): initialization, negotiates headers 
and returns the domain filter +// - /records (GET): returns the current records +// - /records (POST): applies the changes +// - /adjustendpoints (POST): executes the AdjustEndpoints method +func Init(config configuration.Config, p *webhook.Webhook) *http.Server { + r := chi.NewRouter() + r.Use(webhook.Health) + r.Get("/", p.Negotiate) + r.Get("/records", p.Records) + r.Post("/records", p.ApplyChanges) + r.Post("/adjustendpoints", p.AdjustEndpoints) + + srv := createHTTPServer(fmt.Sprintf("%s:%d", config.ServerHost, config.ServerPort), r, config.ServerReadTimeout, config.ServerWriteTimeout) + go func() { + log.Infof("starting server on addr: '%s' ", srv.Addr) + if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) { + log.Errorf("can't serve on addr: '%s', error: %v", srv.Addr, err) + } + }() + return srv +} + +func createHTTPServer(addr string, hand http.Handler, readTimeout, writeTimeout time.Duration) *http.Server { + return &http.Server{ + ReadTimeout: readTimeout, + WriteTimeout: writeTimeout, + Addr: addr, + Handler: hand, + } +} + +// ShutdownGracefully gracefully shutdown the http server +func ShutdownGracefully(srv *http.Server) { + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) + sig := <-sigCh + log.Infof("shutting down server due to received signal: %v", sig) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + if err := srv.Shutdown(ctx); err != nil { + log.Errorf("error shutting down server: %v", err) + } + cancel() +} diff --git a/cmd/webhook/init/server/server_test.go b/cmd/webhook/init/server/server_test.go new file mode 100644 index 0000000..a8e4e6b --- /dev/null +++ b/cmd/webhook/init/server/server_test.go @@ -0,0 +1,545 @@ +package server + +import ( + "context" + "fmt" + "io" + "net/http" + "reflect" + "strings" + "testing" + "time" + + "github.com/probstenhias/external-dns-anexia-webhook/cmd/webhook/init/configuration" + "github.com/probstenhias/external-dns-anexia-webhook/pkg/webhook" + log "github.com/sirupsen/logrus" + "sigs.k8s.io/external-dns/endpoint" + "sigs.k8s.io/external-dns/plan" +) + +type testCase struct { + name string + returnRecords []*endpoint.Endpoint + returnAdjustedEndpoints []*endpoint.Endpoint + returnDomainFilter endpoint.DomainFilter + hasError error + method string + path string + headers map[string]string + body string + expectedStatusCode int + expectedResponseHeaders map[string]string + expectedBody string + expectedChanges *plan.Changes + expectedEndpointsToAdjust []*endpoint.Endpoint + log.Ext1FieldLogger +} + +var mockProvider *MockProvider + +func TestMain(m *testing.M) { + mockProvider = &MockProvider{} + + srv := Init(configuration.Init(), webhook.New(mockProvider)) + go ShutdownGracefully(srv) + + time.Sleep(300 * time.Millisecond) + + m.Run() + if err := srv.Shutdown(context.TODO()); err != nil { + panic(err) + } +} + +func TestRecords(t *testing.T) { + testCases := []testCase{ + { + name: "valid case", + returnRecords: []*endpoint.Endpoint{ + { + DNSName: "test.example.com", + Targets: []string{""}, + RecordType: "A", + RecordTTL: 3600, + Labels: map[string]string{ + "label1": "value1", + }, + }, + }, + method: http.MethodGet, + headers: map[string]string{"Accept": "application/external.dns.webhook+json;version=1"}, + path: "/records", + body: "", + expectedStatusCode: http.StatusOK, + expectedResponseHeaders: map[string]string{ + "Content-Type": "application/external.dns.webhook+json;version=1", + }, + 
expectedBody: "[{\"dnsName\":\"test.example.com\",\"targets\":[\"\"],\"recordType\":\"A\",\"recordTTL\":3600,\"labels\":{\"label1\":\"value1\"}}]", + }, + { + name: "no accept header", + method: http.MethodGet, + headers: map[string]string{}, + path: "/records", + body: "", + expectedStatusCode: http.StatusNotAcceptable, + expectedResponseHeaders: map[string]string{ + "Content-Type": "text/plain", + }, + expectedBody: "client must provide an accept header", + }, + { + name: "wrong accept header", + method: http.MethodGet, + headers: map[string]string{"Accept": "invalid"}, + path: "/records", + body: "", + expectedStatusCode: http.StatusUnsupportedMediaType, + expectedResponseHeaders: map[string]string{ + "Content-Type": "text/plain", + }, + expectedBody: "Client must provide a valid versioned media type in the accept header: Unsupported media type version: 'invalid'. Supported media types are: 'application/external.dns.webhook+json;version=1'", + }, + { + name: "backend error", + hasError: fmt.Errorf("backend error"), + method: http.MethodGet, + headers: map[string]string{"Accept": "application/external.dns.webhook+json;version=1"}, + path: "/records", + body: "", + expectedStatusCode: http.StatusInternalServerError, + }, + } + + executeTestCases(t, testCases) +} + +func TestApplyChanges(t *testing.T) { + testCases := []testCase{ + { + name: "valid case", + method: http.MethodPost, + headers: map[string]string{ + "Content-Type": "application/external.dns.webhook+json;version=1", + }, + path: "/records", + body: ` +{ + "Create": [ + { + "dnsName": "test.example.com", + "targets": ["11.11.11.11"], + "recordType": "A", + "recordTTL": 3600, + "labels": { + "label1": "value1", + "label2": "value2" + } + } + ] +}`, + expectedStatusCode: http.StatusNoContent, + expectedResponseHeaders: map[string]string{}, + expectedBody: "", + expectedChanges: &plan.Changes{ + Create: []*endpoint.Endpoint{ + { + DNSName: "test.example.com", + Targets: []string{"11.11.11.11"}, + RecordType: "A", + RecordTTL: 3600, + Labels: map[string]string{ + "label1": "value1", + "label2": "value2", + }, + }, + }, + }, + }, + { + name: "valid case with updates", + method: http.MethodPost, + headers: map[string]string{ + "Content-Type": "application/external.dns.webhook+json;version=1", + }, + path: "/records", + body: ` +{ + "UpdateOld": [ + { + "dnsName": "test.example.com", + "targets": ["11.11.11.11"], + "recordType": "A", + "recordTTL": 3600, + "labels": { + "label1": "value1", + "label2": "value2" + } + } + ], + "UpdateNew": [ + { + "dnsName": "test.example.com", + "targets": ["22.22.22.22"], + "recordType": "A", + "recordTTL": 3600, + "labels": { + "label1": "value1", + "label2": "value2" + } + } + ] +}`, + expectedStatusCode: http.StatusNoContent, + expectedResponseHeaders: map[string]string{}, + expectedBody: "", + expectedChanges: &plan.Changes{ + UpdateOld: []*endpoint.Endpoint{ + { + DNSName: "test.example.com", + Targets: []string{"11.11.11.11"}, + RecordType: "A", + RecordTTL: 3600, + Labels: map[string]string{ + "label1": "value1", + "label2": "value2", + }, + }, + }, + UpdateNew: []*endpoint.Endpoint{ + { + DNSName: "test.example.com", + Targets: []string{"22.22.22.22"}, + RecordType: "A", + RecordTTL: 3600, + Labels: map[string]string{ + "label1": "value1", + "label2": "value2", + }, + }, + }, + }, + }, + { + name: "no content type header", + method: http.MethodPost, + path: "/records", + body: "", + expectedStatusCode: http.StatusNotAcceptable, + expectedResponseHeaders: map[string]string{ + "Content-Type": 
"text/plain", + }, + expectedBody: "client must provide a content type", + }, + { + name: "wrong content type header", + method: http.MethodPost, + headers: map[string]string{ + "Content-Type": "invalid", + }, + path: "/records", + body: "", + expectedStatusCode: http.StatusUnsupportedMediaType, + expectedResponseHeaders: map[string]string{ + "Content-Type": "text/plain", + }, + expectedBody: "Client must provide a valid versioned media type in the content type: Unsupported media type version: 'invalid'. Supported media types are: 'application/external.dns.webhook+json;version=1'", + }, + { + name: "invalid json", + method: http.MethodPost, + headers: map[string]string{ + "Content-Type": "application/external.dns.webhook+json;version=1", + "Accept": "application/external.dns.webhook+json;version=1", + }, + path: "/records", + body: "invalid", + expectedStatusCode: http.StatusBadRequest, + expectedResponseHeaders: map[string]string{ + "Content-Type": "text/plain", + }, + expectedBody: "error decoding changes: invalid character 'i' looking for beginning of value", + }, + { + name: "backend error", + hasError: fmt.Errorf("backend error"), + method: http.MethodPost, + headers: map[string]string{ + "Content-Type": "application/external.dns.webhook+json;version=1", + "Accept": "application/external.dns.webhook+json;version=1", + }, + path: "/records", + body: ` +{ + "Create": [ + { + "dnsName": "test.example.com", + "targets": ["11.11.11.11"], + "recordType": "A", + "recordTTL": 3600, + "labels": { + "label1": "value1", + "label2": "value2" + } + } + ] +}`, + expectedStatusCode: http.StatusInternalServerError, + }, + } + + executeTestCases(t, testCases) +} + +func TestAdjustEndpoints(t *testing.T) { + testCases := []testCase{ + { + name: "happy case", + returnAdjustedEndpoints: []*endpoint.Endpoint{ + { + DNSName: "adjusted.example.com", + Targets: []string{""}, + RecordType: "A", + RecordTTL: 3600, + Labels: map[string]string{ + "label1": "value1", + }, + }, + }, + method: http.MethodPost, + headers: map[string]string{ + "Content-Type": "application/external.dns.webhook+json;version=1", + "Accept": "application/external.dns.webhook+json;version=1", + }, + path: "/adjustendpoints", + body: ` +[ + { + "dnsName": "toadjust.example.com", + "targets": [], + "recordType": "A", + "recordTTL": 3600, + "labels": { + "label1": "value1", + "label2": "value2" + } + } +]`, + expectedStatusCode: http.StatusOK, + expectedResponseHeaders: map[string]string{ + "Content-Type": "application/external.dns.webhook+json;version=1", + }, + expectedBody: "[{\"dnsName\":\"adjusted.example.com\",\"targets\":[\"\"],\"recordType\":\"A\",\"recordTTL\":3600,\"labels\":{\"label1\":\"value1\"}}]", + expectedEndpointsToAdjust: []*endpoint.Endpoint{ + { + DNSName: "toadjust.example.com", + Targets: []string{}, + RecordType: "A", + RecordTTL: 3600, + Labels: map[string]string{ + "label1": "value1", + "label2": "value2", + }, + }, + }, + }, + { + name: "no content type header", + method: http.MethodPost, + headers: map[string]string{ + "Accept": "application/external.dns.webhook+json;version=1", + }, + path: "/adjustendpoints", + body: "", + expectedStatusCode: http.StatusNotAcceptable, + expectedResponseHeaders: map[string]string{ + "Content-Type": "text/plain", + }, + expectedBody: "client must provide a content type", + }, + { + name: "wrong content type header", + method: http.MethodPost, + headers: map[string]string{ + "Content-Type": "invalid", + "Accept": "application/external.dns.webhook+json;version=1", + }, + path: 
"/adjustendpoints", + body: "", + expectedStatusCode: http.StatusUnsupportedMediaType, + expectedResponseHeaders: map[string]string{ + "Content-Type": "text/plain", + }, + expectedBody: "Client must provide a valid versioned media type in the content type: Unsupported media type version: 'invalid'. Supported media types are: 'application/external.dns.webhook+json;version=1'", + }, + { + name: "no accept header", + method: http.MethodPost, + headers: map[string]string{ + "Content-Type": "application/external.dns.webhook+json;version=1", + }, + path: "/adjustendpoints", + body: "", + expectedStatusCode: http.StatusNotAcceptable, + expectedResponseHeaders: map[string]string{ + "Content-Type": "text/plain", + }, + expectedBody: "client must provide an accept header", + }, + { + name: "wrong accept header", + method: http.MethodPost, + headers: map[string]string{ + "Content-Type": "application/external.dns.webhook+json;version=1", + "Accept": "invalid", + }, + path: "/adjustendpoints", + body: "", + expectedStatusCode: http.StatusUnsupportedMediaType, + expectedResponseHeaders: map[string]string{ + "Content-Type": "text/plain", + }, + expectedBody: "Client must provide a valid versioned media type in the accept header: Unsupported media type version: 'invalid'. Supported media types are: 'application/external.dns.webhook+json;version=1'", + }, + { + name: "invalid json", + method: http.MethodPost, + headers: map[string]string{ + "Content-Type": "application/external.dns.webhook+json;version=1", + "Accept": "application/external.dns.webhook+json;version=1", + }, + path: "/adjustendpoints", + body: "invalid", + expectedStatusCode: http.StatusBadRequest, + expectedResponseHeaders: map[string]string{ + "Content-Type": "text/plain", + }, + expectedBody: "failed to decode request body: invalid character 'i' looking for beginning of value", + }, + } + + executeTestCases(t, testCases) +} + +func TestNegotiate(t *testing.T) { + testCases := []testCase{ + { + name: "happy case", + returnDomainFilter: endpoint.NewDomainFilter([]string{"a.de"}), + method: http.MethodGet, + headers: map[string]string{"Accept": "application/external.dns.webhook+json;version=1"}, + path: "/", + body: "", + expectedStatusCode: http.StatusOK, + expectedResponseHeaders: map[string]string{ + "Content-Type": "application/external.dns.webhook+json;version=1", + }, + expectedBody: `{"include":["a.de"]}`, + }, + { + name: "no accept header", + method: http.MethodGet, + headers: map[string]string{}, + path: "/", + body: "", + expectedStatusCode: http.StatusNotAcceptable, + expectedResponseHeaders: map[string]string{ + "Content-Type": "text/plain", + }, + expectedBody: "client must provide an accept header", + }, + { + name: "wrong accept header", + method: http.MethodGet, + headers: map[string]string{"Accept": "invalid"}, + path: "/", + body: "", + expectedStatusCode: http.StatusUnsupportedMediaType, + expectedResponseHeaders: map[string]string{ + "Content-Type": "text/plain", + }, + expectedBody: "Client must provide a valid versioned media type in the accept header: Unsupported media type version: 'invalid'. Supported media types are: 'application/external.dns.webhook+json;version=1'", + }, + } + + executeTestCases(t, testCases) +} + +func executeTestCases(t *testing.T, testCases []testCase) { + log.SetLevel(log.DebugLevel) + + for i, tc := range testCases { + t.Run(fmt.Sprintf("%d. 
%s", i+1, tc.name), func(t *testing.T) { + mockProvider.testCase = tc + mockProvider.t = t + + var bodyReader io.Reader = strings.NewReader(tc.body) + + request, err := http.NewRequest(tc.method, "http://localhost:8888"+tc.path, bodyReader) + if err != nil { + t.Error(err) + } + + for k, v := range tc.headers { + request.Header.Set(k, v) + } + + response, err := http.DefaultClient.Do(request) + if err != nil { + t.Error(err) + } + + if response.StatusCode != tc.expectedStatusCode { + t.Errorf("expected status code %d, got %d", tc.expectedStatusCode, response.StatusCode) + } + + for k, v := range tc.expectedResponseHeaders { + if response.Header.Get(k) != v { + t.Errorf("expected header '%s' with value '%s', got '%s'", k, v, response.Header.Get(k)) + } + } + + if tc.expectedBody != "" { + body, err := io.ReadAll(response.Body) + if err != nil { + t.Error(err) + } + _ = response.Body.Close() + actualTrimmedBody := strings.TrimSpace(string(body)) + if actualTrimmedBody != tc.expectedBody { + t.Errorf("expected body '%s', got '%s'", tc.expectedBody, actualTrimmedBody) + } + } + }) + } +} + +type MockProvider struct { + t *testing.T + testCase testCase +} + +func (d *MockProvider) Records(_ context.Context) ([]*endpoint.Endpoint, error) { + return d.testCase.returnRecords, d.testCase.hasError +} + +func (d *MockProvider) ApplyChanges(_ context.Context, changes *plan.Changes) error { + if d.testCase.hasError != nil { + return d.testCase.hasError + } + if !reflect.DeepEqual(changes, d.testCase.expectedChanges) { + d.t.Errorf("expected changes '%v', got '%v'", d.testCase.expectedChanges, changes) + } + return nil +} + +func (d *MockProvider) AdjustEndpoints(endpoints []*endpoint.Endpoint) ([]*endpoint.Endpoint, error) { + if !reflect.DeepEqual(endpoints, d.testCase.expectedEndpointsToAdjust) { + d.t.Errorf("expected endpoints to adjust '%v', got '%v'", d.testCase.expectedEndpointsToAdjust, endpoints) + } + return d.testCase.returnAdjustedEndpoints, nil +} + +func (d *MockProvider) GetDomainFilter() endpoint.DomainFilter { + return d.testCase.returnDomainFilter +} diff --git a/cmd/webhook/main.go b/cmd/webhook/main.go new file mode 100644 index 0000000..e2385bd --- /dev/null +++ b/cmd/webhook/main.go @@ -0,0 +1,43 @@ +package main + +import ( + "fmt" + + "github.com/probstenhias/external-dns-anexia-webhook/cmd/webhook/init/configuration" + "github.com/probstenhias/external-dns-anexia-webhook/cmd/webhook/init/dnsprovider" + "github.com/probstenhias/external-dns-anexia-webhook/cmd/webhook/init/logging" + "github.com/probstenhias/external-dns-anexia-webhook/cmd/webhook/init/server" + "github.com/probstenhias/external-dns-anexia-webhook/pkg/webhook" + log "github.com/sirupsen/logrus" +) + +const banner = ` + _ _ _ _______ _____ _ + / \ | \ | | ____\ \/ /_ _| / \ + / _ \ | \| | _| \ / | | / _ \ + / ___ \| |\ | |___ / \ | | / ___ \ +/_/ \_\_| \_|_____/_/\_\___/_/ \_\ +external-dns-anexia-webhook +version: %s (%s) + +` + +var ( + Version = "local" + Gitsha = "?" 
+) + +func main() { + fmt.Printf(banner, Version, Gitsha) + + logging.Init() + + config := configuration.Init() + provider, err := dnsprovider.Init(config) + if err != nil { + log.Fatalf("failed to initialize provider: %v", err) + } + + srv := server.Init(config, webhook.New(provider)) + server.ShutdownGracefully(srv) +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..ac44d4c --- /dev/null +++ b/go.mod @@ -0,0 +1,37 @@ +module github.com/probstenhias/external-dns-anexia-webhook + +go 1.22.2 + +require ( + github.com/caarlos0/env/v11 v11.0.0 + github.com/go-chi/chi/v5 v5.0.12 + github.com/sirupsen/logrus v1.9.3 + github.com/stretchr/testify v1.9.0 + go.anx.io/go-anxcloud v0.6.4 + sigs.k8s.io/external-dns v0.14.1 +) + +require ( + github.com/aws/aws-sdk-go v1.51.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + golang.org/x/net v0.22.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apimachinery v0.29.3 // indirect + k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/utils v0.0.0-20240102154912-e7106e64919e // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..fdfc3ca --- /dev/null +++ b/go.sum @@ -0,0 +1,130 @@ +github.com/aws/aws-sdk-go v1.51.1 h1:AFvTihcDPanvptoKS09a4yYmNtPm3+pXlk6uYHmZiFk= +github.com/aws/aws-sdk-go v1.51.1/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/caarlos0/env/v11 v11.0.0 h1:ZIlkOjuL3xoZS0kmUJlF74j2Qj8GMOq3CDLX/Viak8Q= +github.com/caarlos0/env/v11 v11.0.0/go.mod h1:2RC3HQu8BQqtEK3V4iHPxj0jOdWdbPpWJ6pOueeU1xM= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-chi/chi/v5 v5.0.12 h1:9euLV5sTrTNTRUU9POmDUvfxyj6LAABLUcEWO+JJb4s= +github.com/go-chi/chi/v5 v5.0.12/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/gogo/protobuf v1.3.2 
h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo/v2 v2.16.0 h1:7q1w9frJDzninhXxjZd+Y/x54XNjG/UlRLIYPZafsPM= +github.com/onsi/ginkgo/v2 v2.16.0/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= +github.com/onsi/gomega v1.31.1 h1:KYppCUK+bUgAZwHOu7EXVBKyQA6ILvOESHkn/tgoqvo= +github.com/onsi/gomega v1.31.1/go.mod h1:y40C95dwAD1Nz36SsEnxvfFe8FFfNxzI5eJ0EYGyAy0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b 
h1:gQZ0qzfKHQIybLANtM3mBXNUtOfsCFXeTsnBqCsx1KM= +github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.anx.io/go-anxcloud v0.6.4 h1:SaFqYHFZC96PNt0cp7bX+4khAWg1u1hUdSt11R++fn8= +go.anx.io/go-anxcloud v0.6.4/go.mod h1:aattNBzzaDFtPRU/eTsNK1lDdTFa8QUXal+w1SQPCF0= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text 
v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= +golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= +k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/external-dns v0.14.1 h1:CtXyAa1KyFRDM3UqV2B06ZwgEWCN1rqxJWGkLWSgUdg= +sigs.k8s.io/external-dns v0.14.1/go.mod h1:bOqmVWTMbPYsd/yViG4HxOFRyGeuXi1q+cmfYJ8ZxQI= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git 
a/internal/anexia/configuration.go b/internal/anexia/configuration.go new file mode 100644 index 0000000..e1099f3 --- /dev/null +++ b/internal/anexia/configuration.go @@ -0,0 +1,22 @@ +package anexia + +import ( + "github.com/caarlos0/env/v11" + log "github.com/sirupsen/logrus" +) + +// Configuration holds configuration from environmental variables +type Configuration struct { + APIToken string `env:"ANEXIA_API_TOKEN,notEmpty"` + APIEndpointURL string `env:"ANEXIA_API_URL"` + DryRun bool `env:"DRY_RUN" envDefault:"false"` +} + +// Init sets up configuration by reading set environmental variables +func Init() Configuration { + cfg := Configuration{} + if err := env.Parse(&cfg); err != nil { + log.Fatalf("error reading configuration from environment: %v", err) + } + return cfg +} diff --git a/internal/anexia/endpoint_helpers.go b/internal/anexia/endpoint_helpers.go new file mode 100644 index 0000000..761ec73 --- /dev/null +++ b/internal/anexia/endpoint_helpers.go @@ -0,0 +1,28 @@ +package anexia + +import ( + "sigs.k8s.io/external-dns/endpoint" + "sigs.k8s.io/external-dns/plan" +) + +func GetCreateDeleteSetsFromChanges(changes *plan.Changes) ([]*endpoint.Endpoint, []*endpoint.Endpoint) { + toCreate := make([]*endpoint.Endpoint, len(changes.Create)) + copy(toCreate, changes.Create) + + toDelete := make([]*endpoint.Endpoint, len(changes.Delete)) + copy(toDelete, changes.Delete) + + for i, updateOldEndpoint := range changes.UpdateOld { + updateNewEndpoint := changes.UpdateNew[i] + if endpointsAreDifferent(*updateOldEndpoint, *updateNewEndpoint) { + toDelete = append(toDelete, updateOldEndpoint) + toCreate = append(toCreate, updateNewEndpoint) + } + } + return toCreate, toDelete +} + +func endpointsAreDifferent(a endpoint.Endpoint, b endpoint.Endpoint) bool { + return a.DNSName != b.DNSName || a.RecordType != b.RecordType || + a.RecordTTL != b.RecordTTL || !a.Targets.Same(b.Targets) +} diff --git a/internal/anexia/provider.go b/internal/anexia/provider.go new file mode 100644 index 0000000..518c241 --- /dev/null +++ b/internal/anexia/provider.go @@ -0,0 +1,334 @@ +package anexia + +import ( + "context" + "fmt" + "sort" + "strings" + + log "github.com/sirupsen/logrus" + "go.anx.io/go-anxcloud/pkg/api" + "go.anx.io/go-anxcloud/pkg/api/types" + anxcloudDns "go.anx.io/go-anxcloud/pkg/apis/clouddns/v1" + "go.anx.io/go-anxcloud/pkg/client" + "sigs.k8s.io/external-dns/endpoint" + "sigs.k8s.io/external-dns/plan" + "sigs.k8s.io/external-dns/provider" +) + +type DNSClient struct { + client types.API + dryRun bool +} + +type DNSService interface { + GetZones(ctx context.Context) ([]*anxcloudDns.Zone, error) + GetRecords(ctx context.Context) ([]*anxcloudDns.Record, error) + GetRecordsByZoneNameAndName(ctx context.Context, zoneName, name string) ([]*anxcloudDns.Record, error) + GetZonesByDomainName(ctx context.Context, domainName string) ([]*anxcloudDns.Zone, error) + DeleteRecord(ctx context.Context, zoneName, recordID string) error + CreateRecord(ctx context.Context, zoneName string, record *anxcloudDns.Record) error +} + +func (c *DNSClient) GetZones(ctx context.Context) ([]*anxcloudDns.Zone, error) { + log.Debugf("get all zones ...") + channel := make(types.ObjectChannel) + + if err := c.client.List(ctx, &anxcloudDns.Zone{}, api.ObjectChannel(&channel)); err != nil { + log.Errorf("failed to list zones: %v", err) + return nil, err + } + + zone := anxcloudDns.Zone{} + + zones := make([]*anxcloudDns.Zone, 0) + for res := range channel { + if err := res(&zone); err != nil { + log.Errorf("failed to parse 
zone: %v", err)
+			return nil, err
+		}
+		// res decodes into the same zone variable on every iteration, so store a
+		// copy to avoid every slice entry pointing at the last decoded zone.
+		zoneCopy := zone
+		zones = append(zones, &zoneCopy)
+	}
+
+	return zones, nil
+}
+
+func (c *DNSClient) GetRecords(ctx context.Context) ([]*anxcloudDns.Record, error) {
+	log.Debugf("get all records ...")
+
+	allZones, err := c.GetZones(ctx)
+	if err != nil {
+		log.Errorf("failed to get zones: %v", err)
+		return nil, err
+	}
+
+	records := make([]*anxcloudDns.Record, 0)
+	for _, zone := range allZones {
+		zoneName := zone.Name
+		log.Debugf("get records for zone %s ...", zoneName)
+
+		// list and consume each zone's records with its own channel so that no
+		// results are dropped when iterating over multiple zones
+		channel := make(types.ObjectChannel)
+		if err := c.client.List(ctx, &anxcloudDns.Record{ZoneName: zoneName}, api.ObjectChannel(&channel)); err != nil {
+			log.Errorf("failed to list records for zone %s: %v", zoneName, err)
+			return nil, err
+		}
+
+		for res := range channel {
+			// decode into a fresh record per iteration so the stored pointers stay distinct
+			record := anxcloudDns.Record{}
+			if err := res(&record); err != nil {
+				log.Errorf("failed to parse record: %v", err)
+				return nil, err
+			}
+			records = append(records, &record)
+		}
+	}
+
+	return records, nil
+}
+
+func (c *DNSClient) GetRecordsByZoneNameAndName(ctx context.Context, zoneName, name string) ([]*anxcloudDns.Record, error) {
+	log.Debugf("get records for zone %s and name %s ...", zoneName, name)
+	channel := make(types.ObjectChannel)
+
+	if err := c.client.List(ctx, &anxcloudDns.Record{ZoneName: zoneName, Name: name}, api.ObjectChannel(&channel)); err != nil {
+		log.Errorf("failed to list records for zone %s and name %s: %v", zoneName, name, err)
+		return nil, err
+	}
+
+	records := make([]*anxcloudDns.Record, 0)
+	for res := range channel {
+		// decode into a fresh record per iteration so the stored pointers stay distinct
+		record := anxcloudDns.Record{}
+		if err := res(&record); err != nil {
+			log.Errorf("failed to parse record: %v", err)
+			return nil, err
+		}
+		records = append(records, &record)
+	}
+
+	return records, nil
+}
+
+func (c *DNSClient) GetZonesByDomainName(ctx context.Context, domainName string) ([]*anxcloudDns.Zone, error) {
+	log.Debugf("get zones for domain %s ...", domainName)
+	allZones, err := c.GetZones(ctx)
+	if err != nil {
+		return nil, err
+	}
+	possibleZones := make([]*anxcloudDns.Zone, 0)
+	for _, zone := range allZones {
+		if strings.HasSuffix(domainName, zone.Name) {
+			possibleZones = append(possibleZones, zone)
+		}
+	}
+
+	// sort zones by length, longest first
+	// this is necessary because the domain name might match multiple zones
+	// and we want to use the most specific one
+	sort.Slice(possibleZones, func(i, j int) bool {
+		return len(possibleZones[i].Name) > len(possibleZones[j].Name)
+	})
+	return possibleZones, nil
+}
+
+func (c *DNSClient) DeleteRecord(ctx context.Context, zoneName, recordID string) error {
+	if c.dryRun {
+		log.Infof("dry run: would delete record %s", recordID)
+		return nil
+	}
+	log.Debugf("delete record %s ...", recordID)
+	err := c.client.Destroy(ctx, &anxcloudDns.Record{ZoneName: zoneName, Identifier: recordID})
+	if err != nil {
+		log.Errorf("failed to delete record %s: %v", recordID, err)
+		return err
+	}
+	log.Debug("record deleted")
+	return nil
+}
+
+func (c *DNSClient) CreateRecord(ctx context.Context, _ string, record *anxcloudDns.Record) error {
+	if c.dryRun {
+		log.Infof("dry run: would create record %v", record)
+		return nil
+	}
+	log.Debugf("create record %v ...", record)
+	err := c.client.Create(ctx, record)
+	if err != nil {
+		log.Errorf("failed to create record %v: %v", record, err)
+		return err
+	}
+	log.Debug("record created")
+	return nil
+}
+
+type Provider struct {
+	provider.BaseProvider
+	client       DNSService
+	domainFilter endpoint.DomainFilter
+}
+
+// NewProvider returns a new instance of the Anexia provider
+func NewProvider(configuration *Configuration, domainFilter endpoint.DomainFilter) (*Provider, error) { + client, err := createClient(configuration) + if err != nil { + return nil, fmt.Errorf("failed to create Anexia client: %w", err) + } + prov := &Provider{ + client: &DNSClient{client: client, dryRun: configuration.DryRun}, + domainFilter: domainFilter, + } + return prov, nil +} + +func createClient(configuration *Configuration) (apiClient types.API, err error) { + options := []client.Option{ + client.TokenFromString(configuration.APIToken), + } + + if configuration.APIEndpointURL == "" { + log.Warn("API endpoint URL is not set, using default") + } else { + log.Debugf("Creating Anexia client with base URL %s", configuration.APIEndpointURL) + options = append(options, client.BaseURL(configuration.APIEndpointURL)) + } + apiClient, err = api.NewAPI( + api.WithClientOptions( + options..., + ), + ) + + if err != nil { + return nil, err + } + if configuration.DryRun { + log.Warnf("Dry run mode enabled, no changes will be made") + } + return apiClient, nil +} + +func (p *Provider) Records(ctx context.Context) ([]*endpoint.Endpoint, error) { + records, err := p.client.GetRecords(ctx) + if err != nil { + return nil, err + } + + groups := make(map[string][]*endpoint.Endpoint, 0) + for _, record := range records { + ep := recordToEndpoint(record) + if p.domainFilter.IsConfigured() && !p.domainFilter.Match(ep.DNSName) { + log.Debugf("Skipping record %s because it was filtered out by the domain filter", ep.DNSName) + continue + } + key := ep.DNSName + ep.RecordType + groups[key] = append(groups[key], ep) + } + + mergedEndpoints := make([]*endpoint.Endpoint, 0) + for _, endpoints := range groups { + mergedEndpoint := &endpoint.Endpoint{ + DNSName: endpoints[0].DNSName, + RecordType: endpoints[0].RecordType, + RecordTTL: endpoints[0].RecordTTL, + } + for _, ep := range endpoints { + mergedEndpoint.Targets = append(mergedEndpoint.Targets, ep.Targets...) + } + mergedEndpoints = append(mergedEndpoints, mergedEndpoint) + } + return mergedEndpoints, nil +} + +func recordToEndpoint(record *anxcloudDns.Record) *endpoint.Endpoint { + + return &endpoint.Endpoint{ + DNSName: func() string { + if record.Name == "@" || record.Name == "" { + return record.ZoneName + } + return record.Name + "." 
+ record.ZoneName + + }(), + RecordTTL: endpoint.TTL(record.TTL), + RecordType: record.Type, + Targets: []string{record.RData}, + } +} + +func (p *Provider) ApplyChanges(ctx context.Context, changes *plan.Changes) error { + epToCreate, epToDelete := GetCreateDeleteSetsFromChanges(changes) + log.Debugf("apply changes, create: %d, delete: %d", len(epToCreate), len(epToDelete)) + + recordsToDelete := make([]*anxcloudDns.Record, 0) + for _, ep := range epToDelete { + if p.domainFilter.IsConfigured() && !p.domainFilter.Match(ep.DNSName) { + log.Debugf("Skipping record %s because it was filtered out by the domain filter", ep.DNSName) + continue + } + potentialZones, err := p.client.GetZonesByDomainName(ctx, ep.DNSName) + if err != nil { + log.Errorf("failed to get zones for domain %s: %v", ep.DNSName, err) + break + } + for _, zone := range potentialZones { + recordName := strings.TrimSuffix(ep.DNSName, "."+zone.Name) + records, err := p.client.GetRecordsByZoneNameAndName(ctx, zone.Name, recordName) + if err != nil { + log.Errorf("failed to get records for zone %s and name %s: %v", zone.Name, recordName, err) + break + } + for _, record := range records { + if record.Type != ep.RecordType { + continue + } + for _, target := range ep.Targets { + if record.RData == target { + recordsToDelete = append(recordsToDelete, record) + break + } + } + } + } + } + + for _, record := range recordsToDelete { + if err := p.client.DeleteRecord(ctx, record.ZoneName, record.Identifier); err != nil { + return err + } + } + + recordsToCreate := make([]*anxcloudDns.Record, 0) + for _, ep := range epToCreate { + if p.domainFilter.IsConfigured() && !p.domainFilter.Match(ep.DNSName) { + log.Debugf("Skipping record %s because it was filtered out by the domain filter", ep.DNSName) + continue + } + zone, err := p.client.GetZonesByDomainName(ctx, ep.DNSName) + if err != nil { + log.Errorf("failed to get zones for domain %s: %v", ep.DNSName, err) + break + } + if len(zone) == 0 { + log.Warnf("no zone found for domain %s", ep.DNSName) + continue + } + for _, target := range ep.Targets { + recordsToCreate = append(recordsToCreate, &anxcloudDns.Record{ + ZoneName: zone[0].Name, + Name: strings.TrimSuffix(ep.DNSName, "."+zone[0].Name), + RData: target, + TTL: int(ep.RecordTTL), + Type: ep.RecordType, + }) + } + } + + for _, record := range recordsToCreate { + if err := p.client.CreateRecord(ctx, record.ZoneName, record); err != nil { + return err + } + } + + return nil + +} diff --git a/internal/anexia/provider_test.go b/internal/anexia/provider_test.go new file mode 100644 index 0000000..3cf59fd --- /dev/null +++ b/internal/anexia/provider_test.go @@ -0,0 +1,537 @@ +package anexia + +import ( + "fmt" + "math/rand" + "strings" + "testing" + + "context" + + "github.com/caarlos0/env/v11" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + anxcloudDns "go.anx.io/go-anxcloud/pkg/apis/clouddns/v1" + "sigs.k8s.io/external-dns/endpoint" + "sigs.k8s.io/external-dns/plan" +) + +var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +func TestNewProvider(t *testing.T) { + log.SetLevel(log.DebugLevel) + t.Setenv("ANEXIA_API_TOKEN", "1") + anexiaConfig := &Configuration{} + + err := env.Parse(anexiaConfig) + require.NoError(t, err) + domainFilter := endpoint.NewDomainFilter([]string{"a.de."}) + p, err := NewProvider(anexiaConfig, domainFilter) + require.NoError(t, err) + require.Equal(t, true, p.domainFilter.IsConfigured()) + require.Equal(t, false, 
p.domainFilter.Match("b.de.")) +} + +func TestRecords(t *testing.T) { + log.SetLevel(log.DebugLevel) + ctx := context.Background() + testCases := []struct { + name string + givenRecords []*anxcloudDns.Record + givenError error + givenDomainFilter endpoint.DomainFilter + expectedEndpoints []*endpoint.Endpoint + expectedError error + }{ + { + name: "no records", + givenRecords: []*anxcloudDns.Record{}, + expectedEndpoints: []*endpoint.Endpoint{}, + }, + { + name: "error reading records", + givenRecords: []*anxcloudDns.Record{}, + givenError: fmt.Errorf("test error"), + expectedEndpoints: []*endpoint.Endpoint{}, + expectedError: fmt.Errorf("test error"), + }, + { + name: "multiple A records", + givenRecords: createRecordSlice(3, func(i int) (string, string, string, int, string) { + recordName := "a" + fmt.Sprintf("%d", i+1) + zoneName := "a.de" + return recordName, zoneName, "A", ((i + 1) * 100), fmt.Sprintf("%d.%d.%d.%d", i+1, i+1, i+1, i+1) + }), + expectedEndpoints: createEndpointSlice(3, func(i int) (string, string, endpoint.TTL, []string) { + return "a" + fmt.Sprintf("%d", i+1) + ".a.de", "A", endpoint.TTL((i + 1) * 100), []string{fmt.Sprintf("%d.%d.%d.%d", i+1, i+1, i+1, i+1)} + }), + }, + { + name: "multiple records filtered by domain", + givenRecords: createRecordSlice(6, func(i int) (string, string, string, int, string) { + if i < 3 { + recordName := "a" + fmt.Sprintf("%d", i+1) + zoneName := "a.de" + return recordName, zoneName, "A", ((i + 1) * 100), fmt.Sprintf("%d.%d.%d.%d", i+1, i+1, i+1, i+1) + } + recordName := "b" + fmt.Sprintf("%d", i+1) + zoneName := "b.de" + return recordName, zoneName, "A", ((i + 1) * 100), fmt.Sprintf("%d.%d.%d.%d", i+1, i+1, i+1, i+1) + }), + givenDomainFilter: endpoint.NewDomainFilter([]string{"a.de"}), + expectedEndpoints: createEndpointSlice(3, func(i int) (string, string, endpoint.TTL, []string) { + return "a" + fmt.Sprintf("%d", i+1) + ".a.de", "A", endpoint.TTL((i + 1) * 100), []string{fmt.Sprintf("%d.%d.%d.%d", i+1, i+1, i+1, i+1)} + }), + }, + { + name: "records mapped to same endpoint", + givenRecords: createRecordSlice(3, func(i int) (string, string, string, int, string) { + if i < 2 { + return "", "a.de", "A", 300, fmt.Sprintf("%d.%d.%d.%d", i+1, i+1, i+1, i+1) + } + return "", "c.de", "A", 300, fmt.Sprintf("%d.%d.%d.%d", i+1, i+1, i+1, i+1) + + }), + expectedEndpoints: createEndpointSlice(2, func(i int) (string, string, endpoint.TTL, []string) { + if i == 0 { + return "a.de", "A", endpoint.TTL(300), []string{"1.1.1.1", "2.2.2.2"} + } + return "c.de", "A", endpoint.TTL(300), []string{"3.3.3.3"} + }), + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + mockDNSClient := &mockDNSClient{ + allRecords: tc.givenRecords, + returnError: tc.givenError, + } + provider := &Provider{client: mockDNSClient, domainFilter: tc.givenDomainFilter} + endpoints, err := provider.Records(ctx) + if tc.expectedError != nil { + require.Error(t, err) + require.Equal(t, tc.expectedError, err) + return + } + require.NoError(t, err) + require.Len(t, endpoints, len(tc.expectedEndpoints)) + assert.ElementsMatch(t, tc.expectedEndpoints, endpoints) + }) + } +} + +func TestApplyChanges(t *testing.T) { + log.SetLevel(log.DebugLevel) + log.SetReportCaller(true) + deZoneName := "de" + comZoneName := "com" + ctx := context.Background() + testCases := []struct { + name string + givenRecords []*anxcloudDns.Record + givenZones []*anxcloudDns.Zone + givenZoneRecords map[string][]*anxcloudDns.Record + givenDomainFilter endpoint.DomainFilter + whenChanges 
*plan.Changes + expectedRecordsCreated map[string][]*anxcloudDns.Record + expectedRecordsDeleted map[string][]string + }{ + { + name: "no changes", + givenZones: createZoneSlice(0, nil), + givenZoneRecords: map[string][]*anxcloudDns.Record{}, + whenChanges: &plan.Changes{}, + expectedRecordsCreated: nil, + expectedRecordsDeleted: nil, + }, + { + name: "create one record in a blank zone", + givenZones: createZoneSlice(1, func(_ int) string { + return deZoneName + }), + givenZoneRecords: map[string][]*anxcloudDns.Record{ + deZoneName: createRecordSlice(0, nil), + }, + whenChanges: &plan.Changes{ + Create: createEndpointSlice(1, func(_ int) (string, string, endpoint.TTL, []string) { + return "a." + deZoneName, "A", endpoint.TTL(300), []string{"1.2.3.4"} + }), + }, + expectedRecordsCreated: map[string][]*anxcloudDns.Record{ + deZoneName: createRecordSlice(1, func(_ int) (string, string, string, int, string) { + return "a", deZoneName, "A", 300, "1.2.3.4" + }), + }, + expectedRecordsDeleted: nil, + }, + { + name: "create a record which is filtered out from the domain filter", + givenZones: createZoneSlice(1, func(_ int) string { + return deZoneName + }), + givenZoneRecords: map[string][]*anxcloudDns.Record{ + deZoneName: createRecordSlice(0, nil), + }, + givenDomainFilter: endpoint.NewDomainFilter([]string{"b.de"}), + whenChanges: &plan.Changes{ + Create: createEndpointSlice(1, func(_ int) (string, string, endpoint.TTL, []string) { + return "a." + deZoneName, "A", endpoint.TTL(300), []string{"1.2.3.4"} + }), + }, + expectedRecordsCreated: nil, + expectedRecordsDeleted: nil, + }, + { + name: "create 2 records from one endpoint in a blank zone", + givenZones: createZoneSlice(1, func(_ int) string { + return deZoneName + }), + givenZoneRecords: map[string][]*anxcloudDns.Record{ + deZoneName: createRecordSlice(0, nil), + }, + whenChanges: &plan.Changes{ + Create: createEndpointSlice(1, func(_ int) (string, string, endpoint.TTL, []string) { + return "a." 
+ deZoneName, "A", endpoint.TTL(300), []string{"1.2.3.4", "5.6.7.8"} + }), + }, + expectedRecordsCreated: map[string][]*anxcloudDns.Record{ + deZoneName: createRecordSlice(2, func(i int) (string, string, string, int, string) { + if i == 0 { + return "a", deZoneName, "A", 300, "1.2.3.4" + } + return "a", deZoneName, "A", 300, "5.6.7.8" + }), + }, + expectedRecordsDeleted: nil, + }, + { + name: "delete the only record in a zone", + givenZones: createZoneSlice(1, func(_ int) string { + return deZoneName + }), + givenZoneRecords: map[string][]*anxcloudDns.Record{ + deZoneName: createRecordSlice(1, func(_ int) (string, string, string, int, string) { + return "a", deZoneName, "A", 300, "1.2.3.4" + }), + }, + whenChanges: &plan.Changes{ + Delete: createEndpointSlice(1, func(_ int) (string, string, endpoint.TTL, []string) { + return "a.de", "A", endpoint.TTL(300), []string{"1.2.3.4"} + }), + }, + expectedRecordsDeleted: map[string][]string{ + deZoneName: {"0"}, + }, + }, + { + name: "delete a record which is filtered out from the domain filter", + givenZones: createZoneSlice(1, func(_ int) string { + return deZoneName + }), + givenZoneRecords: map[string][]*anxcloudDns.Record{ + deZoneName: createRecordSlice(1, func(_ int) (string, string, string, int, string) { + return "a", deZoneName, "A", 300, "1.2.3.4" + }), + }, + givenDomainFilter: endpoint.NewDomainFilter([]string{"b.de"}), + whenChanges: &plan.Changes{ + Delete: createEndpointSlice(1, func(_ int) (string, string, endpoint.TTL, []string) { + return "a.de", "A", endpoint.TTL(300), []string{"1.2.3.4"} + }), + }, + expectedRecordsDeleted: nil, + }, + { + name: "delete multiple records, in different zones", + givenZones: createZoneSlice(2, func(i int) string { + if i == 0 { + return deZoneName + } + return comZoneName + + }), + givenZoneRecords: map[string][]*anxcloudDns.Record{ + deZoneName: createRecordSlice(2, func(n int) (string, string, string, int, string) { + if n == 0 { + return "a", deZoneName, "A", 300, "1.2.3.4" + } + return "a", deZoneName, "A", 300, "5.6.7.8" + + }), + comZoneName: createRecordSlice(1, func(_ int) (string, string, string, int, string) { + return "a", comZoneName, "A", 300, "11.22.33.44" + }), + }, + whenChanges: &plan.Changes{ + Delete: createEndpointSlice(2, func(i int) (string, string, endpoint.TTL, []string) { + if i == 0 { + return "a.de", "A", endpoint.TTL(300), []string{"1.2.3.4", "5.6.7.8"} + } + return "a.com", "A", endpoint.TTL(300), []string{"11.22.33.44"} + + }), + }, + expectedRecordsDeleted: map[string][]string{ + deZoneName: {"0", "1"}, + comZoneName: {"0"}, + }, + }, + { + name: "delete record which is not in the zone, deletes nothing", + givenZones: createZoneSlice(1, func(_ int) string { + return deZoneName + }), + givenZoneRecords: map[string][]*anxcloudDns.Record{ + deZoneName: createRecordSlice(0, nil), + }, + whenChanges: &plan.Changes{ + Delete: createEndpointSlice(1, func(_ int) (string, string, endpoint.TTL, []string) { + return "a.de", "A", endpoint.TTL(300), []string{"1.2.3.4"} + }), + }, + expectedRecordsDeleted: nil, + }, + { + name: "delete one record from targets part of endpoint", + givenZones: createZoneSlice(1, func(_ int) string { + return deZoneName + }), + givenZoneRecords: map[string][]*anxcloudDns.Record{ + deZoneName: createRecordSlice(1, func(_ int) (string, string, string, int, string) { + return "a", deZoneName, "A", 300, "1.2.3.4" + }), + }, + whenChanges: &plan.Changes{ + Delete: createEndpointSlice(1, func(_ int) (string, string, endpoint.TTL, []string) { + return 
"a.de", "A", endpoint.TTL(300), []string{"1.2.3.4", "5.6.7.8"} + }), + }, + expectedRecordsDeleted: map[string][]string{ + deZoneName: {"0"}, + }, + }, + { + name: "update single record", + givenZones: createZoneSlice(1, func(_ int) string { + return deZoneName + }), + givenZoneRecords: map[string][]*anxcloudDns.Record{ + deZoneName: createRecordSlice(1, func(_ int) (string, string, string, int, string) { + return "a", deZoneName, "A", 300, "1.2.3.4" + }), + }, + whenChanges: &plan.Changes{ + UpdateOld: createEndpointSlice(1, func(_ int) (string, string, endpoint.TTL, []string) { + return "a.de", "A", endpoint.TTL(300), []string{"1.2.3.4"} + }), + UpdateNew: createEndpointSlice(1, func(_ int) (string, string, endpoint.TTL, []string) { + return "a.de", "A", endpoint.TTL(300), []string{"5.6.7.8"} + }), + }, + expectedRecordsDeleted: map[string][]string{ + deZoneName: {"0"}, + }, + expectedRecordsCreated: map[string][]*anxcloudDns.Record{ + deZoneName: createRecordSlice(1, func(_ int) (string, string, string, int, string) { + return "a", deZoneName, "A", 300, "5.6.7.8" + }), + }, + }, + { + name: "update a record which is filtered out by domain filter, does nothing", + givenZones: createZoneSlice(1, func(_ int) string { + return deZoneName + }), + givenZoneRecords: map[string][]*anxcloudDns.Record{ + deZoneName: createRecordSlice(1, func(_ int) (string, string, string, int, string) { + return "a", deZoneName, "A", 300, "1.2.3.4" + }), + }, + givenDomainFilter: endpoint.NewDomainFilter([]string{"b.de"}), + + whenChanges: &plan.Changes{ + UpdateOld: createEndpointSlice(1, func(_ int) (string, string, endpoint.TTL, []string) { + return "a.de", "A", endpoint.TTL(300), []string{"1.2.3.4"} + }), + UpdateNew: createEndpointSlice(1, func(_ int) (string, string, endpoint.TTL, []string) { + return "a.de", "A", endpoint.TTL(300), []string{"5.6.7.8"} + }), + }, + expectedRecordsDeleted: nil, + expectedRecordsCreated: nil, + }, + { + name: "update when old and new endpoint are the same, does nothing", + givenZones: createZoneSlice(1, func(_ int) string { + return deZoneName + }), + givenZoneRecords: map[string][]*anxcloudDns.Record{ + deZoneName: createRecordSlice(1, func(_ int) (string, string, string, int, string) { + return "a", deZoneName, "A", 300, "1.2.3.4" + }), + }, + whenChanges: &plan.Changes{ + UpdateOld: createEndpointSlice(1, func(_ int) (string, string, endpoint.TTL, []string) { + return "a.de", "A", endpoint.TTL(300), []string{"1.2.3.4"} + }), + UpdateNew: createEndpointSlice(1, func(_ int) (string, string, endpoint.TTL, []string) { + return "a.de", "A", endpoint.TTL(300), []string{"1.2.3.4"} + }), + }, + expectedRecordsDeleted: nil, + expectedRecordsCreated: nil, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + mockDNSClient := &mockDNSClient{ + allRecords: tc.givenRecords, + allZones: tc.givenZones, + zoneRecords: tc.givenZoneRecords, + } + provider := &Provider{client: mockDNSClient, domainFilter: tc.givenDomainFilter} + err := provider.ApplyChanges(ctx, tc.whenChanges) + + require.NoError(t, err) + require.Len(t, mockDNSClient.createdRecords, len(tc.expectedRecordsCreated)) + + for zoneName, expectedDeletedRecordIDs := range tc.expectedRecordsDeleted { + require.Len(t, mockDNSClient.deletedRecords[zoneName], len(expectedDeletedRecordIDs), "deleted records in zone '%s' do not fit", zoneName) + actualDeletedRecordIDs, ok := mockDNSClient.deletedRecords[zoneName] + require.True(t, ok) + assert.ElementsMatch(t, expectedDeletedRecordIDs, 
actualDeletedRecordIDs) + } + }) + } +} + +func TestAdjustEndpoints(t *testing.T) { + provider := &Provider{} + endpoints := createEndpointSlice(rand.Intn(5), func(_ int) (string, string, endpoint.TTL, []string) { + return RandStringRunes(10), RandStringRunes(1), endpoint.TTL(300), []string{RandStringRunes(5)} + }) + actualEndpoints, _ := provider.AdjustEndpoints(endpoints) + require.Equal(t, endpoints, actualEndpoints) +} + +type mockDNSClient struct { + returnError error + allRecords []*anxcloudDns.Record + zoneRecords map[string][]*anxcloudDns.Record + allZones []*anxcloudDns.Zone + createdRecords map[string][]*anxcloudDns.Record // zoneName -> recordCreates + deletedRecords map[string][]string // zoneName -> recordIDs +} + +func (c *mockDNSClient) GetRecords(_ context.Context) ([]*anxcloudDns.Record, error) { + log.Debugf("GetAllRecords called") + return c.allRecords, c.returnError +} + +func (c *mockDNSClient) GetZoneRecords(_ context.Context, zoneName string) ([]*anxcloudDns.Record, error) { + log.Debugf("GetZoneRecords called with zoneName %s", zoneName) + return c.zoneRecords[zoneName], c.returnError +} + +func (c *mockDNSClient) GetRecordsByZoneNameAndName(_ context.Context, zoneName, name string) ([]*anxcloudDns.Record, error) { + log.Debugf("GetRecordsByzoneNameAndName called with zoneName %s and name %s", zoneName, name) + result := make([]*anxcloudDns.Record, 0) + recordsOfZone := c.zoneRecords[zoneName] + for _, record := range recordsOfZone { + if record.Name == name { + result = append(result, record) + } + } + return result, c.returnError +} + +func (c *mockDNSClient) GetZones(_ context.Context) ([]*anxcloudDns.Zone, error) { + log.Debug("GetZones called ") + if c.allZones != nil { + for _, zone := range c.allZones { + log.Debugf("GetZones: zone '%s'", zone.Name) + } + } else { + log.Debug("GetZones: no zones") + } + return c.allZones, c.returnError +} + +func (c *mockDNSClient) GetZonesByDomainName(_ context.Context, domainName string) ([]*anxcloudDns.Zone, error) { + log.Debugf("GetZonesByDomainName called with domainName %s", domainName) + result := make([]*anxcloudDns.Zone, 0) + for _, zone := range c.allZones { + if strings.HasSuffix(domainName, zone.Name) { + result = append(result, zone) + } + } + return result, c.returnError +} + +func (c *mockDNSClient) CreateRecord(_ context.Context, zoneName string, record *anxcloudDns.Record) error { + log.Debugf("CreateRecord called with zoneName %s and record %v", zoneName, record) + if c.createdRecords == nil { + c.createdRecords = make(map[string][]*anxcloudDns.Record) + } + c.createdRecords[zoneName] = append(c.createdRecords[zoneName], record) + return c.returnError +} + +func (c *mockDNSClient) DeleteRecord(_ context.Context, zoneName string, recordID string) error { + log.Debugf("DeleteRecord called with zoneName %s and recordID %s", zoneName, recordID) + if c.deletedRecords == nil { + c.deletedRecords = make(map[string][]string) + } + c.deletedRecords[zoneName] = append(c.deletedRecords[zoneName], recordID) + return c.returnError +} + +func RandStringRunes(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = letterRunes[rand.Intn(len(letterRunes))] + } + return string(b) +} + +func createRecordSlice(count int, modifier func(int) (string, string, string, int, string)) []*anxcloudDns.Record { + records := make([]*anxcloudDns.Record, count) + for i := 0; i < count; i++ { + name, zone, typ, ttl, target := modifier(i) + records[i] = &anxcloudDns.Record{ + Name: name, + Type: typ, + TTL: ttl, + ZoneName: 
zone, + RData: target, + Identifier: fmt.Sprintf("%d", i), + } + } + return records +} + +func createEndpointSlice(count int, modifier func(int) (string, string, endpoint.TTL, []string)) []*endpoint.Endpoint { + endpoints := make([]*endpoint.Endpoint, count) + for i := 0; i < count; i++ { + name, typ, ttl, targets := modifier(i) + endpoints[i] = &endpoint.Endpoint{ + DNSName: name, + RecordType: typ, + Targets: targets, + RecordTTL: ttl, + } + } + return endpoints +} + +func createZoneSlice(count int, modifier func(int) string) []*anxcloudDns.Zone { + zones := make([]*anxcloudDns.Zone, count) + for i := 0; i < count; i++ { + zoneName := modifier(i) + zones[i] = &anxcloudDns.Zone{ + Name: zoneName, + } + } + return zones +} diff --git a/pkg/webhook/mediatype.go b/pkg/webhook/mediatype.go new file mode 100644 index 0000000..c17a90f --- /dev/null +++ b/pkg/webhook/mediatype.go @@ -0,0 +1,41 @@ +package webhook + +import ( + "fmt" + "strings" +) + +const ( + mediaTypeFormat = "application/external.dns.webhook+json;" + supportedMediaVersions = "1" +) + +var mediaTypeVersion1 = mediaTypeVersion("1") + +type mediaType string + +func mediaTypeVersion(v string) mediaType { + return mediaType(mediaTypeFormat + "version=" + v) +} + +func (m mediaType) Is(headerValue string) bool { + return string(m) == headerValue +} + +func checkAndGetMediaTypeHeaderValue(value string) (string, error) { + for _, v := range strings.Split(supportedMediaVersions, ",") { + if mediaTypeVersion(v).Is(value) { + return v, nil + } + } + + supportedMediaTypesString := "" + for i, v := range strings.Split(supportedMediaVersions, ",") { + sep := "" + if i < len(supportedMediaVersions)-1 { + sep = ", " + } + supportedMediaTypesString += string(mediaTypeVersion(v)) + sep + } + return "", fmt.Errorf("Unsupported media type version: '%s'. 
Supported media types are: '%s'", value, supportedMediaTypesString) +} diff --git a/pkg/webhook/webhook.go b/pkg/webhook/webhook.go new file mode 100644 index 0000000..4ecd2bc --- /dev/null +++ b/pkg/webhook/webhook.go @@ -0,0 +1,228 @@ +package webhook + +import ( + "encoding/json" + "fmt" + "net/http" + + log "github.com/sirupsen/logrus" + + "sigs.k8s.io/external-dns/endpoint" + "sigs.k8s.io/external-dns/plan" + "sigs.k8s.io/external-dns/provider" +) + +const ( + contentTypeHeader = "Content-Type" + contentTypePlaintext = "text/plain" + acceptHeader = "Accept" + varyHeader = "Vary" + healthPath = "/healthz" + logFieldRequestPath = "requestPath" + logFieldRequestMethod = "requestMethod" + logFieldError = "error" +) + +// Webhook for external dns provider +type Webhook struct { + provider provider.Provider +} + +// New creates a new instance of the Webhook +func New(provider provider.Provider) *Webhook { + p := Webhook{provider: provider} + return &p +} + +// Health handles the health request +func Health(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == healthPath { + w.WriteHeader(http.StatusOK) + return + } + next.ServeHTTP(w, r) + }) +} + +func (p *Webhook) contentTypeHeaderCheck(w http.ResponseWriter, r *http.Request) error { + return p.headerCheck(true, w, r) +} + +func (p *Webhook) acceptHeaderCheck(w http.ResponseWriter, r *http.Request) error { + return p.headerCheck(false, w, r) +} + +func (p *Webhook) headerCheck(isContentType bool, w http.ResponseWriter, r *http.Request) error { + var header string + if isContentType { + header = r.Header.Get(contentTypeHeader) + } else { + header = r.Header.Get(acceptHeader) + } + + if len(header) == 0 { + w.Header().Set(contentTypeHeader, contentTypePlaintext) + w.WriteHeader(http.StatusNotAcceptable) + + msg := "client must provide " + if isContentType { + msg += "a content type" + } else { + msg += "an accept header" + } + err := fmt.Errorf(msg) + + _, writeErr := fmt.Fprint(w, err.Error()) + if writeErr != nil { + requestLog(r).WithField(logFieldError, writeErr).Fatalf("error writing error message to response writer") + } + return err + } + + // as we support only one media type version, we can ignore the returned value + if _, err := checkAndGetMediaTypeHeaderValue(header); err != nil { + w.Header().Set(contentTypeHeader, contentTypePlaintext) + w.WriteHeader(http.StatusUnsupportedMediaType) + + msg := "Client must provide a valid versioned media type in the " + if isContentType { + msg += "content type" + } else { + msg += "accept header" + } + + err := fmt.Errorf(msg+": %s", err.Error()) + _, writeErr := fmt.Fprint(w, err.Error()) + if writeErr != nil { + requestLog(r).WithField(logFieldError, writeErr).Fatalf("error writing error message to response writer") + } + return err + } + + return nil +} + +// Records handles the get request for records +func (p *Webhook) Records(w http.ResponseWriter, r *http.Request) { + if err := p.acceptHeaderCheck(w, r); err != nil { + requestLog(r).WithField(logFieldError, err).Error("accept header check failed") + return + } + + requestLog(r).Debug("requesting records") + ctx := r.Context() + records, err := p.provider.Records(ctx) + if err != nil { + requestLog(r).WithField(logFieldError, err).Error("error getting records") + w.WriteHeader(http.StatusInternalServerError) + return + } + + requestLog(r).Debugf("returning records count: %d", len(records)) + w.Header().Set(contentTypeHeader, string(mediaTypeVersion1)) + 
w.Header().Set(varyHeader, contentTypeHeader) + err = json.NewEncoder(w).Encode(records) + if err != nil { + requestLog(r).WithField(logFieldError, err).Error("error encoding records") + w.WriteHeader(http.StatusInternalServerError) + return + } +} + +// ApplyChanges handles the post request for record changes +func (p *Webhook) ApplyChanges(w http.ResponseWriter, r *http.Request) { + if err := p.contentTypeHeaderCheck(w, r); err != nil { + requestLog(r).WithField(logFieldError, err).Error("content type header check failed") + return + } + + var changes plan.Changes + ctx := r.Context() + if err := json.NewDecoder(r.Body).Decode(&changes); err != nil { + w.Header().Set(contentTypeHeader, contentTypePlaintext) + w.WriteHeader(http.StatusBadRequest) + + errMsg := fmt.Sprintf("error decoding changes: %s", err.Error()) + if _, writeError := fmt.Fprint(w, errMsg); writeError != nil { + requestLog(r).WithField(logFieldError, writeError).Fatalf("error writing error message to response writer") + } + requestLog(r).WithField(logFieldError, err).Info(errMsg) + return + } + + requestLog(r).Debugf("requesting apply changes, create: %d , updateOld: %d, updateNew: %d, delete: %d", + len(changes.Create), len(changes.UpdateOld), len(changes.UpdateNew), len(changes.Delete)) + if err := p.provider.ApplyChanges(ctx, &changes); err != nil { + w.Header().Set(contentTypeHeader, contentTypePlaintext) + w.WriteHeader(http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusNoContent) +} + +// AdjustEndpoints handles the post request for adjusting endpoints +func (p *Webhook) AdjustEndpoints(w http.ResponseWriter, r *http.Request) { + if err := p.contentTypeHeaderCheck(w, r); err != nil { + log.Errorf("content type header check failed, request method: %s, request path: %s", r.Method, r.URL.Path) + return + } + if err := p.acceptHeaderCheck(w, r); err != nil { + log.Errorf("accept header check failed, request method: %s, request path: %s", r.Method, r.URL.Path) + return + } + + var pve []*endpoint.Endpoint + if err := json.NewDecoder(r.Body).Decode(&pve); err != nil { + w.Header().Set(contentTypeHeader, contentTypePlaintext) + w.WriteHeader(http.StatusBadRequest) + + errMessage := fmt.Sprintf("failed to decode request body: %v", err) + log.Infof(errMessage+" , request method: %s, request path: %s", r.Method, r.URL.Path) + if _, writeError := fmt.Fprint(w, errMessage); writeError != nil { + requestLog(r).WithField(logFieldError, writeError).Fatalf("error writing error message to response writer") + } + return + } + + log.Debugf("requesting adjust endpoints count: %d", len(pve)) + pve, err := p.provider.AdjustEndpoints(pve) + if err != nil { + w.Header().Set(contentTypeHeader, contentTypePlaintext) + w.WriteHeader(http.StatusInternalServerError) + return + } + out, _ := json.Marshal(&pve) + + log.Debugf("return adjust endpoints response, resultEndpointCount: %d", len(pve)) + w.Header().Set(contentTypeHeader, string(mediaTypeVersion1)) + w.Header().Set(varyHeader, contentTypeHeader) + if _, writeError := fmt.Fprint(w, string(out)); writeError != nil { + requestLog(r).WithField(logFieldError, writeError).Fatalf("error writing response") + } +} + +func (p *Webhook) Negotiate(w http.ResponseWriter, r *http.Request) { + if err := p.acceptHeaderCheck(w, r); err != nil { + requestLog(r).WithField(logFieldError, err).Error("accept header check failed") + return + } + + b, err := p.provider.GetDomainFilter().MarshalJSON() + if err != nil { + log.Errorf("failed to marshal domain filter, request method: %s, 
request path: %s", r.Method, r.URL.Path)
+		w.WriteHeader(http.StatusInternalServerError)
+		return
+	}
+
+	w.Header().Set(contentTypeHeader, string(mediaTypeVersion1))
+	if _, writeError := w.Write(b); writeError != nil {
+		// the status line has already been sent by Write, so only log the failure
+		requestLog(r).WithField(logFieldError, writeError).Error("error writing response")
+		return
+	}
+}
+
+func requestLog(r *http.Request) *log.Entry {
+	return log.WithFields(log.Fields{logFieldRequestMethod: r.Method, logFieldRequestPath: r.URL.Path})
+}
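
For reference, below is a minimal sketch (not part of the patch) of how the versioned media-type negotiation implemented above is exercised end to end: the Records handler only answers requests whose Accept header carries "application/external.dns.webhook+json;version=1". The import path of the webhook package, the stubProvider type, and the manual "/records" route registration are assumptions for illustration; in the actual patch the routes are wired up in cmd/webhook/init/server/server.go.

package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"

	"sigs.k8s.io/external-dns/endpoint"
	"sigs.k8s.io/external-dns/plan"
	"sigs.k8s.io/external-dns/provider"

	// import path is an assumption; use the module path declared in go.mod
	"example.invalid/external-dns-anexia-webhook/pkg/webhook"
)

// stubProvider stands in for the Anexia provider so the sketch stays self-contained.
// Embedding provider.BaseProvider supplies AdjustEndpoints and GetDomainFilter.
type stubProvider struct {
	provider.BaseProvider
}

func (s *stubProvider) Records(context.Context) ([]*endpoint.Endpoint, error) {
	return []*endpoint.Endpoint{{DNSName: "www.example.de", RecordType: "A", Targets: []string{"1.2.3.4"}}}, nil
}

func (s *stubProvider) ApplyChanges(context.Context, *plan.Changes) error { return nil }

func main() {
	wh := webhook.New(&stubProvider{})

	mux := http.NewServeMux()
	mux.HandleFunc("/records", wh.Records)
	srv := httptest.NewServer(webhook.Health(mux))
	defer srv.Close()

	req, _ := http.NewRequest(http.MethodGet, srv.URL+"/records", nil)
	// without this header the handler answers 406 Not Acceptable
	req.Header.Set("Accept", "application/external.dns.webhook+json;version=1")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.StatusCode, "content-type:", resp.Header.Get("Content-Type"))
}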