diff --git a/.editorconfig b/.editorconfig index 007f685b..e7c8df00 100644 --- a/.editorconfig +++ b/.editorconfig @@ -33,3 +33,10 @@ profile = black [*.sh] indent_size = 4 +indent_style = tab + +[Makefile] +indent_style = tab + +[*entrypoint*] +indent_style = tab diff --git a/.github/linters/.golangci.yml b/.github/linters/.golangci.yml index 32c844b7..b7f3374f 100644 --- a/.github/linters/.golangci.yml +++ b/.github/linters/.golangci.yml @@ -1,2 +1,3 @@ +version: "2" run: timeout: 10m diff --git a/.github/super-linter.env b/.github/super-linter.env index 427ac01b..222e1796 100644 --- a/.github/super-linter.env +++ b/.github/super-linter.env @@ -8,6 +8,7 @@ VALIDATE_JAVASCRIPT_STANDARD=false VALIDATE_JSCPD=false VALIDATE_JSON_PRETTIER=false VALIDATE_KUBERNETES_KUBECONFORM=false +VALIDATE_MARKDOWN=false VALIDATE_MARKDOWN_PRETTIER=false VALIDATE_YAML=false VALIDATE_YAML_PRETTIER=false @@ -19,4 +20,7 @@ VALIDATE_PYTHON_MYPY=false VALIDATE_PYTHON_PYINK=false VALIDATE_PYTHON_PYLINT=false +VALIDATE_DOCKERFILE_TRIVY=false +VALIDATE_TRIVY=false + VALIDATE_GO=false diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 024d5383..0cd1b9c4 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -23,6 +23,10 @@ env: jobs: multiplatform_build: + permissions: + contents: read + packages: write + actions: read strategy: fail-fast: false matrix: @@ -39,35 +43,40 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Validate + env: + EVENT_NAME: ${{ github.event_name }} + GITHUB_REF: ${{ github.ref }} run: | - if [[ "${{ github.event_name }}" == "workflow_dispatch" && "${{ github.ref }}" == refs/tags* ]]; then + if [[ "$EVENT_NAME" == "workflow_dispatch" && "$GITHUB_REF" == refs/tags/* ]]; then echo -e "\033[91mManual workflow run on tags is not allowed!\033[0m" exit 1 fi - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + persist-credentials: false - name: Set up 
QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 - name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: registry: ghcr.io username: ${GITHUB_ACTOR} password: ${{secrets.GITHUB_TOKEN}} - name: Prepare Tag run: echo "TAG_NAME=$(echo ${TAG_NAME} | sed 's@refs/tags/@@;s@refs/heads/@@;s@/@_@g')" >> $GITHUB_ENV - - name: Get package IDs for delete + - name: Get package IDs for delete id: get-ids-for-delete - uses: Netcracker/get-package-ids@v0.0.1 + uses: Netcracker/get-package-ids@84bc8eb8bed50218be76e671b3a24c35a1300979 with: component-name: ${{ matrix.component.name }} component-tag: ${{ env.TAG_NAME }} - access-token: ${{secrets.GITHUB_TOKEN}} + access-token: ${{secrets.GITHUB_TOKEN}} if: ${{ env.PUSH }} - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@16ebe778df0e7752d2cfcbd924afdbbd89c1a755 # v6.6.1 with: no-cache: true context: ${{ matrix.component.context }} @@ -76,8 +85,8 @@ jobs: push: ${{ env.PUSH }} tags: ghcr.io/netcracker/${{ matrix.component.name }}:${{ env.TAG_NAME }} provenance: false - - uses: actions/delete-package-versions@v5 - with: + - uses: actions/delete-package-versions@e5bc658cc4c965c472efe991f8beea3981499c55 # v5.0.0 + with: package-name: ${{ matrix.component.name }} package-type: 'container' package-version-ids: ${{ steps.get-ids-for-delete.outputs.ids-for-delete }} diff --git a/.github/workflows/clean.yaml b/.github/workflows/clean.yaml index 0db92fa1..093df3cd 100644 --- a/.github/workflows/clean.yaml +++ b/.github/workflows/clean.yaml @@ -8,6 +8,8 @@ env: jobs: delete: if: github.event.ref_type == 'branch' + permissions: + packages: write runs-on: ubuntu-24.04 strategy: fail-fast: false @@ 
-23,20 +25,20 @@ jobs: - name: Prepare Tag run: echo "TAG_NAME=$(echo ${TAG_NAME} | sed 's@refs/heads/@@;s@/@_@g')" >> $GITHUB_ENV - name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: registry: ghcr.io username: ${GITHUB_ACTOR} password: ${{secrets.GITHUB_TOKEN}} - - name: Get package IDs for delete + - name: Get package IDs for delete id: get-ids-for-delete - uses: Netcracker/get-package-ids@v0.0.1 + uses: Netcracker/get-package-ids@84bc8eb8bed50218be76e671b3a24c35a1300979 # v0.0.1 with: component-name: ${{ matrix.component.name }} component-tag: ${{ env.TAG_NAME }} access-token: ${{secrets.GITHUB_TOKEN}} - - uses: actions/delete-package-versions@v5 - with: + - uses: actions/delete-package-versions@e5bc658cc4c965c472efe991f8beea3981499c55 # v5.0.0 + with: package-name: ${{ matrix.component.name }} package-type: 'container' package-version-ids: ${{ steps.get-ids-for-delete.outputs.ids-for-delete }} diff --git a/.github/workflows/license.yaml b/.github/workflows/license.yaml index 82c392c3..55509f10 100644 --- a/.github/workflows/license.yaml +++ b/.github/workflows/license.yaml @@ -8,15 +8,24 @@ env: COPYRIGHT_YEAR: '2024-2025' jobs: license: + permissions: + contents: write + pull-requests: write runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@v4 - - run: docker run -v "${PWD}:/src" -i ghcr.io/google/addlicense -v -c "${{ env.COPYRIGHT_COMPANY }}" -y "${{ env.COPYRIGHT_YEAR }}" $(find . -type f -name "*.go" -o -type f -name "*.sh" -o -type f -name "*.py" | xargs echo) + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + persist-credentials: false + token: ${{ secrets.GITHUB_TOKEN }} + - run: | + find . 
-type f \( -name "*.go" -o -name "*.sh" -o -name "*.py" \) -print0 | \ + xargs -0 docker run -v "${PWD}:/src" -i ghcr.io/google/addlicense \ + -v -c "${{ env.COPYRIGHT_COMPANY }}" -y "${{ env.COPYRIGHT_YEAR }}" - name: Create Pull Request - uses: peter-evans/create-pull-request@v7 + uses: peter-evans/create-pull-request@5e914681df9dc83aa4e4905692ca88beb2f9e91f # v7.0.5 with: commit-message: Auto-update license header branch: license-update title: Add License Header body: Automated license header update - delete-branch: true \ No newline at end of file + delete-branch: true diff --git a/.github/workflows/super-linter.yaml b/.github/workflows/super-linter.yaml index da308b2c..b8b74539 100644 --- a/.github/workflows/super-linter.yaml +++ b/.github/workflows/super-linter.yaml @@ -37,6 +37,7 @@ jobs: with: # Full git history is needed to get a proper list of changed files within `super-linter` fetch-depth: 0 + persist-credentials: false - name: "Load super-linter environment file" run: | @@ -47,7 +48,7 @@ jobs: fi - name: Lint Code Base - uses: super-linter/super-linter@v7 + uses: super-linter/super-linter@ffde3b2b33b745cb612d787f669ef9442b1339a6 env: VALIDATE_ALL_CODEBASE: ${{ inputs.full_scan || false }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index b6940fed..ac18b82f 100644 --- a/.gitignore +++ b/.gitignore @@ -3,4 +3,4 @@ build/_output extras/target/* deployments target/* -charts/**/*.gz \ No newline at end of file +charts/**/*.gz diff --git a/.sqlfluff b/.sqlfluff new file mode 100644 index 00000000..c84cfeb5 --- /dev/null +++ b/.sqlfluff @@ -0,0 +1,3 @@ +[sqlfluff] +# Specify the SQL dialect to use for linting +dialect = postgres diff --git a/Makefile b/Makefile index ea073faf..c3f3e5e6 100755 --- a/Makefile +++ b/Makefile @@ -50,7 +50,7 @@ vet: compile: CGO_ENABLED=0 go build -o ./build/_output/bin/postgres-operator \ - -gcflags all=-trimpath=${GOPATH} -asmflags all=-trimpath=${GOPATH} ./cmd/pgskipper-operator + -gcflags 
all=-trimpath=${GOPATH} -asmflags all=-trimpath=${GOPATH} ./cmd/pgskipper-operator docker-build: $(foreach docker_tag,$(DOCKER_NAMES),docker build --file="${DOCKER_FILE}" --pull -t $(docker_tag) ./;) @@ -67,18 +67,18 @@ clean: # Generate code generate: controller-gen $(CONTROLLER_GEN) crd:crdVersions={v1} \ - object:headerFile="generator/boilerplate.go.txt" \ - paths="./api/common/v1" + object:headerFile="generator/boilerplate.go.txt" \ + paths="./api/common/v1" $(CONTROLLER_GEN) crd:crdVersions={v1} \ - object:headerFile="generator/boilerplate.go.txt" \ - paths="./api/apps/v1" \ - output:crd:artifacts:config=charts/patroni-services/crds/ + object:headerFile="generator/boilerplate.go.txt" \ + paths="./api/apps/v1" \ + output:crd:artifacts:config=charts/patroni-services/crds/ $(CONTROLLER_GEN) crd:crdVersions={v1} \ - object:headerFile="generator/boilerplate.go.txt" \ - paths="./api/patroni/v1" \ - output:crd:artifacts:config=charts/patroni-core/crds/ + object:headerFile="generator/boilerplate.go.txt" \ + paths="./api/patroni/v1" \ + output:crd:artifacts:config=charts/patroni-core/crds/ # Find or download controller-gen # download controller-gen if necessary controller-gen: diff --git a/README.md b/README.md index d8edfec5..f23b9a3c 100644 --- a/README.md +++ b/README.md @@ -28,4 +28,4 @@ There are no well-defined rules for troubleshooting, as each task is unique, but ## Useful links * [Installation Guide](/docs/public/installation.md) -* [Features](/docs/public/features) \ No newline at end of file +* [Features](/docs/public/features) diff --git a/build/bin/entrypoint b/build/bin/entrypoint index 94b60477..b8dec799 100755 --- a/build/bin/entrypoint +++ b/build/bin/entrypoint @@ -3,10 +3,10 @@ # This is documented here: # https://docs.openshift.com/container-platform/3.11/creating_images/guidelines.html#openshift-specific-guidelines -if ! 
whoami &>/dev/null; then - if [ -w /etc/passwd ]; then - echo "${USER_NAME:-postgres-operator}:x:$(id -u):$(id -g):${USER_NAME:-postgres-operator} user:${HOME}:/sbin/nologin" >> /etc/passwd - fi +if ! whoami >/dev/null 2>&1; then + if [ -w /etc/passwd ]; then + echo "${USER_NAME:-postgres-operator}:x:$(id -u):$(id -g):${USER_NAME:-postgres-operator} user:${HOME}:/sbin/nologin" >>/etc/passwd + fi fi -exec ${OPERATOR} $@ +exec ${OPERATOR} "$@" diff --git a/build/configs/patroni.config.yaml b/build/configs/patroni.config.yaml index b20f7386..9928b9e6 100644 --- a/build/configs/patroni.config.yaml +++ b/build/configs/patroni.config.yaml @@ -80,4 +80,4 @@ tags: clonefrom: false nofailover: ${DR_MODE} noloadbalance: false - nosync: false \ No newline at end of file + nosync: false diff --git a/build/configs/postgres-backup-daemon.collector-config b/build/configs/postgres-backup-daemon.collector-config index f5fda104..0b31c354 100644 --- a/build/configs/postgres-backup-daemon.collector-config +++ b/build/configs/postgres-backup-daemon.collector-config @@ -12,4 +12,4 @@ "output_selector": "health" } } -] \ No newline at end of file +] diff --git a/build/configs/postgres-granular-backup-daemon.collector-config b/build/configs/postgres-granular-backup-daemon.collector-config index ef087539..15c3c878 100644 --- a/build/configs/postgres-granular-backup-daemon.collector-config +++ b/build/configs/postgres-granular-backup-daemon.collector-config @@ -12,4 +12,4 @@ "output_selector": "health" } } -] \ No newline at end of file +] diff --git a/build/configs/postgresql/postgresql_prepare.sql b/build/configs/postgresql/postgresql_prepare.sql index 79104381..9fdcca33 100644 --- a/build/configs/postgresql/postgresql_prepare.sql +++ b/build/configs/postgresql/postgresql_prepare.sql @@ -1,120 +1,220 @@ /* -Function pg_patroni_service_slot_cleaner removes stuck replication slots which can damage PostgresSQL cluster. 
-Replication slot will be removed if they are stuck more than wal_keep_segments WAL files. +Function pg_patroni_service_slot_cleaner removes stuck replication slots +which can damage PostgresSQL cluster. +Replication slot will be removed if they are stuck more than +wal_keep_segments WAL files. Parameters: -* ud_allowed_slot_delay - use this value instead of value of wal_keep_segments if value >=0. 0 means no delay and all slots will be removed. +* ud_allowed_slot_delay - use this value instead of value of + wal_keep_segments if value >=0. 0 means no delay and all slots + will be removed. Function uses dblink to connect to replicas and remove replication slots. -For more information about dblink see https://www.postgresql.org/docs/9.6/static/dblink.html. +For more information about dblink see +https://www.postgresql.org/docs/9.6/static/dblink.html. */ CREATE EXTENSION IF NOT EXISTS dblink; -DROP FUNCTION IF EXISTS pg_patroni_service_slot_cleaner_for_host(host text, allowed_slot_delay integer, use_old_cmp_function boolean); -CREATE FUNCTION pg_patroni_service_slot_cleaner_for_host(host text, allowed_slot_delay integer, use_old_cmp_function boolean) - RETURNS void AS $$ +DROP FUNCTION IF EXISTS pg_patroni_service_slot_cleaner_for_host( + host text, + allowed_slot_delay integer, + use_old_cmp_function boolean +); +CREATE FUNCTION pg_patroni_service_slot_cleaner_for_host( + host text, + allowed_slot_delay integer, + use_old_cmp_function boolean +) +RETURNS void AS $$ DECLARE - slots text[]; - get_slots_query text DEFAULT 'SELECT slot_name, restart_lsn, active, active_pid FROM pg_replication_slots'; - sql_query text; - pg_password text; - conn_string text; - fe_slot_name RECORD; - remove_result text; - active_pid_result integer; - slot_delay float; + slots text[]; + get_slots_query text DEFAULT 'SELECT slot_name, restart_lsn, active, active_pid FROM pg_replication_slots'; + sql_query text; + pg_password text; + conn_string text; + fe_slot_name RECORD; + remove_result 
text; + active_pid_result integer; + slot_delay float; BEGIN - SELECT INTO pg_password command_output FROM pg_patroni_service_slot_cleaner_passwd_output LIMIT 1; + SELECT INTO pg_password command_output + FROM pg_patroni_service_slot_cleaner_passwd_output + LIMIT 1; - -- perform slot cleanup for each active replica - RAISE NOTICE 'Start check for host %', host; - conn_string := format('hostaddr=%s port=5432 user=postgres password=' || pg_password, host); + -- perform slot cleanup for each active replica + RAISE NOTICE 'Start check for host %', host; + conn_string := format( + 'hostaddr=%s port=5432 user=postgres password=' || pg_password, + host + ); - -- Iterate over slots and remove stuck slots - FOR fe_slot_name IN SELECT * FROM dblink(conn_string, get_slots_query) as remote_repl_slots(slot_name text, restart_lsn pg_lsn, active boolean, active_pid integer) LOOP - RAISE NOTICE 'Found slot %', fe_slot_name; - IF use_old_cmp_function THEN - SELECT INTO slot_delay pg_xlog_location_diff(pg_current_xlog_location(), fe_slot_name.restart_lsn)::bigint ; - ELSE - SELECT INTO slot_delay pg_wal_lsn_diff(pg_current_wal_lsn(), fe_slot_name.restart_lsn)::bigint ; - END IF; - RAISE NOTICE 'Slot delay % bytes', slot_delay; - slot_delay := 1.0 * slot_delay / 1024 / 1024 ; - RAISE NOTICE 'Slot delay % Mb, slot active: %', slot_delay, fe_slot_name.active; - IF slot_delay > allowed_slot_delay THEN - IF fe_slot_name.active THEN - RAISE NOTICE 'Will terminate backend with pid % which keeps slot', fe_slot_name.active_pid; - sql_query := 'select pg_terminate_backend(' || fe_slot_name.active_pid || ')'; - SELECT into remove_result rem_res FROM dblink(conn_string, sql_query) as remote_rem_res(rem_res text); - RAISE NOTICE 'Result %', remove_result; - FOR i IN 1..30 LOOP - sql_query := 'select active, active_pid from pg_replication_slots where slot_name='|| QUOTE_LITERAL(fe_slot_name.slot_name); - SELECT rem_active, rem_active_pid into remove_result, active_pid_result FROM 
dblink(conn_string, sql_query) as remote_rem_res(rem_active text, rem_active_pid integer); - EXIT WHEN remove_result = 'f'; - RAISE NOTICE 'Wait slot {active: %, pid: % }. Will try to repeat termination.', remove_result, active_pid_result; - sql_query := 'select pg_terminate_backend(' || active_pid_result || ')'; - SELECT into remove_result rem_res FROM dblink(conn_string, sql_query) as remote_rem_res(rem_res text); - PERFORM pg_sleep(2); - END LOOP; - RAISE NOTICE 'Slot active: %', remove_result; - IF NOT remove_result = 'f' THEN - RAISE NOTICE 'Skip removal because slot still active'; - CONTINUE ; + -- Iterate over slots and remove stuck slots + FOR fe_slot_name IN + SELECT * + FROM dblink(conn_string, get_slots_query) AS remote_repl_slots( + slot_name text, + restart_lsn pg_lsn, + active boolean, + active_pid integer + ) + LOOP + RAISE NOTICE 'Found slot %', fe_slot_name; + IF use_old_cmp_function THEN + SELECT INTO slot_delay + pg_xlog_location_diff( + pg_current_xlog_location(), + fe_slot_name.restart_lsn + )::bigint; + ELSE + SELECT INTO slot_delay + pg_wal_lsn_diff( + pg_current_wal_lsn(), + fe_slot_name.restart_lsn + )::bigint; + END IF; + RAISE NOTICE 'Slot delay % bytes', slot_delay; + slot_delay := 1.0 * slot_delay / 1024 / 1024; + RAISE NOTICE 'Slot delay % Mb, slot active: %', + slot_delay, fe_slot_name.active; + IF slot_delay > allowed_slot_delay THEN + IF fe_slot_name.active THEN + RAISE NOTICE 'Will terminate backend with pid % which keeps slot', + fe_slot_name.active_pid; + sql_query := 'SELECT pg_terminate_backend(' || + fe_slot_name.active_pid || ')'; + SELECT INTO remove_result rem_res + FROM dblink(conn_string, sql_query) AS remote_rem_res(rem_res text); + RAISE NOTICE 'Result %', remove_result; + FOR i IN 1..30 LOOP + sql_query := 'SELECT active, active_pid FROM pg_replication_slots WHERE slot_name=' || + QUOTE_LITERAL(fe_slot_name.slot_name); + SELECT + rem_active, + rem_active_pid + INTO remove_result, active_pid_result + FROM 
dblink(conn_string, sql_query) AS remote_rem_res( + rem_active text, + rem_active_pid integer + ); + EXIT WHEN remove_result = 'f'; + RAISE NOTICE 'Wait slot {active: %, pid: % }. Will try to repeat termination.', + remove_result, active_pid_result; + sql_query := 'SELECT pg_terminate_backend(' || + active_pid_result || ')'; + SELECT INTO remove_result rem_res + FROM dblink(conn_string, sql_query) AS remote_rem_res(rem_res text); + PERFORM pg_sleep(2); + END LOOP; + RAISE NOTICE 'Slot active: %', remove_result; + IF NOT remove_result = 'f' THEN + RAISE NOTICE 'Skip removal because slot still active'; + CONTINUE; + END IF; + END IF; + RAISE NOTICE 'Will remove slot % because delay % more than allowed value %.', + fe_slot_name.slot_name, slot_delay, allowed_slot_delay; + sql_query := 'SELECT pg_drop_replication_slot(' || + QUOTE_LITERAL(fe_slot_name.slot_name) || ')'; + SELECT INTO remove_result rem_res + FROM dblink(conn_string, sql_query) AS remote_rem_res(rem_res text); + RAISE NOTICE 'Remove result %', remove_result; END IF; - END IF; - RAISE NOTICE 'Will remove slot % because delay % more than allowed value % .', fe_slot_name.slot_name, slot_delay, allowed_slot_delay; - sql_query := 'select pg_drop_replication_slot(' || QUOTE_LITERAL(fe_slot_name.slot_name) || ')'; - SELECT into remove_result rem_res FROM dblink(conn_string, sql_query) as remote_rem_res(rem_res text); - RAISE NOTICE 'Remove result %', remove_result; - END IF; END LOOP; --check if slots are empty - sql_query := 'SELECT slot_name FROM pg_replication_slots'; - slots := ARRAY(SELECT * FROM dblink(conn_string, sql_query) as remote_repl_slots(slot_name text)); - RAISE NOTICE 'Slots on host % after cleanup %', host, slots; + sql_query := 'SELECT slot_name FROM pg_replication_slots'; + slots := ARRAY( + SELECT slot_name + FROM dblink(conn_string, sql_query) AS remote_repl_slots(slot_name text) + ); + RAISE NOTICE 'Slots on host % after cleanup %', host, slots; END; $$ LANGUAGE plpgsql; DROP FUNCTION IF 
EXISTS pg_patroni_service_slot_cleaner(); -DROP FUNCTION IF EXISTS pg_patroni_service_slot_cleaner(ud_allowed_slot_delay integer); -CREATE FUNCTION pg_patroni_service_slot_cleaner(ud_allowed_slot_delay integer default -1) +DROP FUNCTION IF EXISTS pg_patroni_service_slot_cleaner( + ud_allowed_slot_delay integer +); +CREATE FUNCTION pg_patroni_service_slot_cleaner( + ud_allowed_slot_delay integer DEFAULT -1 +) RETURNS void AS $$ DECLARE - replica RECORD; - allowed_slot_delay integer; - use_old_cmp_function boolean; + replica RECORD; + allowed_slot_delay integer; + use_old_cmp_function boolean; BEGIN - -- get password from env. shell scripts cannot be executed directly but can be executed inside COPY block - CREATE TEMPORARY TABLE pg_patroni_service_slot_cleaner_passwd_output (tt_id serial PRIMARY KEY NOT NULL, command_output text ); - COPY pg_patroni_service_slot_cleaner_passwd_output (command_output) FROM PROGRAM 'strings /proc/1/environ | sed -n "s/^PG_ROOT_PASSWORD=\(.*\)/\1/p"'; + -- get password from env. 
shell scripts cannot be executed directly + -- but can be executed inside COPY block + CREATE TEMPORARY TABLE pg_patroni_service_slot_cleaner_passwd_output ( + tt_id serial PRIMARY KEY NOT NULL, + command_output text + ); + COPY pg_patroni_service_slot_cleaner_passwd_output (command_output) + FROM PROGRAM 'strings /proc/1/environ | sed -n "s/^PG_ROOT_PASSWORD=\(.*\)/\1/p"'; - -- get current wal_keep_size value and determine allowed_slot_delay - IF ud_allowed_slot_delay < 0 THEN - SELECT setting::integer INTO allowed_slot_delay FROM pg_settings where name='wal_keep_size'; - ELSE - allowed_slot_delay = ud_allowed_slot_delay; - END IF; + -- get current wal_keep_size value and determine allowed_slot_delay + IF ud_allowed_slot_delay < 0 THEN + SELECT INTO allowed_slot_delay setting + FROM pg_settings + WHERE name='wal_keep_size'; + ELSE + allowed_slot_delay = ud_allowed_slot_delay; + END IF; - -- check if we have pg_xlog_location_diff or not (postgresql 9.6 vs postgresql 10) - select into use_old_cmp_function exists(select * from pg_proc where proname = 'pg_xlog_location_diff'); + -- check if we have pg_xlog_location_diff or not + -- (postgresql 9.6 vs postgresql 10) + SELECT INTO use_old_cmp_function + EXISTS( + SELECT * + FROM pg_proc + WHERE proname = 'pg_xlog_location_diff' + ); - RAISE NOTICE 'allowed_slot_delay: % Mb', allowed_slot_delay; + --todo[anin] 16Mb size per WAL file is used. 
+ --Honest calculation should get value from pg_settings + RAISE NOTICE 'allowed_slot_delay: % Mb', allowed_slot_delay; - -- perform slot cleanup for each active replica - FOR replica IN SELECT * FROM pg_stat_replication where application_name like 'pg-%-node%' LOOP - RAISE NOTICE 'Replica: % with addr %', replica.application_name, replica.client_addr; - PERFORM pg_patroni_service_slot_cleaner_for_host(host(replica.client_addr), allowed_slot_delay, use_old_cmp_function); - END LOOP; + -- perform slot cleanup for each active replica + FOR replica IN + SELECT * + FROM pg_stat_replication + WHERE application_name LIKE 'pg-%-node%' + LOOP + RAISE NOTICE 'Replica: % with addr %', + replica.application_name, replica.client_addr; + PERFORM pg_patroni_service_slot_cleaner_for_host( + host(replica.client_addr), + allowed_slot_delay, + use_old_cmp_function + ); + END LOOP; - --check slots on master - RAISE NOTICE 'Start master check'; - PERFORM pg_patroni_service_slot_cleaner_for_host('127.0.0.1', allowed_slot_delay, use_old_cmp_function); + --check slots on master + RAISE NOTICE 'Start master check'; + PERFORM pg_patroni_service_slot_cleaner_for_host( + '127.0.0.1', + allowed_slot_delay, + use_old_cmp_function + ); END; $$ LANGUAGE plpgsql; /** Schedule pg_patroni_service_slot_cleaner() for execution each 10 min */ CREATE EXTENSION IF NOT EXISTS pg_cron; -SELECT jobid, schedule, command, cron.unschedule(jobid) FROM cron.job WHERE command like '%pg_patroni_service_slot_cleaner%'; -SELECT cron.schedule('*/10 * * * *', 'select pg_patroni_service_slot_cleaner()'); -SELECT * FROM cron.job; \ No newline at end of file +SELECT + jobid, + schedule, + command, + cron.unschedule(jobid) AS unschedule_result +FROM cron.job +WHERE command LIKE '%pg_patroni_service_slot_cleaner%'; +SELECT cron.schedule( + '*/10 * * * *', + 'SELECT pg_patroni_service_slot_cleaner()' +); +SELECT + jobid, + schedule, + command +FROM cron.job; diff --git a/build/configs/telegraf-configmap 
b/build/configs/telegraf-configmap index 1068b007..7f53db3b 100644 --- a/build/configs/telegraf-configmap +++ b/build/configs/telegraf-configmap @@ -85,4 +85,4 @@ # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] ## Export metric collection time. - # export_timestamp = false \ No newline at end of file + # export_timestamp = false diff --git a/charts/patroni-core/patroni-core-quickstart-sample.yaml b/charts/patroni-core/patroni-core-quickstart-sample.yaml index 56a011a1..e5793aaa 100644 --- a/charts/patroni-core/patroni-core-quickstart-sample.yaml +++ b/charts/patroni-core/patroni-core-quickstart-sample.yaml @@ -54,4 +54,4 @@ tests: - check_scale_down_replica - check_scale_down_master - check_delete_master - - check_manual_switchover \ No newline at end of file + - check_manual_switchover diff --git a/charts/patroni-core/templates/hooks/creds-hook.yaml b/charts/patroni-core/templates/hooks/creds-hook.yaml index 07ab42ea..7673835a 100644 --- a/charts/patroni-core/templates/hooks/creds-hook.yaml +++ b/charts/patroni-core/templates/hooks/creds-hook.yaml @@ -39,11 +39,11 @@ spec: - name: APPLICATION_NAME valueFrom: fieldRef: - fieldPath: metadata.labels['app.kubernetes.io/part-of'] + fieldPath: metadata.labels['app.kubernetes.io/part-of'] - name: SESSION_ID valueFrom: fieldRef: - fieldPath: metadata.labels['deployment.netcracker.com/sessionId'] + fieldPath: metadata.labels['deployment.netcracker.com/sessionId'] resources: {{ toYaml .Values.operatorInit.resources | nindent 10 }} securityContext: @@ -53,6 +53,6 @@ spec: {{- if .Values.privateRegistry.enabled }} imagePullSecrets: {{- range $i, $v := .Values.privateRegistry.secrets }} - - name: {{ $v.name }} - {{- end }} - {{- end }} \ No newline at end of file + - name: {{ $v.name }} + {{- end }} + {{- end }} diff --git a/charts/patroni-core/templates/hooks/role_binding.yaml b/charts/patroni-core/templates/hooks/role_binding.yaml index 210e3d78..0ffdbe45 100644 --- 
a/charts/patroni-core/templates/hooks/role_binding.yaml +++ b/charts/patroni-core/templates/hooks/role_binding.yaml @@ -17,4 +17,4 @@ roleRef: kind: Role name: hook-role apiGroup: rbac.authorization.k8s.io -{{ end }} \ No newline at end of file +{{ end }} diff --git a/charts/patroni-core/templates/hooks/serviceaccount.yaml b/charts/patroni-core/templates/hooks/serviceaccount.yaml index 114416cd..a865e057 100644 --- a/charts/patroni-core/templates/hooks/serviceaccount.yaml +++ b/charts/patroni-core/templates/hooks/serviceaccount.yaml @@ -10,4 +10,4 @@ metadata: labels: name: patroni-core {{ include "kubernetes.labels" . | nindent 4 }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/patroni-core/templates/patroni-pdb.yaml b/charts/patroni-core/templates/patroni-pdb.yaml index d6499349..9a33548e 100644 --- a/charts/patroni-core/templates/patroni-pdb.yaml +++ b/charts/patroni-core/templates/patroni-pdb.yaml @@ -8,4 +8,4 @@ spec: selector: matchLabels: app: patroni -{{ end }} \ No newline at end of file +{{ end }} diff --git a/charts/patroni-core/templates/role_binding.yaml b/charts/patroni-core/templates/role_binding.yaml index ce82cc53..6cc2f53f 100644 --- a/charts/patroni-core/templates/role_binding.yaml +++ b/charts/patroni-core/templates/role_binding.yaml @@ -13,4 +13,4 @@ roleRef: kind: Role name: patroni-core-operator apiGroup: rbac.authorization.k8s.io -{{ end }} \ No newline at end of file +{{ end }} diff --git a/charts/patroni-core/templates/secrets/postgres-secret.yaml b/charts/patroni-core/templates/secrets/postgres-secret.yaml index 4deb9629..029744f3 100644 --- a/charts/patroni-core/templates/secrets/postgres-secret.yaml +++ b/charts/patroni-core/templates/secrets/postgres-secret.yaml @@ -12,4 +12,4 @@ data: password: {{ ( include "postgres.adminPassword" . ) | b64enc }} username: {{ ( include "postgres.adminUser" . 
) | b64enc }} type: Opaque -{{ end }} \ No newline at end of file +{{ end }} diff --git a/charts/patroni-core/templates/secrets/powa-secret.yaml b/charts/patroni-core/templates/secrets/powa-secret.yaml index 2093e068..6742dcb3 100644 --- a/charts/patroni-core/templates/secrets/powa-secret.yaml +++ b/charts/patroni-core/templates/secrets/powa-secret.yaml @@ -10,4 +10,4 @@ metadata: data: password: {{ default "Pow@pASsWORD" .Values.patroni.powa.password | b64enc }} type: Opaque -{{ end }} \ No newline at end of file +{{ end }} diff --git a/charts/patroni-core/templates/secrets/replicator-secret.yaml b/charts/patroni-core/templates/secrets/replicator-secret.yaml index 12b3f6c6..a9eb3e06 100644 --- a/charts/patroni-core/templates/secrets/replicator-secret.yaml +++ b/charts/patroni-core/templates/secrets/replicator-secret.yaml @@ -9,4 +9,4 @@ metadata: data: password: {{ .Values.replicatorPassword | b64enc }} username: {{ "replicator" | b64enc }} -type: Opaque \ No newline at end of file +type: Opaque diff --git a/charts/patroni-core/templates/serviceaccount.yaml b/charts/patroni-core/templates/serviceaccount.yaml index a1dafee0..fab8dca9 100644 --- a/charts/patroni-core/templates/serviceaccount.yaml +++ b/charts/patroni-core/templates/serviceaccount.yaml @@ -6,4 +6,4 @@ metadata: labels: name: patroni-core {{ include "kubernetes.labels" . 
| nindent 4 }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/patroni-core/templates/tests/tests-config.yaml b/charts/patroni-core/templates/tests/tests-config.yaml index 8abba1be..20d08f8c 100644 --- a/charts/patroni-core/templates/tests/tests-config.yaml +++ b/charts/patroni-core/templates/tests/tests-config.yaml @@ -11,4 +11,4 @@ data: {{ else }} dd_images: "" {{ end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/patroni-core/templates/tls/postgres-tls-certificate.yaml b/charts/patroni-core/templates/tls/postgres-tls-certificate.yaml index 4e35cc45..e195bfa5 100644 --- a/charts/patroni-core/templates/tls/postgres-tls-certificate.yaml +++ b/charts/patroni-core/templates/tls/postgres-tls-certificate.yaml @@ -32,4 +32,4 @@ spec: group: cert-manager.io {{- end }} {{- end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/patroni-core/templates/tls/postgres-tls-issuer.yaml b/charts/patroni-core/templates/tls/postgres-tls-issuer.yaml index d7d1caa5..d8dc2555 100644 --- a/charts/patroni-core/templates/tls/postgres-tls-issuer.yaml +++ b/charts/patroni-core/templates/tls/postgres-tls-issuer.yaml @@ -12,4 +12,4 @@ spec: selfSigned: {} {{- end }} {{- end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/patroni-services/monitoring/aws-grafana-dashboard.json b/charts/patroni-services/monitoring/aws-grafana-dashboard.json index f3f679b9..64f4ef23 100644 --- a/charts/patroni-services/monitoring/aws-grafana-dashboard.json +++ b/charts/patroni-services/monitoring/aws-grafana-dashboard.json @@ -2043,4 +2043,4 @@ "title": "Amazon Aurora Metrics", "version": 2, "weekStart": "" -} \ No newline at end of file +} diff --git a/charts/patroni-services/monitoring/azure-grafana-dashboard.json b/charts/patroni-services/monitoring/azure-grafana-dashboard.json index 562dd022..5c12d642 100644 --- a/charts/patroni-services/monitoring/azure-grafana-dashboard.json +++ 
b/charts/patroni-services/monitoring/azure-grafana-dashboard.json @@ -1231,4 +1231,4 @@ "title": "Postgres Azure Metrics", "version": 3, "weekStart": "" -} \ No newline at end of file +} diff --git a/charts/patroni-services/monitoring/cloudsql-grafana-dashboard.json b/charts/patroni-services/monitoring/cloudsql-grafana-dashboard.json index 6c585974..5e8ac083 100644 --- a/charts/patroni-services/monitoring/cloudsql-grafana-dashboard.json +++ b/charts/patroni-services/monitoring/cloudsql-grafana-dashboard.json @@ -1242,4 +1242,4 @@ "title": "CloudSQL Postgres", "version": 3, "weekStart": "" -} \ No newline at end of file +} diff --git a/charts/patroni-services/monitoring/dbaas-postgres-adapter.collector-config b/charts/patroni-services/monitoring/dbaas-postgres-adapter.collector-config index 3590d9ac..fb12d835 100644 --- a/charts/patroni-services/monitoring/dbaas-postgres-adapter.collector-config +++ b/charts/patroni-services/monitoring/dbaas-postgres-adapter.collector-config @@ -26,4 +26,4 @@ "metrics_type": "prometheus" } } -] \ No newline at end of file +] diff --git a/charts/patroni-services/monitoring/grafana-dashboard.json b/charts/patroni-services/monitoring/grafana-dashboard.json index 351f9359..36886412 100644 --- a/charts/patroni-services/monitoring/grafana-dashboard.json +++ b/charts/patroni-services/monitoring/grafana-dashboard.json @@ -9421,4 +9421,4 @@ "uid": "bc9f739dee3fe919d1a280c6e2bbea5255ec26ff", "version": 12, "weekStart": "" -} \ No newline at end of file +} diff --git a/charts/patroni-services/monitoring/postgres-exporter-grafana-dashboard.json b/charts/patroni-services/monitoring/postgres-exporter-grafana-dashboard.json index 708cc8de..fc0480fc 100644 --- a/charts/patroni-services/monitoring/postgres-exporter-grafana-dashboard.json +++ b/charts/patroni-services/monitoring/postgres-exporter-grafana-dashboard.json @@ -11874,4 +11874,4 @@ "title": "Postgres Exporter", "version": 2, "weekStart": "" -} \ No newline at end of file +} diff --git 
a/charts/patroni-services/templates/dbaas/dbaas-adapter-credentials.yml b/charts/patroni-services/templates/dbaas/dbaas-adapter-credentials.yml index d247b099..4a1b819e 100644 --- a/charts/patroni-services/templates/dbaas/dbaas-adapter-credentials.yml +++ b/charts/patroni-services/templates/dbaas/dbaas-adapter-credentials.yml @@ -16,4 +16,4 @@ data: username: {{ "dbaas-aggregator" | b64enc }} {{ end }} type: Opaque -{{ end }} \ No newline at end of file +{{ end }} diff --git a/charts/patroni-services/templates/dbaas/dbaas-aggregator-registration-credentials.yaml b/charts/patroni-services/templates/dbaas/dbaas-aggregator-registration-credentials.yaml index 1b3b5322..6e29ae31 100644 --- a/charts/patroni-services/templates/dbaas/dbaas-aggregator-registration-credentials.yaml +++ b/charts/patroni-services/templates/dbaas/dbaas-aggregator-registration-credentials.yaml @@ -11,4 +11,4 @@ data: password: {{ ( include "postgres.DBAAS_CLUSTER_DBA_CREDENTIALS_PASSWORD" . ) | b64enc }} username: {{ ( include "postgres.DBAAS_CLUSTER_DBA_CREDENTIALS_USERNAME" . ) | b64enc }} type: Opaque -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/patroni-services/templates/dbaas/dbaas-physical-databases-labels.yaml b/charts/patroni-services/templates/dbaas/dbaas-physical-databases-labels.yaml index 6279e725..fe4c2983 100644 --- a/charts/patroni-services/templates/dbaas/dbaas-physical-databases-labels.yaml +++ b/charts/patroni-services/templates/dbaas/dbaas-physical-databases-labels.yaml @@ -9,4 +9,4 @@ metadata: {{ include "kubernetes.labels" . 
| nindent 4 }} data: dbaas.physical_databases.registration.labels.json: {{ .Values.dbaas.labels | toJson | quote }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/patroni-services/templates/dbaas/dbaas-service.yaml b/charts/patroni-services/templates/dbaas/dbaas-service.yaml index df4a98c7..7442ab42 100644 --- a/charts/patroni-services/templates/dbaas/dbaas-service.yaml +++ b/charts/patroni-services/templates/dbaas/dbaas-service.yaml @@ -21,4 +21,4 @@ spec: {{- end }} selector: app: dbaas-postgres-adapter -{{ end }} \ No newline at end of file +{{ end }} diff --git a/charts/patroni-services/templates/monitoring-templates/dashboards/azure-grafana-dashboard.yaml b/charts/patroni-services/templates/monitoring-templates/dashboards/azure-grafana-dashboard.yaml index 9f902f0c..437f40bc 100644 --- a/charts/patroni-services/templates/monitoring-templates/dashboards/azure-grafana-dashboard.yaml +++ b/charts/patroni-services/templates/monitoring-templates/dashboards/azure-grafana-dashboard.yaml @@ -11,4 +11,4 @@ metadata: spec: gzipJson: {{ .Files.Get "monitoring/azure-grafana-dashboard.json.gz" | b64enc | quote }} {{- end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/patroni-services/templates/monitoring-templates/dashboards/cloud-sql-grafana-dashboard.yaml b/charts/patroni-services/templates/monitoring-templates/dashboards/cloud-sql-grafana-dashboard.yaml index e98b6f8a..25c7673c 100644 --- a/charts/patroni-services/templates/monitoring-templates/dashboards/cloud-sql-grafana-dashboard.yaml +++ b/charts/patroni-services/templates/monitoring-templates/dashboards/cloud-sql-grafana-dashboard.yaml @@ -11,4 +11,4 @@ metadata: spec: gzipJson: {{ .Files.Get "monitoring/cloudsql-grafana-dashboard.json.gz" | b64enc | quote }} {{- end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/patroni-services/templates/monitoring-templates/dashboards/grafana-dashboard.yaml 
b/charts/patroni-services/templates/monitoring-templates/dashboards/grafana-dashboard.yaml index 79d2150f..50481bf1 100644 --- a/charts/patroni-services/templates/monitoring-templates/dashboards/grafana-dashboard.yaml +++ b/charts/patroni-services/templates/monitoring-templates/dashboards/grafana-dashboard.yaml @@ -9,4 +9,4 @@ metadata: {{ include "kubernetes.labels" . | nindent 4 }} spec: gzipJson: {{ .Files.Get "monitoring/grafana-dashboard.json.gz" | b64enc | quote }} -{{ end }} \ No newline at end of file +{{ end }} diff --git a/charts/patroni-services/templates/patroni-pdb.yaml b/charts/patroni-services/templates/patroni-pdb.yaml index 38096dda..51424ea9 100644 --- a/charts/patroni-services/templates/patroni-pdb.yaml +++ b/charts/patroni-services/templates/patroni-pdb.yaml @@ -11,4 +11,4 @@ spec: selector: matchLabels: app: patroni -{{ end }} \ No newline at end of file +{{ end }} diff --git a/charts/patroni-services/templates/powa-ingress-deployment.yaml b/charts/patroni-services/templates/powa-ingress-deployment.yaml index 48ac0253..a726a0ff 100644 --- a/charts/patroni-services/templates/powa-ingress-deployment.yaml +++ b/charts/patroni-services/templates/powa-ingress-deployment.yaml @@ -32,4 +32,4 @@ spec: path: / pathType: ImplementationSpecific {{ end }} -{{ end }} \ No newline at end of file +{{ end }} diff --git a/charts/patroni-services/templates/query-exporter/query-exporter-config-map.yaml b/charts/patroni-services/templates/query-exporter/query-exporter-config-map.yaml index ac686201..8a1829ba 100644 --- a/charts/patroni-services/templates/query-exporter/query-exporter-config-map.yaml +++ b/charts/patroni-services/templates/query-exporter/query-exporter-config-map.yaml @@ -12,4 +12,4 @@ data: {{- include "query-exporter.queryContent" . | nindent 4 }} initial-config.yaml: | {{- include "query-exporter.queryContent" . 
| nindent 4 }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/patroni-services/templates/query-exporter/query-exporter-grafana-dashboard.yaml b/charts/patroni-services/templates/query-exporter/query-exporter-grafana-dashboard.yaml index d904ccd9..2d9ef0d9 100644 --- a/charts/patroni-services/templates/query-exporter/query-exporter-grafana-dashboard.yaml +++ b/charts/patroni-services/templates/query-exporter/query-exporter-grafana-dashboard.yaml @@ -9,4 +9,4 @@ metadata: {{ include "kubernetes.labels" . | nindent 4 }} spec: gzipJson: {{ .Files.Get "monitoring/query-exporter-grafana-dashboard.json.gz" | b64enc | quote }} -{{ end }} \ No newline at end of file +{{ end }} diff --git a/charts/patroni-services/templates/query-exporter/query-exporter-service-monitor.yaml b/charts/patroni-services/templates/query-exporter/query-exporter-service-monitor.yaml index de9902bd..d4747762 100644 --- a/charts/patroni-services/templates/query-exporter/query-exporter-service-monitor.yaml +++ b/charts/patroni-services/templates/query-exporter/query-exporter-service-monitor.yaml @@ -28,4 +28,4 @@ spec: selector: matchLabels: app: query-exporter -{{ end }} \ No newline at end of file +{{ end }} diff --git a/charts/patroni-services/templates/role_binding.yaml b/charts/patroni-services/templates/role_binding.yaml index 8f7c8f1a..9005b193 100644 --- a/charts/patroni-services/templates/role_binding.yaml +++ b/charts/patroni-services/templates/role_binding.yaml @@ -13,4 +13,4 @@ roleRef: kind: Role name: postgres-operator apiGroup: rbac.authorization.k8s.io -{{ end }} \ No newline at end of file +{{ end }} diff --git a/charts/patroni-services/templates/secrets/aws-credentials-secret.yaml b/charts/patroni-services/templates/secrets/aws-credentials-secret.yaml index ab3c0c1f..4b10e2d2 100644 --- a/charts/patroni-services/templates/secrets/aws-credentials-secret.yaml +++ b/charts/patroni-services/templates/secrets/aws-credentials-secret.yaml @@ -13,4 +13,4 @@ data: 
key_id: {{ default "#" .Values.externalDataBase.accessKeyId | b64enc }} type: Opaque {{ end }} -{{ end }} \ No newline at end of file +{{ end }} diff --git a/charts/patroni-services/templates/secrets/influx-db-secret.yaml b/charts/patroni-services/templates/secrets/influx-db-secret.yaml index 3ec4bfeb..34b02adc 100644 --- a/charts/patroni-services/templates/secrets/influx-db-secret.yaml +++ b/charts/patroni-services/templates/secrets/influx-db-secret.yaml @@ -16,4 +16,4 @@ data: username: "" {{ end }} type: Opaque -{{ end }} \ No newline at end of file +{{ end }} diff --git a/charts/patroni-services/templates/secrets/logical-repliction-controller.yaml b/charts/patroni-services/templates/secrets/logical-repliction-controller.yaml index 5d65d57e..10f8c261 100644 --- a/charts/patroni-services/templates/secrets/logical-repliction-controller.yaml +++ b/charts/patroni-services/templates/secrets/logical-repliction-controller.yaml @@ -11,4 +11,4 @@ data: username: {{ default "replicator" .Values.replicationController.apiUser | b64enc }} password: {{ default "paSsW0rdForReplicat!oN" .Values.replicationController.apiPassword | b64enc }} type: Opaque -{{ end }} \ No newline at end of file +{{ end }} diff --git a/charts/patroni-services/templates/secrets/monitoring-secret.yaml b/charts/patroni-services/templates/secrets/monitoring-secret.yaml index 1e5c3a09..1a337097 100644 --- a/charts/patroni-services/templates/secrets/monitoring-secret.yaml +++ b/charts/patroni-services/templates/secrets/monitoring-secret.yaml @@ -11,4 +11,4 @@ data: username: {{ "monitoring-user" | b64enc }} password: {{ .Values.metricCollector.userPassword | b64enc }} type: Opaque -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/patroni-services/templates/secrets/pgbouncer-secret.yaml b/charts/patroni-services/templates/secrets/pgbouncer-secret.yaml index d9a186d9..984ec84a 100644 --- a/charts/patroni-services/templates/secrets/pgbouncer-secret.yaml +++ 
b/charts/patroni-services/templates/secrets/pgbouncer-secret.yaml @@ -10,4 +10,4 @@ metadata: data: userlist.txt: {{ printf "\"%s\" \"%s\"" (default "pgbouncer" .Values.connectionPooler.username) (default "pgbouncer" .Values.connectionPooler.password) | b64enc }} type: Opaque -{{ end }} \ No newline at end of file +{{ end }} diff --git a/charts/patroni-services/templates/secrets/postgres-secret.yaml b/charts/patroni-services/templates/secrets/postgres-secret.yaml index 3a9083a8..d8245360 100644 --- a/charts/patroni-services/templates/secrets/postgres-secret.yaml +++ b/charts/patroni-services/templates/secrets/postgres-secret.yaml @@ -12,4 +12,4 @@ data: password: {{ ( include "postgres.adminPassword" . ) | b64enc }} username: {{ ( include "postgres.adminUser" . ) | b64enc }} type: Opaque -{{ end }} \ No newline at end of file +{{ end }} diff --git a/charts/patroni-services/templates/secrets/replicator-secret.yaml b/charts/patroni-services/templates/secrets/replicator-secret.yaml index 5756f6d7..5350bd09 100644 --- a/charts/patroni-services/templates/secrets/replicator-secret.yaml +++ b/charts/patroni-services/templates/secrets/replicator-secret.yaml @@ -11,4 +11,4 @@ data: password: {{ .Values.replicatorPassword | b64enc }} username: {{ "replicator" | b64enc }} type: Opaque -{{ end }} \ No newline at end of file +{{ end }} diff --git a/charts/patroni-services/templates/secrets/s3-storage-secret.yaml b/charts/patroni-services/templates/secrets/s3-storage-secret.yaml index 352f09fa..d046b5af 100644 --- a/charts/patroni-services/templates/secrets/s3-storage-secret.yaml +++ b/charts/patroni-services/templates/secrets/s3-storage-secret.yaml @@ -11,4 +11,4 @@ data: access_key: {{ .Values.backupDaemon.s3Storage.secretAccessKey | b64enc }} key_id: {{ .Values.backupDaemon.s3Storage.accessKeyId | b64enc }} type: Opaque -{{ end }} \ No newline at end of file +{{ end }} diff --git a/charts/patroni-services/templates/service_account.yaml 
b/charts/patroni-services/templates/service_account.yaml index 195c6ea3..76c19f4f 100644 --- a/charts/patroni-services/templates/service_account.yaml +++ b/charts/patroni-services/templates/service_account.yaml @@ -6,4 +6,4 @@ metadata: labels: name: {{ .Values.serviceAccount.name }} {{ include "kubernetes.labels" . | nindent 4 }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/patroni-services/templates/tests/tests-config.yaml b/charts/patroni-services/templates/tests/tests-config.yaml index 48e9d090..cd0c087c 100644 --- a/charts/patroni-services/templates/tests/tests-config.yaml +++ b/charts/patroni-services/templates/tests/tests-config.yaml @@ -11,4 +11,4 @@ data: {{ else }} dd_images: "" {{ end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/patroni-services/templates/tls/postgres-services-tls-certificate.yaml b/charts/patroni-services/templates/tls/postgres-services-tls-certificate.yaml index 08525c76..dc2973b2 100644 --- a/charts/patroni-services/templates/tls/postgres-services-tls-certificate.yaml +++ b/charts/patroni-services/templates/tls/postgres-services-tls-certificate.yaml @@ -31,4 +31,4 @@ spec: group: cert-manager.io {{- end }} {{- end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/patroni-services/templates/tls/postgres-services-tls-issuer.yaml b/charts/patroni-services/templates/tls/postgres-services-tls-issuer.yaml index c6200f05..3cfd6bc6 100644 --- a/charts/patroni-services/templates/tls/postgres-services-tls-issuer.yaml +++ b/charts/patroni-services/templates/tls/postgres-services-tls-issuer.yaml @@ -11,4 +11,4 @@ spec: selfSigned: {} {{- end }} {{- end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/controllers/patroni_core_controller.go b/controllers/patroni_core_controller.go index 0d885f97..4e678830 100644 --- a/controllers/patroni_core_controller.go +++ b/controllers/patroni_core_controller.go @@ -274,7 +274,7 @@ func (pr 
*PatroniCoreReconciler) Reconcile(ctx context.Context, request ctrl.Req return reconcile.Result{RequeueAfter: time.Minute}, err } - if err := pr.helper.ResourceManager.UpdatePatroniConfigMaps(); err != nil { + if err := pr.helper.UpdatePatroniConfigMaps(); err != nil { pr.logger.Error("error during update of patroni config maps", zap.Error(err)) // will not return err because there is a slight chance, that // update could happen at the same time when patroni will update leader/config info @@ -349,7 +349,7 @@ func (pr *PatroniCoreReconciler) stanzaUpgrade(create bool) error { if create { command = stanzaCreateStandbyCommand } - masterPod, err := pr.helper.ResourceManager.GetPodsByLabel(MasterLabel) + masterPod, err := pr.helper.GetPodsByLabel(MasterLabel) if err != nil || len(masterPod.Items) == 0 { pr.logger.Error("Can't get Patroni Leader for stanza upgrade execution", zap.Error(err)) return err @@ -443,12 +443,12 @@ func (pr *PatroniCoreReconciler) AddExcludeLabelToCm(c client.Client, cmName str pr.logger.Info(fmt.Sprintf("ConfigMap %s not found", cmName)) return nil } - if foundCm.ObjectMeta.Labels == nil { - foundCm.ObjectMeta.Labels = make(map[string]string) - foundCm.ObjectMeta.Labels["velero.io/exclude-from-backup"] = "true" + if foundCm.Labels == nil { + foundCm.Labels = make(map[string]string) + foundCm.Labels["velero.io/exclude-from-backup"] = "true" err = c.Update(context.TODO(), foundCm) if err != nil { - pr.logger.Error(fmt.Sprintf("Failed to update configMap %s", foundCm.ObjectMeta.Name), zap.Error(err)) + pr.logger.Error(fmt.Sprintf("Failed to update configMap %s", foundCm.Name), zap.Error(err)) return err } } @@ -464,7 +464,7 @@ func (pr *PatroniCoreReconciler) createTestsPods(cr *qubershipv1.PatroniCore) er } if state != "Running" { if state != "NotFound" { - if err := pr.helper.ResourceManager.DeletePodWithWaiting(integrationTestsPod); err != nil { + if err := pr.helper.DeletePodWithWaiting(integrationTestsPod); err != nil { pr.logger.Error("Error 
deleting pod with tests. Let's try to continue.", zap.Error(err)) } } @@ -472,7 +472,7 @@ func (pr *PatroniCoreReconciler) createTestsPods(cr *qubershipv1.PatroniCore) er pr.logger.Info("Policies is not empty, setting them to Test Pod") integrationTestsPod.Spec.Tolerations = cr.Spec.Policies.Tolerations } - if err := pr.helper.ResourceManager.CreatePod(integrationTestsPod); err != nil { + if err := pr.helper.CreatePod(integrationTestsPod); err != nil { return err } } diff --git a/controllers/postgresservice_controller.go b/controllers/postgresservice_controller.go index 4273aca9..a633a6cf 100644 --- a/controllers/postgresservice_controller.go +++ b/controllers/postgresservice_controller.go @@ -457,7 +457,7 @@ func (r *PostgresServiceReconciler) createTestsPods(cr *qubershipv1.PatroniServi } if state != "Running" { if state != "NotFound" { - if err := r.helper.ResourceManager.DeletePodWithWaiting(integrationTestsPod); err != nil { + if err := r.helper.DeletePodWithWaiting(integrationTestsPod); err != nil { r.logger.Error("Error deleting pod with tests. 
Let's try to continue.", zap.Error(err)) } } @@ -465,7 +465,7 @@ func (r *PostgresServiceReconciler) createTestsPods(cr *qubershipv1.PatroniServi r.logger.Info("Policies is not empty, setting them to Test Pod") integrationTestsPod.Spec.Tolerations = cr.Spec.Policies.Tolerations } - if err := r.helper.ResourceManager.CreatePod(integrationTestsPod); err != nil { + if err := r.helper.CreatePod(integrationTestsPod); err != nil { return err } } @@ -596,12 +596,12 @@ func (r *PostgresServiceReconciler) AddExcludeLabelToCm(c client.Client, cmName r.logger.Info(fmt.Sprintf("ConfigMap %s not found", cmName)) return nil } - if foundCm.ObjectMeta.Labels == nil { - foundCm.ObjectMeta.Labels = make(map[string]string) - foundCm.ObjectMeta.Labels["velero.io/exclude-from-backup"] = "true" + if foundCm.Labels == nil { + foundCm.Labels = make(map[string]string) + foundCm.Labels["velero.io/exclude-from-backup"] = "true" err = c.Update(context.TODO(), foundCm) if err != nil { - r.logger.Error(fmt.Sprintf("Failed to update configMap %s", foundCm.ObjectMeta.Name), zap.Error(err)) + r.logger.Error(fmt.Sprintf("Failed to update configMap %s", foundCm.Name), zap.Error(err)) return err } } diff --git a/docker-transfer/Dockerfile b/docker-transfer/Dockerfile index a5f00d71..3a36d739 100644 --- a/docker-transfer/Dockerfile +++ b/docker-transfer/Dockerfile @@ -2,4 +2,4 @@ FROM scratch # Transfer -COPY charts /charts \ No newline at end of file +COPY charts /charts diff --git a/docs/public/features/connection-pooler.md b/docs/public/features/connection-pooler.md index 28017af1..667d0fb8 100644 --- a/docs/public/features/connection-pooler.md +++ b/docs/public/features/connection-pooler.md @@ -1,4 +1,4 @@ # Limitations 1) Custom parameters for connections are not allowed [PG bouncer settings#ignore_startup_parameters](https://www.pgbouncer.org/config.html#generic-settings) -2) For Streaming usage with enabled PgBouncer, you should use *pg-patroni-direct* service instead *pg-patroni*, to avoid 
connection through PgBouncer. \ No newline at end of file +2) For Streaming usage with enabled PgBouncer, you should use *pg-patroni-direct* service instead *pg-patroni*, to avoid connection through PgBouncer. diff --git a/docs/public/features/major-upgrade.md b/docs/public/features/major-upgrade.md index 2154b2c3..04fab996 100644 --- a/docs/public/features/major-upgrade.md +++ b/docs/public/features/major-upgrade.md @@ -198,4 +198,4 @@ All of the `CREATE`/`DROP` statements should be performed in `powa` PostgreSQL d Follow official postgres release notes: [https://www.postgresql.org/docs/13/release-15.html](https://www.postgresql.org/docs/13/release-13.html) [https://www.postgresql.org/docs/14/release-15.html](https://www.postgresql.org/docs/14/release-14.html) -[https://www.postgresql.org/docs/15/release-15.html](https://www.postgresql.org/docs/15/release-15.html) \ No newline at end of file +[https://www.postgresql.org/docs/15/release-15.html](https://www.postgresql.org/docs/15/release-15.html) diff --git a/docs/public/features/tls-configuration.md b/docs/public/features/tls-configuration.md index 154e9cab..dafb455e 100644 --- a/docs/public/features/tls-configuration.md +++ b/docs/public/features/tls-configuration.md @@ -45,9 +45,9 @@ Follow the TLS Configuration section and configure `generateCerts` parameters. # Disable TLS -In case of `enabled` TLS in postgres service you can connect to `pg-patroni` service without ssl configuration. +In case of `enabled` TLS in postgres service you can connect to `pg-patroni` service without SSL configuration. -If by some reason you can not use `ssl` in your services, you can use `sslmode=disable` to avoid connection with ssl. +If by some reason you can not use `ssl` in your services, you can use `sslmode=disable` to avoid connection with SSL. 
# Certificate Update @@ -95,7 +95,7 @@ INTERNAL_TLS_ENABLED: true You can also place your certificates manually in deployment parameters if you don't want to use certificates generated by cert-manager or in cases if you are not using cert-manager. -1) For that you should have the following certificates in BASE64 format : +1) For that you should have the following certificates in base64 format : ```` ca.crt: ${ROOT_CA_CERTIFICATE} @@ -107,11 +107,11 @@ tls.key: ${PRIVATE_KEY} Where: -* ${ROOT_CA_CERTIFICATE} is the root CA in BASE64 format. +* ${ROOT_CA_CERTIFICATE} is the root CA in base64 format. -* ${CERTIFICATE} is the certificate in BASE64 format. +* ${CERTIFICATE} is the certificate in base64 format. -* ${PRIVATE_KEY} is the private key in BASE64 format. +* ${PRIVATE_KEY} is the private key in base64 format. Specify the certificates and other deployment parameters for patroni-core service and postgres-supplementary services like following example : @@ -170,4 +170,4 @@ the exception list at the balancers # Dbaas adapter configuration -In order to configure dbaas-adapter with TLS it's necessary to set `INTERNAL_TLS_ENABLED=true` parameter and configure `tls` parameters section. Dbaas adapter will accepting connections by `https` protocol on `8443` port (will be forwarded on service level). If `dbaas.aggregator.registrationAddress` contains `https` protocol, there is no need to change `dbaas.adapter.address` parameter, as self address will be modified with `https` protocol and `8443` port automatically. \ No newline at end of file +In order to configure dbaas-adapter with TLS it's necessary to set `INTERNAL_TLS_ENABLED=true` parameter and configure `tls` parameters section. Dbaas adapter will accepting connections by `https` protocol on `8443` port (will be forwarded on service level). 
If `dbaas.aggregator.registrationAddress` contains `https` protocol, there is no need to change `dbaas.adapter.address` parameter, as self address will be modified with `https` protocol and `8443` port automatically. diff --git a/pkg/client/client.go b/pkg/client/client.go index a4e423a1..9ab04a78 100644 --- a/pkg/client/client.go +++ b/pkg/client/client.go @@ -111,7 +111,12 @@ func (c *PostgresClient) ExecuteForDB(dbName, query string) error { if err != nil { return err } - defer conn.Close(context.Background()) + defer func() { + err := conn.Close(context.Background()) + if err != nil { + logger.Error("Error during closing connection", zap.Error(err)) + } + }() if _, err := conn.Exec(context.Background(), query); err == nil { return nil } else { diff --git a/pkg/deployment/backup.go b/pkg/deployment/backup.go index aa9a3a64..72271f47 100644 --- a/pkg/deployment/backup.go +++ b/pkg/deployment/backup.go @@ -15,7 +15,6 @@ package deployment import ( - "fmt" "strconv" netcrackerv1 "github.com/Netcracker/pgskipper-operator/api/apps/v1" @@ -290,7 +289,7 @@ func NewBackupDaemonDeployment(backupDaemon *netcrackerv1.BackupDaemon, pgCluste Name: "backup-data", VolumeSource: corev1.VolumeSource{ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: fmt.Sprintf("postgres-backup-pvc"), + ClaimName: "postgres-backup-pvc", ReadOnly: false, }, }, diff --git a/pkg/deployment/patroni.go b/pkg/deployment/patroni.go index 2acf69ae..6ed549b2 100644 --- a/pkg/deployment/patroni.go +++ b/pkg/deployment/patroni.go @@ -89,27 +89,6 @@ func getMaxPreparedTransactions(cr *patroniv1.PatroniCore) string { return ExtractParamsFromCRByName(cr, "max_prepared_transactions") } -func getDefaultPatroniAffinity() *corev1.Affinity { - return &corev1.Affinity{ - PodAntiAffinity: &corev1.PodAntiAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{ - { - LabelSelector: &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{ - 
{ - Key: "app.kubernetes.io/name", - Operator: metav1.LabelSelectorOpIn, - Values: []string{"patroni-core"}, - }, - }, - }, - TopologyKey: "kubernetes.io/hostname", - }, - }, - }, - } -} - func NewPatroniStatefulset(cr *patroniv1.PatroniCore, deploymentIdx int, clusterName string, patroniTemplate string, postgreSQLUserConf string, patroniLabels map[string]string) *appsv1.StatefulSet { logger := util.GetLogger() patroniSpec := cr.Spec.Patroni @@ -117,12 +96,6 @@ func NewPatroniStatefulset(cr *patroniv1.PatroniCore, deploymentIdx int, cluster dockerImage := patroniSpec.DockerImage nodes := patroniSpec.Storage.Nodes - affinity := patroniSpec.Affinity.DeepCopy() - if affinity == nil || (affinity.NodeAffinity == nil && affinity.PodAffinity == nil && affinity.PodAntiAffinity == nil) { - logger.Info("applying default affinity for patroni") - affinity = getDefaultPatroniAffinity() - } - stSet := &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: statefulsetName, @@ -316,14 +289,14 @@ func NewPatroniStatefulset(cr *patroniv1.PatroniCore, deploymentIdx int, cluster if patroniSpec.PriorityClassName != "" { stSet.Spec.Template.Spec.PriorityClassName = patroniSpec.PriorityClassName } - if stSet.Spec.Template.ObjectMeta.Annotations == nil { - stSet.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + if stSet.Spec.Template.Annotations == nil { + stSet.Spec.Template.Annotations = make(map[string]string) } - stSet.Spec.Template.ObjectMeta.Annotations["argocd.argoproj.io/ignore-resource-updates"] = "true" + stSet.Spec.Template.Annotations["argocd.argoproj.io/ignore-resource-updates"] = "true" if patroniSpec.PodAnnotations != nil { for k, v := range patroniSpec.PodAnnotations { - stSet.Spec.Template.ObjectMeta.Annotations[k] = v + stSet.Spec.Template.Annotations[k] = v } } diff --git a/pkg/disasterrecovery/cloud_sql.go b/pkg/disasterrecovery/cloud_sql.go index f96eb4f0..bc22353c 100644 --- a/pkg/disasterrecovery/cloud_sql.go +++ b/pkg/disasterrecovery/cloud_sql.go 
@@ -172,7 +172,9 @@ func (manager *CloudSQLDRManager) getStatus(response http.ResponseWriter) error func (manager *CloudSQLDRManager) changeMode(mode string) error { cloudSQlClient := manager.sqlClient log.Info(fmt.Sprintf("Received change to %s, processing ...", mode)) - if mode == "standby" { + + switch mode { + case "standby": primaryDbInstance, err := cloudSQlClient.getPrimaryNotInCurrentRegion() if err != nil { return err @@ -204,7 +206,8 @@ func (manager *CloudSQLDRManager) changeMode(mode string) error { if err := cloudSQlClient.dropInstance(primaryDbInstanceInCurRegion.Name); err != nil { return err } - } else if mode == "active" { + + case "active": dbInstance, err := cloudSQlClient.getReplicaInCurrentRegion() if err != nil { return err @@ -235,7 +238,10 @@ func (manager *CloudSQLDRManager) changeMode(mode string) error { return err } + default: + return fmt.Errorf("unsupported mode: %s", mode) } + return nil } @@ -613,10 +619,11 @@ func (manager *CloudSQLDRManager) processPreConfigureRequest(response http.Respo func (manager *CloudSQLDRManager) doPreConfigure(request v1.SiteManagerStatus) { if err := func(mode string, noWait bool) error { - if mode == "active" { + switch mode { + case "active": log.Info("Skipping Pre Configuration for standby -> active change") time.Sleep(30 * time.Second) - } else if mode == "standby" { + case "standby": if !noWait { log.Info("No-Wait flag has been passed as false, doing replication check") _ = manager.waitTillStandbyIsSynced() @@ -641,6 +648,8 @@ func (manager *CloudSQLDRManager) doPreConfigure(request v1.SiteManagerStatus) { log.Error("Failed to reconfigure CloudSQL proxy", zap.Error(err)) return err } + default: + return fmt.Errorf("unsupported mode: %s", mode) } return nil }(request.Mode, request.NoWait); err != nil { diff --git a/pkg/disasterrecovery/patroni.go b/pkg/disasterrecovery/patroni.go index 5a0d5a2d..02627466 100644 --- a/pkg/disasterrecovery/patroni.go +++ b/pkg/disasterrecovery/patroni.go @@ -369,14 
+369,15 @@ func (m *PatroniDRManager) clearStandbyClusterConfigInCR() error { func (m *PatroniDRManager) updateExternalService(mode string) { log.Info("Site Manager: Update external service") - if mode == "standby" { + switch mode { + case "standby": if cr, err := m.helper.GetPostgresServiceCR(); err == nil { activeHost := cr.Spec.SiteManager.ActiveClusterHost extService := m.helper.GetService("pg-"+m.cluster.ClusterName+"-external", namespace) extService.Spec.ExternalName = activeHost _ = m.helper.UpdateService(extService) } - } else if mode == "active" { + case "active": extService := m.helper.GetService("pg-"+m.cluster.ClusterName+"-external", namespace) extService.Spec.ExternalName = fmt.Sprintf("pg-%s.%s.svc.cluster.local", m.cluster.ClusterName, namespace) _ = m.helper.UpdateService(extService) diff --git a/pkg/helper/helper.go b/pkg/helper/helper.go index 11a424ca..7cb11936 100644 --- a/pkg/helper/helper.go +++ b/pkg/helper/helper.go @@ -85,9 +85,9 @@ func (h *Helper) AddNameAndUID(name string, uid types.UID, kind string) error { logger.Error(message, zap.Error(err)) return err } - h.ResourceManager.name = name - h.ResourceManager.uid = uid - h.ResourceManager.kind = kind + h.name = name + h.uid = uid + h.kind = kind return nil } func (h *Helper) SetCustomResource(cr *qubershipv1.PatroniServices) error { @@ -112,7 +112,7 @@ func (h *Helper) GetClient() client.Client { func (h *Helper) UpdatePostgresService(service *qubershipv1.PatroniServices) error { err := h.kubeClient.Update(context.TODO(), service) if err != nil { - logger.Error(fmt.Sprintf("Failed to update PatroniServices %v", service.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("Failed to update PatroniServices %v", service.Name), zap.Error(err)) return err } return nil @@ -165,8 +165,8 @@ func (h *Helper) WaitUntilReconcileIsDone() error { return false, nil } if strings.ToLower(cr.Status.Conditions[0].Type) == "failed" { - logger.Error("Recocile status failed, please fix your cluster 
and try again", zap.Error(err)) - return true, genericerror.New("Reconcile status failed") + logger.Error("recocile status failed, please fix your cluster and try again", zap.Error(err)) + return true, genericerror.New("reconcile status failed") } return true, nil }) diff --git a/pkg/helper/patroni_core_helper.go b/pkg/helper/patroni_core_helper.go index 7c857848..6e45ec64 100644 --- a/pkg/helper/patroni_core_helper.go +++ b/pkg/helper/patroni_core_helper.go @@ -66,7 +66,7 @@ func GetPatroniHelper() *PatroniHelper { func (ph *PatroniHelper) UpdatePatroniCore(service *qubershipv1.PatroniCore) error { err := ph.kubeClient.Update(context.TODO(), service) if err != nil { - logger.Error(fmt.Sprintf("Failed to update PatroniCore %v", service.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("Failed to update PatroniCore %v", service.Name), zap.Error(err)) return err } return nil @@ -79,9 +79,9 @@ func (ph *PatroniHelper) AddNameAndUID(name string, uid types.UID, kind string) logger.Error(message, zap.Error(err)) return err } - ph.ResourceManager.name = name - ph.ResourceManager.uid = uid - ph.ResourceManager.kind = kind + ph.name = name + ph.uid = uid + ph.kind = kind return nil } @@ -103,7 +103,7 @@ func (ph *PatroniHelper) GetCustomResource() qubershipv1.PatroniCore { func (ph *PatroniHelper) UpdatePostgresService(service *qubershipv1.PatroniCore) error { err := ph.kubeClient.Update(context.TODO(), service) if err != nil { - logger.Error(fmt.Sprintf("Failed to update PatroniCore %v", service.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("Failed to update PatroniCore %v", service.Name), zap.Error(err)) return err } return nil @@ -242,8 +242,8 @@ func (ph *PatroniHelper) WaitUntilReconcileIsDone() error { return false, nil } if strings.ToLower(cr.Status.Conditions[0].Type) == "failed" { - logger.Error("Recocile status failed, please fix your cluster and try again", zap.Error(err)) - return true, genericerror.New("Reconcile status failed") + 
logger.Error("recocile status failed, please fix your cluster and try again", zap.Error(err)) + return true, genericerror.New("reconcile status failed") } return true, nil }) @@ -536,11 +536,11 @@ func (ph *PatroniHelper) getStandbyClusterConfigurationFromSiteManager() map[str func (ph *PatroniHelper) DeleteCleanerInitContainer(clusterName string) error { patroniDeploymentName := fmt.Sprintf("pg-%s-node", clusterName) - if satefulsetsList, err := ph.ResourceManager.GetStatefulsetByNameRegExp(patroniDeploymentName); err != nil { + if satefulsetsList, err := ph.GetStatefulsetByNameRegExp(patroniDeploymentName); err != nil { logger.Error("Can't get Patroni Deployments", zap.Error(err)) return err } else { - return ph.ResourceManager.DeleteInitContainer(satefulsetsList, "pg-cleaner") + return ph.DeleteInitContainer(satefulsetsList, "pg-cleaner") } } @@ -584,7 +584,7 @@ func (ph *PatroniHelper) IsPatroniClusterDegradedDuringUpdate(config *ClusterSta expectedMembersCount := ph.ifExpectedCountOfMembers(expectedMembersNum, *config) isExpectedReplicationCount := ph.isExpectedReplicationCount(pgHost, expectedMembersNum-1) logger.Info(fmt.Sprintf("Check Is Patroni Cluster Degraded: isExpectedReplicationCount: %t; expectedMembersCount: %t;", isExpectedReplicationCount, expectedMembersCount)) - return !(isExpectedReplicationCount && expectedMembersCount) + return !isExpectedReplicationCount || !expectedMembersCount } func (ph *PatroniHelper) IsPatroniClusterHealthy(config *ClusterStatus) bool { @@ -610,7 +610,7 @@ func (ph *PatroniHelper) IsPatroniClusterDegraded(config *ClusterStatus, pgHost expectedMembersCount := ph.ifExpectedCountOfMembers(expectedMembersNum, *config) isExpectedReplicationCount := ph.isExpectedReplicationCount(pgHost, expectedMembersNum-1) logger.Info(fmt.Sprintf("Check Is Patroni Cluster Degraded: isExpectedReplicationCount: %t; expectedMembersCount: %t;", isExpectedReplicationCount, expectedMembersCount)) - return !(isExpectedReplicationCount && 
expectedMembersCount) + return !isExpectedReplicationCount || !expectedMembersCount } func (ph *PatroniHelper) StoreDataToCM(key string, value string) { @@ -658,7 +658,10 @@ func (ph *PatroniHelper) GetLocaleVersion(podName string) string { version := ph.GetLocaleVersionFromPod(podName) if version != "" { versionCM.Data["locale-version"] = version - ph.CreateOrUpdateConfigMap(versionCM) + _, err := ph.CreateOrUpdateConfigMap(versionCM) + if err != nil { + logger.Error("Failed to create or update config map deployment-info", zap.Error(err)) + } } return version } diff --git a/pkg/helper/resource_management.go b/pkg/helper/resource_management.go index 14bb35cf..c33e68cb 100644 --- a/pkg/helper/resource_management.go +++ b/pkg/helper/resource_management.go @@ -30,11 +30,11 @@ import ( qubershipv1 "github.com/Netcracker/pgskipper-operator/api/apps/v1" patroniv1 "github.com/Netcracker/pgskipper-operator/api/patroni/v1" "github.com/Netcracker/pgskipper-operator/pkg/util" - opUtil "github.com/Netcracker/pgskipper-operator/pkg/util" "go.uber.org/zap" appsv1 "k8s.io/api/apps/v1" k8sauth "k8s.io/api/authentication/v1" corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -238,28 +238,28 @@ func (rm *ResourceManager) IsPodReady(pod corev1.Pod) bool { } func (rm *ResourceManager) CreatePod(pod *corev1.Pod) error { - logger.Info(fmt.Sprintf("Creating pod %v", pod.ObjectMeta.Name)) - pod.ObjectMeta.OwnerReferences = rm.GetOwnerReferences() + logger.Info(fmt.Sprintf("Creating pod %v", pod.Name)) + pod.OwnerReferences = rm.GetOwnerReferences() err := rm.kubeClient.Create(context.TODO(), pod) if err != nil { - logger.Error(fmt.Sprintf("Failed to create Pod %v", pod.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("Failed to create Pod %v", pod.Name), zap.Error(err)) return err } return nil } func (rm *ResourceManager) UpdateService(service 
*corev1.Service) error { - service.ObjectMeta.OwnerReferences = rm.GetOwnerReferences() + service.OwnerReferences = rm.GetOwnerReferences() err := rm.kubeClient.Update(context.TODO(), service) if err != nil { - logger.Error(fmt.Sprintf("Failed to update service %v", service.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("Failed to update service %v", service.Name), zap.Error(err)) return err } return nil } func (rm *ResourceManager) UpdateDaemonSet(ds *appsv1.DaemonSet) (err error) { - ds.ObjectMeta.OwnerReferences = rm.GetOwnerReferences() + ds.OwnerReferences = rm.GetOwnerReferences() if err := rm.kubeClient.Update(context.TODO(), ds); err != nil { return err } @@ -274,24 +274,24 @@ func (rm *ResourceManager) CreateOrUpdateConfigMap(cm *corev1.ConfigMap) (bool, Name: cm.Name, Namespace: cm.Namespace, }, foundCm) if err != nil && errors.IsNotFound(err) { - logger.Info(fmt.Sprintf("Creating %s configMap", cm.ObjectMeta.Name)) - cm.ObjectMeta.OwnerReferences = rm.GetOwnerReferences() - cm.ObjectMeta.Labels = rm.getLabels(cm.ObjectMeta) + logger.Info(fmt.Sprintf("Creating %s configMap", cm.Name)) + cm.OwnerReferences = rm.GetOwnerReferences() + cm.Labels = rm.getLabels(cm.ObjectMeta) err = rm.kubeClient.Create(context.TODO(), cm) if err != nil { - logger.Error(fmt.Sprintf("Failed to create configMap %s", cm.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("Failed to create configMap %s", cm.Name), zap.Error(err)) return false, err } } else { if !reflect.DeepEqual(foundCm, cm) || !reflect.DeepEqual(foundCm.Data, cm.Data) { - logger.Info(fmt.Sprintf("Updating %s k8s cm", cm.ObjectMeta.Name)) - cm.ObjectMeta.OwnerReferences = rm.GetOwnerReferences() - if cm.ObjectMeta.Name != "patroni-leader" && cm.ObjectMeta.Name != "patroni-config" { - cm.ObjectMeta.Labels = rm.getLabels(cm.ObjectMeta) + logger.Info(fmt.Sprintf("Updating %s k8s cm", cm.Name)) + cm.OwnerReferences = rm.GetOwnerReferences() + if cm.Name != "patroni-leader" && cm.Name != 
"patroni-config" { + cm.Labels = rm.getLabels(cm.ObjectMeta) } err = rm.kubeClient.Update(context.TODO(), cm) if err != nil { - logger.Error(fmt.Sprintf("Failed to update cm %v", cm.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("Failed to update cm %v", cm.Name), zap.Error(err)) return false, err } return true, nil @@ -306,24 +306,24 @@ func (rm *ResourceManager) CreateOrUpdateService(service *corev1.Service) error Name: service.Name, Namespace: service.Namespace, }, foundSrv) if err != nil && errors.IsNotFound(err) { - logger.Info(fmt.Sprintf("Creating %s k8s service", service.ObjectMeta.Name)) - service.ObjectMeta.OwnerReferences = rm.GetOwnerReferences() - service.ObjectMeta.Labels = rm.getLabels(service.ObjectMeta) + logger.Info(fmt.Sprintf("Creating %s k8s service", service.Name)) + service.OwnerReferences = rm.GetOwnerReferences() + service.Labels = rm.getLabels(service.ObjectMeta) err = rm.kubeClient.Create(context.TODO(), service) if err != nil { - logger.Error(fmt.Sprintf("Failed to create service %v", service.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("Failed to create service %v", service.Name), zap.Error(err)) return err } } else { if !reflect.DeepEqual(foundSrv, service) { - logger.Info(fmt.Sprintf("Updating %s k8s service", service.ObjectMeta.Name)) - service.ObjectMeta.OwnerReferences = rm.GetOwnerReferences() - service.ObjectMeta.Labels = rm.getLabels(service.ObjectMeta) + logger.Info(fmt.Sprintf("Updating %s k8s service", service.Name)) + service.OwnerReferences = rm.GetOwnerReferences() + service.Labels = rm.getLabels(service.ObjectMeta) // Service update requires resource version service.ResourceVersion = foundSrv.ResourceVersion err = rm.kubeClient.Update(context.TODO(), service) if err != nil { - logger.Error(fmt.Sprintf("Failed to update service %v", service.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("Failed to update service %v", service.Name), zap.Error(err)) return err } } @@ -352,40 
+352,40 @@ func (rm *ResourceManager) CreateOrUpdateDeploymentForce(deployment *appsv1.Depl } func (rm *ResourceManager) CreateOrUpdateDeployment(deployment *appsv1.Deployment, waitStability bool) error { - err, deploymentBefore := rm.FindDeployment(deployment) + deploymentBefore, err := rm.FindDeployment(deployment) oldGeneration, deplRevision := deployment.Generation, int64(0) if err != nil && errors.IsNotFound(err) { - logger.Info(fmt.Sprintf("Creating %s k8s deployment", deployment.ObjectMeta.Name)) - deployment.ObjectMeta.OwnerReferences = rm.GetOwnerReferences() - deployment.ObjectMeta.Labels = rm.getLabels(deployment.ObjectMeta) + logger.Info(fmt.Sprintf("Creating %s k8s deployment", deployment.Name)) + deployment.OwnerReferences = rm.GetOwnerReferences() + deployment.Labels = rm.getLabels(deployment.ObjectMeta) err = rm.kubeClient.Create(context.TODO(), deployment) if err != nil { - logger.Error(fmt.Sprintf("Failed to create deployment %v", deployment.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("Failed to create deployment %v", deployment.Name), zap.Error(err)) return err } } else { copySystemAnnotations(&deploymentBefore.Spec.Template, &deployment.Spec.Template) - logger.Info(fmt.Sprintf("Updating %s k8s deployment", deployment.ObjectMeta.Name)) - deployment.ObjectMeta.OwnerReferences = rm.GetOwnerReferences() - deployment.ObjectMeta.Labels = rm.getLabels(deployment.ObjectMeta) + logger.Info(fmt.Sprintf("Updating %s k8s deployment", deployment.Name)) + deployment.OwnerReferences = rm.GetOwnerReferences() + deployment.Labels = rm.getLabels(deployment.ObjectMeta) err = rm.kubeClient.Update(context.TODO(), deployment) if err != nil { - logger.Error(fmt.Sprintf("Failed to update deployment %v", deployment.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("Failed to update deployment %v", deployment.Name), zap.Error(err)) return err } } // Wait for patroni and Monitoring-collector deployment stability if waitStability { - _, 
deploymentAfter := rm.FindDeployment(deployment) - depBeforeHash := opUtil.HashJson(deploymentBefore.Spec) - depAfterHash := opUtil.HashJson(deploymentAfter.Spec) + deploymentAfter, _ := rm.FindDeployment(deployment) + depBeforeHash := util.HashJson(deploymentBefore.Spec) + depAfterHash := util.HashJson(deploymentAfter.Spec) if depBeforeHash != depAfterHash { logger.Info("Deployment.Spec hash has differences") deplRevision++ } else { logger.Info("Deployment.Spec hash has no differences") } - if err := opUtil.WaitForStabilityDepl(*deployment, deplRevision, oldGeneration); err != nil { + if err := util.WaitForStabilityDepl(*deployment, deplRevision, oldGeneration); err != nil { logger.Error(fmt.Sprintf("Failed to wait for stable deployment: %s", deployment.Name), zap.Error(err)) return err } @@ -418,56 +418,56 @@ func copySystemAnnotationsToMap(source, target map[string]string) bool { func (rm *ResourceManager) CreateOrUpdateStatefulset(statefulSet *appsv1.StatefulSet, waitStability bool) error { // Adding label to patroni statefulset for velero backup and restore - statefulSet.ObjectMeta.Labels["clone-mode-type"] = "data" + statefulSet.Labels["clone-mode-type"] = "data" - err, statefulSetBefore := rm.FindStatefulSet(statefulSet) + statefulSetBefore, err := rm.FindStatefulSet(statefulSet) oldGeneration, stSetRevision := statefulSet.Generation, statefulSet.Status.CurrentRevision if err != nil && errors.IsNotFound(err) { - logger.Info(fmt.Sprintf("Creating %s k8s StatefulSet", statefulSet.ObjectMeta.Name)) - statefulSet.ObjectMeta.Labels = rm.getLabels(statefulSet.ObjectMeta) - statefulSet.ObjectMeta.OwnerReferences = rm.GetOwnerReferences() + logger.Info(fmt.Sprintf("Creating %s k8s StatefulSet", statefulSet.Name)) + statefulSet.Labels = rm.getLabels(statefulSet.ObjectMeta) + statefulSet.OwnerReferences = rm.GetOwnerReferences() err = rm.kubeClient.Create(context.TODO(), statefulSet) if err != nil { - logger.Error(fmt.Sprintf("Failed to create StatefulSet %v", 
statefulSet.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("Failed to create StatefulSet %v", statefulSet.Name), zap.Error(err)) return err } } else { if !equality.Semantic.DeepEqual(statefulSetBefore.Spec, statefulSet.Spec) { - logger.Info(fmt.Sprintf("Updating %s k8s StatefulSet", statefulSet.ObjectMeta.Name)) + logger.Info(fmt.Sprintf("Updating %s k8s StatefulSet", statefulSet.Name)) err = rm.kubeClient.Delete(context.TODO(), statefulSetBefore) if err != nil { - logger.Error(fmt.Sprintf("Failed to delete StatefulSet %v", statefulSetBefore.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("Failed to delete StatefulSet %v", statefulSetBefore.Name), zap.Error(err)) return err } - statefulSet.ObjectMeta.Labels = rm.getLabels(statefulSet.ObjectMeta) - statefulSet.ObjectMeta.OwnerReferences = rm.GetOwnerReferences() - statefulSet.ObjectMeta.ResourceVersion = "" + statefulSet.Labels = rm.getLabels(statefulSet.ObjectMeta) + statefulSet.OwnerReferences = rm.GetOwnerReferences() + statefulSet.ResourceVersion = "" oldGeneration = 0 err = rm.kubeClient.Create(context.TODO(), statefulSet) - _, statefulSetAfter := rm.FindStatefulSet(statefulSet) + statefulSetAfter, _ := rm.FindStatefulSet(statefulSet) stSetRevision = statefulSetAfter.Status.CurrentRevision if err != nil { - logger.Error(fmt.Sprintf("Failed to create StatefulSet %v", statefulSet.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("Failed to create StatefulSet %v", statefulSet.Name), zap.Error(err)) return err } } } // Wait for patroni and Monitoring-collector deployment stability if waitStability { - _, stSetAfter := rm.FindStatefulSet(statefulSet) - stSetBeforeHash := opUtil.HashJson(statefulSetBefore.Spec) - stSetAfterHash := opUtil.HashJson(stSetAfter.Spec) + stSetAfter, _ := rm.FindStatefulSet(statefulSet) + stSetBeforeHash := util.HashJson(statefulSetBefore.Spec) + stSetAfterHash := util.HashJson(stSetAfter.Spec) if stSetBeforeHash != stSetAfterHash { 
logger.Info("StatefulSet.Spec hash has differences") stSetRevision = stSetAfter.Status.CurrentRevision } else { logger.Info("StatefulSet.Spec hash has no differences") } - if err := opUtil.WaitForStabilityStatefulSet(*statefulSet, stSetRevision, oldGeneration); err != nil { + if err := util.WaitForStabilityStatefulSet(*statefulSet, stSetRevision, oldGeneration); err != nil { logger.Error(fmt.Sprintf("Failed to wait for stable StatefulSet: %s", statefulSet.Name), zap.Error(err)) return err } @@ -475,20 +475,20 @@ func (rm *ResourceManager) CreateOrUpdateStatefulset(statefulSet *appsv1.Statefu return nil } -func (rm *ResourceManager) FindDeployment(deployment *appsv1.Deployment) (error, *appsv1.Deployment) { +func (rm *ResourceManager) FindDeployment(deployment *appsv1.Deployment) (*appsv1.Deployment, error) { foundDepl := &appsv1.Deployment{} err := rm.kubeClient.Get(context.TODO(), types.NamespacedName{ Name: deployment.Name, Namespace: deployment.Namespace, }, foundDepl) - return err, foundDepl + return foundDepl, err } -func (rm *ResourceManager) FindStatefulSet(statefulset *appsv1.StatefulSet) (error, *appsv1.StatefulSet) { +func (rm *ResourceManager) FindStatefulSet(statefulset *appsv1.StatefulSet) (*appsv1.StatefulSet, error) { foundStSet := &appsv1.StatefulSet{} err := rm.kubeClient.Get(context.TODO(), types.NamespacedName{ Name: statefulset.Name, Namespace: statefulset.Namespace, }, foundStSet) - return err, foundStSet + return foundStSet, err } func (rm *ResourceManager) CreatePvcIfNotExists(pvc *corev1.PersistentVolumeClaim) error { @@ -496,20 +496,20 @@ func (rm *ResourceManager) CreatePvcIfNotExists(pvc *corev1.PersistentVolumeClai err := rm.kubeClient.Get(context.TODO(), types.NamespacedName{ Name: pvc.Name, Namespace: pvc.Namespace, }, foundPvc) - logger.Info(fmt.Sprintf("Start to check if pvc %s exists", pvc.ObjectMeta.Name)) + logger.Info(fmt.Sprintf("Start to check if pvc %s exists", pvc.Name)) if err != nil && errors.IsNotFound(err) { - 
logger.Info(fmt.Sprintf("Creating %s PVC", pvc.ObjectMeta.Name)) + logger.Info(fmt.Sprintf("Creating %s PVC", pvc.Name)) err = rm.kubeClient.Create(context.TODO(), pvc) if err != nil { - logger.Error(fmt.Sprintf("Failed to create pvc %s", pvc.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("Failed to create pvc %s", pvc.Name), zap.Error(err)) return err } } else { - logger.Info(fmt.Sprintf("PVC %s exists, clearing owner reference...", pvc.ObjectMeta.Name)) + logger.Info(fmt.Sprintf("PVC %s exists, clearing owner reference...", pvc.Name)) foundPvc.OwnerReferences = nil err := rm.kubeClient.Update(context.TODO(), foundPvc) if err != nil { - logger.Error(fmt.Sprintf("Failed to clear Owner Reference for %s", pvc.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("Failed to clear Owner Reference for %s", pvc.Name), zap.Error(err)) return err } } @@ -522,12 +522,12 @@ func (rm *ResourceManager) CreateSecretIfNotExists(secret *corev1.Secret) error Name: secret.Name, Namespace: secret.Namespace, }, foundSecret) if err != nil && errors.IsNotFound(err) { - logger.Info(fmt.Sprintf("Creating %s secret", secret.ObjectMeta.Name)) - secret.ObjectMeta.OwnerReferences = rm.GetOwnerReferences() - secret.ObjectMeta.Labels = rm.getLabels(secret.ObjectMeta) + logger.Info(fmt.Sprintf("Creating %s secret", secret.Name)) + secret.OwnerReferences = rm.GetOwnerReferences() + secret.Labels = rm.getLabels(secret.ObjectMeta) err = rm.kubeClient.Create(context.TODO(), secret) if err != nil { - logger.Error(fmt.Sprintf("Failed to create secret %s", secret.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("Failed to create secret %s", secret.Name), zap.Error(err)) return err } } @@ -540,24 +540,24 @@ func (rm *ResourceManager) CreateOrUpdateSecret(secret *corev1.Secret) error { Name: secret.Name, Namespace: secret.Namespace, }, foundSecret) if err != nil && errors.IsNotFound(err) { - logger.Info(fmt.Sprintf("Creating %s secret", secret.ObjectMeta.Name)) - 
secret.ObjectMeta.OwnerReferences = rm.GetOwnerReferences() - secret.ObjectMeta.Labels = rm.getLabels(secret.ObjectMeta) + logger.Info(fmt.Sprintf("Creating %s secret", secret.Name)) + secret.OwnerReferences = rm.GetOwnerReferences() + secret.Labels = rm.getLabels(secret.ObjectMeta) err = rm.kubeClient.Create(context.TODO(), secret) if err != nil { - logger.Error(fmt.Sprintf("Failed to create secret %s", secret.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("Failed to create secret %s", secret.Name), zap.Error(err)) return err } } else { if !reflect.DeepEqual(foundSecret, secret) { - logger.Info(fmt.Sprintf("Updating %s k8s secret", secret.ObjectMeta.Name)) - secret.ObjectMeta.OwnerReferences = rm.GetOwnerReferences() - secret.ObjectMeta.Labels = rm.getLabels(secret.ObjectMeta) + logger.Info(fmt.Sprintf("Updating %s k8s secret", secret.Name)) + secret.OwnerReferences = rm.GetOwnerReferences() + secret.Labels = rm.getLabels(secret.ObjectMeta) // Service update requires resource version secret.ResourceVersion = foundSecret.ResourceVersion err = rm.kubeClient.Update(context.TODO(), secret) if err != nil { - logger.Error(fmt.Sprintf("Failed to update secret %v", secret.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("Failed to update secret %v", secret.Name), zap.Error(err)) return err } } @@ -565,17 +565,17 @@ func (rm *ResourceManager) CreateOrUpdateSecret(secret *corev1.Secret) error { return nil } -func (rm *ResourceManager) CreateEndpointIfNotExists(endpoint *corev1.Endpoints) error { - foundEndpoint := &corev1.Endpoints{} +func (rm *ResourceManager) CreateEndpointSliceIfNotExists(endpointSlice *discoveryv1.EndpointSlice) error { + foundEndpointSlice := &discoveryv1.EndpointSlice{} err := rm.kubeClient.Get(context.TODO(), types.NamespacedName{ - Name: endpoint.Name, Namespace: endpoint.Namespace, - }, foundEndpoint) + Name: endpointSlice.Name, Namespace: endpointSlice.Namespace, + }, foundEndpointSlice) if err != nil && 
errors.IsNotFound(err) { - logger.Info(fmt.Sprintf("Creating %s k8s service", endpoint.ObjectMeta.Name)) - endpoint.ObjectMeta.OwnerReferences = rm.GetOwnerReferences() - err = rm.kubeClient.Create(context.TODO(), endpoint) + logger.Info(fmt.Sprintf("Creating %s k8s EndpointSlice", endpointSlice.Name)) + endpointSlice.OwnerReferences = rm.GetOwnerReferences() + err = rm.kubeClient.Create(context.TODO(), endpointSlice) if err != nil { - logger.Error(fmt.Sprintf("Failed to create service %s", endpoint.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("Failed to create EndpointSlice %s", endpointSlice.Name), zap.Error(err)) return err } } else if err != nil { @@ -610,11 +610,11 @@ func (rm *ResourceManager) DeletePod(pod *corev1.Pod) error { Name: pod.Name, Namespace: pod.Namespace, }, foundPod) if err != nil && !errors.IsNotFound(err) { - logger.Error(fmt.Sprintf("ERROR DELETE POD %v ", foundPod.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("ERROR DELETE POD %v ", foundPod.Name), zap.Error(err)) return err } if err := rm.kubeClient.Delete(context.TODO(), foundPod); err != nil { - logger.Error(fmt.Sprintf("ERROR DELETE POD %v", foundPod.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("ERROR DELETE POD %v", foundPod.Name), zap.Error(err)) return err } @@ -626,7 +626,7 @@ func (rm *ResourceManager) DeletePodWithWaiting(pod *corev1.Pod) error { if err != nil { return err } - err = opUtil.WaitDeletePod(pod) + err = util.WaitDeletePod(pod) if err != nil { return err } @@ -639,14 +639,14 @@ func (rm *ResourceManager) DeleteDeployment(deploymentName string) error { Name: deploymentName, Namespace: util.GetNameSpace(), }, foundDeployment) if err != nil && !errors.IsNotFound(err) { - logger.Error(fmt.Sprintf("error during Deployment deletion %v ", foundDeployment.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("error during Deployment deletion %v ", foundDeployment.Name), zap.Error(err)) return err } else if err != nil { 
logger.Info(fmt.Sprintf("Deployment %s is not exist", deploymentName)) return nil } if err := rm.kubeClient.Delete(context.TODO(), foundDeployment); err != nil { - logger.Error(fmt.Sprintf("error during Deployment deletion %v", foundDeployment.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("error during Deployment deletion %v", foundDeployment.Name), zap.Error(err)) return err } err = rm.WaitTillDeploymentDeleted(foundDeployment) @@ -675,8 +675,8 @@ func (rm *ResourceManager) UpdatePGService() error { var svcNames = []string{"postgres-operator", "dbaas-postgres-adapter"} for _, svcName := range svcNames { svc := rm.GetService(svcName, util.GetNameSpace()) - svc.ObjectMeta.OwnerReferences = rm.GetOwnerReferences() - svc.ObjectMeta.Labels = rm.getLabels(svc.ObjectMeta) + svc.OwnerReferences = rm.GetOwnerReferences() + svc.Labels = rm.getLabels(svc.ObjectMeta) if err := rm.UpdateService(svc); err != nil { logger.Error("error during update of pgService in resource_management.go", zap.Error(err)) return err @@ -692,7 +692,7 @@ func (rm *ResourceManager) UpdatePatroniConfigMaps() error { if err != nil { return err } - cmap.ObjectMeta.OwnerReferences = rm.GetOwnerReferences() + cmap.OwnerReferences = rm.GetOwnerReferences() if _, err := rm.CreateOrUpdateConfigMap(cmap); err != nil { logger.Error("error during update of patroni configMap in resource_management.go", zap.Error(err)) return err @@ -748,11 +748,11 @@ func (rm *ResourceManager) CreateServiceIfNotExists(service *corev1.Service) err Name: service.Name, Namespace: service.Namespace, }, foundService) if err != nil && errors.IsNotFound(err) { - logger.Info(fmt.Sprintf("Creating %s k8s service", service.ObjectMeta.Name)) - service.ObjectMeta.Labels = rm.getLabels(service.ObjectMeta) + logger.Info(fmt.Sprintf("Creating %s k8s service", service.Name)) + service.Labels = rm.getLabels(service.ObjectMeta) err = rm.kubeClient.Create(context.TODO(), service) if err != nil { - logger.Error(fmt.Sprintf("Failed 
to create service %s", service.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("Failed to create service %s", service.Name), zap.Error(err)) return err } } else if err != nil { @@ -767,11 +767,11 @@ func (rm *ResourceManager) CreateConfigMapIfNotExists(cm *corev1.ConfigMap) erro Name: cm.Name, Namespace: cm.Namespace, }, foundCm) if err != nil && errors.IsNotFound(err) { - logger.Info(fmt.Sprintf("Creating %s configMap", cm.ObjectMeta.Name)) - cm.ObjectMeta.Labels = rm.getLabels(cm.ObjectMeta) + logger.Info(fmt.Sprintf("Creating %s configMap", cm.Name)) + cm.Labels = rm.getLabels(cm.ObjectMeta) err = rm.kubeClient.Create(context.TODO(), cm) if err != nil { - logger.Error(fmt.Sprintf("Failed to create configMap %s", cm.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("Failed to create configMap %s", cm.Name), zap.Error(err)) return err } } @@ -878,7 +878,7 @@ func (rm *ResourceManager) UpdatePatroniReplicas(replicas int32, clusterName str for idx := 0; idx < len(stSetsToUpdate); idx++ { err := rm.kubeClient.Update(context.TODO(), stSetsToUpdate[idx]) if err != nil { - logger.Error(fmt.Sprintf("Failed to update statefulsets %v to scale to %v", stSetsToUpdate[idx].ObjectMeta.Name, replicas), zap.Error(err)) + logger.Error(fmt.Sprintf("Failed to update statefulsets %v to scale to %v", stSetsToUpdate[idx].Name, replicas), zap.Error(err)) return err } } @@ -982,7 +982,7 @@ func (rm *ResourceManager) getOperatorLabels() (map[string]string, error) { } deployment := foundDeployment[0] - labels := deployment.Spec.Template.ObjectMeta.Labels + labels := deployment.Spec.Template.Labels return labels, nil } diff --git a/pkg/patroni/patroni.go b/pkg/patroni/patroni.go index b294ddb8..8e16475d 100644 --- a/pkg/patroni/patroni.go +++ b/pkg/patroni/patroni.go @@ -201,7 +201,12 @@ func GetPatroniCurrentConfig(patroniUrl string) (map[string]interface{}, error) if err != nil { return nil, fmt.Errorf("failed to retrieve patroni config: %w", err) } - defer 
resp.Body.Close() + defer func() { + err := resp.Body.Close() + if err != nil { + logger.Error("Error during closing response body", zap.Error(err)) + } + }() if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("unexpected status code %d while retrieving patroni config", resp.StatusCode) diff --git a/pkg/postgresexporter/watcher.go b/pkg/postgresexporter/watcher.go index c6e9fe1a..a22900cf 100644 --- a/pkg/postgresexporter/watcher.go +++ b/pkg/postgresexporter/watcher.go @@ -17,6 +17,8 @@ package postgresexporter import ( "context" "fmt" + "sync" + "github.com/Netcracker/pgskipper-operator/pkg/helper" "github.com/Netcracker/pgskipper-operator/pkg/util" "go.uber.org/zap" @@ -26,7 +28,6 @@ import ( "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" "sigs.k8s.io/controller-runtime/pkg/client" - "sync" ) const ( @@ -153,17 +154,17 @@ func (exp *Watcher) handleWatcher(clientSet *kubernetes.Clientset, namespace str continue } exp.cmList[cm.Namespace] = append(exp.cmList[cm.Namespace], cm.Name) - logger.Info(fmt.Sprintf("CM %s was added in namespace %s", cm.ObjectMeta.Name, cm.ObjectMeta.Namespace)) + logger.Info(fmt.Sprintf("CM %s was added in namespace %s", cm.Name, cm.Namespace)) if err := exp.updateCM(); err != nil { continue } case watch.Modified: - logger.Info(fmt.Sprintf("CM %s was modified in namespace %s", cm.ObjectMeta.Name, cm.ObjectMeta.Namespace)) + logger.Info(fmt.Sprintf("CM %s was modified in namespace %s", cm.Name, cm.Namespace)) if err := exp.updateCM(); err != nil { continue } case watch.Deleted: - logger.Info(fmt.Sprintf("CM %s was deleted in namespace %s", cm.ObjectMeta.Name, cm.ObjectMeta.Namespace)) + logger.Info(fmt.Sprintf("CM %s was deleted in namespace %s", cm.Name, cm.Namespace)) exp.removeCMFromList(cm.Namespace, cm.Name) if err := exp.updateCM(); err != nil { continue diff --git a/pkg/queryexporter/watcher.go b/pkg/queryexporter/watcher.go index fcc2c21d..512b760e 100644 --- a/pkg/queryexporter/watcher.go +++ 
b/pkg/queryexporter/watcher.go @@ -172,17 +172,17 @@ func (exp *Watcher) handleWatcher(clientSet *kubernetes.Clientset, namespace str continue } exp.cmList[cm.Namespace] = append(exp.cmList[cm.Namespace], cm.Name) - logger.Info(fmt.Sprintf("CM %s was added in namespace %s", cm.ObjectMeta.Name, cm.ObjectMeta.Namespace)) + logger.Info(fmt.Sprintf("CM %s was added in namespace %s", cm.Name, cm.Namespace)) if err := exp.updateCM(); err != nil { continue } case watch.Modified: - logger.Info(fmt.Sprintf("CM %s was modified in namespace %s", cm.ObjectMeta.Name, cm.ObjectMeta.Namespace)) + logger.Info(fmt.Sprintf("CM %s was modified in namespace %s", cm.Name, cm.Namespace)) if err := exp.updateCM(); err != nil { continue } case watch.Deleted: - logger.Info(fmt.Sprintf("CM %s was deleted in namespace %s", cm.ObjectMeta.Name, cm.ObjectMeta.Namespace)) + logger.Info(fmt.Sprintf("CM %s was deleted in namespace %s", cm.Name, cm.Namespace)) exp.removeCMFromList(cm.Namespace, cm.Name) if err := exp.updateCM(); err != nil { continue diff --git a/pkg/reconciler/backup_daemon.go b/pkg/reconciler/backup_daemon.go index fd33c821..d15cf9c7 100644 --- a/pkg/reconciler/backup_daemon.go +++ b/pkg/reconciler/backup_daemon.go @@ -169,8 +169,8 @@ func (r *BackupDaemonReconciler) Reconcile() error { backupDaemonDeployment.Spec.Template.Spec.Containers[0].VolumeMounts = append(backupDaemonDeployment.Spec.Template.Spec.Containers[0].VolumeMounts, util.GetTlsSecretVolumeMount()) backupDaemonDeployment.Spec.Template.Spec.Volumes = append(backupDaemonDeployment.Spec.Template.Spec.Volumes, util.GetTlsSecretVolume(cr.Spec.Tls.CertificateSecretName)) backupDaemonDeployment.Spec.Template.Spec.Containers[0].Env = append(backupDaemonDeployment.Spec.Template.Spec.Containers[0].Env, r.getTlsEnv()) - backupDaemonDeployment.Spec.Template.Spec.Containers[0].LivenessProbe.ProbeHandler.HTTPGet.Scheme = "HTTPS" - backupDaemonDeployment.Spec.Template.Spec.Containers[0].ReadinessProbe.ProbeHandler.HTTPGet.Scheme 
= "HTTPS" + backupDaemonDeployment.Spec.Template.Spec.Containers[0].LivenessProbe.HTTPGet.Scheme = "HTTPS" + backupDaemonDeployment.Spec.Template.Spec.Containers[0].ReadinessProbe.HTTPGet.Scheme = "HTTPS" } if cr.Spec.Tracing != nil && cr.Spec.Tracing.Enabled { diff --git a/pkg/reconciler/common.go b/pkg/reconciler/common.go index e2e2ed13..7dcc4378 100644 --- a/pkg/reconciler/common.go +++ b/pkg/reconciler/common.go @@ -22,9 +22,11 @@ import ( "github.com/Netcracker/pgskipper-operator/pkg/util" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/utils/ptr" crclient "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -51,18 +53,18 @@ func GetService(service *corev1.Service) (*corev1.Service, error) { Name: service.Name, Namespace: service.Namespace, }, foundService) if err != nil { - logger.Error(fmt.Sprintf("There is an error during getting service %s", service.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("There is an error during getting service %s", service.Name), zap.Error(err)) return nil, err } - logger.Info(fmt.Sprintf("Getting %s k8s service for patch", service.ObjectMeta.Name)) + logger.Info(fmt.Sprintf("Getting %s k8s service for patch", service.Name)) return foundService, nil } func UpdateService(service *corev1.Service) error { - logger.Info(fmt.Sprintf("Updating %s k8s service", service.ObjectMeta.Name)) + logger.Info(fmt.Sprintf("Updating %s k8s service", service.Name)) err := k8sClient.Update(context.TODO(), service) if err != nil { - logger.Error(fmt.Sprintf("Failed to update service %v", service.ObjectMeta.Name), zap.Error(err)) + logger.Error(fmt.Sprintf("Failed to update service %v", service.Name), zap.Error(err)) return err } return nil @@ -151,17 +153,37 @@ func reconcileExternalService(name string, labels map[string]string, externalHos return service } -func 
reconcileEndpoint(name string, labels map[string]string) *corev1.Endpoints { - endpoint := &corev1.Endpoints{ +func reconcileEndpointSlice(name string, labels map[string]string) *discoveryv1.EndpointSlice { + endpointSlice := &discoveryv1.EndpointSlice{ TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "Endpoints", + APIVersion: "discovery.k8s.io/v1", + Kind: "EndpointSlice", }, ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, - Labels: labels, + Labels: map[string]string{ + "kubernetes.io/service-name": name, + }, + }, + AddressType: discoveryv1.AddressTypeIPv4, + Ports: []discoveryv1.EndpointPort{ + { + Name: ptr.To("pg"), + Port: ptr.To[int32](5432), + Protocol: ptr.To(corev1.ProtocolTCP), + }, + { + Name: ptr.To("patroni"), + Port: ptr.To[int32](8008), + Protocol: ptr.To(corev1.ProtocolTCP), + }, }, + Endpoints: []discoveryv1.Endpoint{}, + } + + for k, v := range labels { + endpointSlice.Labels[k] = v } - return endpoint + return endpointSlice } diff --git a/pkg/reconciler/metric_collector.go b/pkg/reconciler/metric_collector.go index 51e8683b..7ac895b5 100644 --- a/pkg/reconciler/metric_collector.go +++ b/pkg/reconciler/metric_collector.go @@ -32,8 +32,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) -var monitoringSecrets = []string{"monitoring-user"} - type MetricCollectorReconciler struct { cr *qubershipv1.PatroniServices helper *helper.Helper diff --git a/pkg/reconciler/patroni.go b/pkg/reconciler/patroni.go index 31577561..45860278 100644 --- a/pkg/reconciler/patroni.go +++ b/pkg/reconciler/patroni.go @@ -111,13 +111,13 @@ func (r *PatroniReconciler) Reconcile() error { } } - if _, err := r.helper.ResourceManager.CreateOrUpdateConfigMap(patroniConfigMap); err != nil { + if _, err := r.helper.CreateOrUpdateConfigMap(patroniConfigMap); err != nil { logger.Error(fmt.Sprintf("Cannot create or update config map %s", patroniConfigMap.Name), zap.Error(err)) return err } pgParamsConfigMap := 
deployment.ConfigMapForPostgreSQL(r.cluster.ClusterName, r.cluster.PatroniPropertiesCM) - if _, err := r.helper.ResourceManager.CreateOrUpdateConfigMap(pgParamsConfigMap); err != nil { + if _, err := r.helper.CreateOrUpdateConfigMap(pgParamsConfigMap); err != nil { logger.Error(fmt.Sprintf("Cannot create config map %s", pgParamsConfigMap.Name), zap.Error(err)) return err } @@ -125,7 +125,7 @@ func (r *PatroniReconciler) Reconcile() error { for _, userName := range Secrets { logger.Info(fmt.Sprintf("Checking for %s secret existence", userName)) pgSecret := deployment.PatroniSecret(cr.Namespace, userName, r.cluster.PatroniLabels) - if err := r.helper.ResourceManager.CreateSecretIfNotExists(pgSecret); err != nil { + if err := r.helper.CreateSecretIfNotExists(pgSecret); err != nil { logger.Error(fmt.Sprintf("Cannot create secret %s", pgSecret.Name), zap.Error(err)) return err } @@ -134,7 +134,7 @@ func (r *PatroniReconciler) Reconcile() error { // find possible deployments by pods // try to get master pod - masterPod, err := r.helper.ResourceManager.GetPodsByLabel(r.cluster.PatroniMasterSelectors) + masterPod, err := r.helper.GetPodsByLabel(r.cluster.PatroniMasterSelectors) if err != nil { if !errors.IsNotFound(err) { return err @@ -159,11 +159,11 @@ func (r *PatroniReconciler) Reconcile() error { var statefulCount int logger.Info("Update Postgres Service") patroniStatefulSetName := fmt.Sprintf("pg-%s-node", r.cluster.ClusterName) - statefulCount, _ = r.helper.ResourceManager.GetStatefulsetCountByNameRegExp(patroniStatefulSetName) + statefulCount, _ = r.helper.GetStatefulsetCountByNameRegExp(patroniStatefulSetName) if statefulCount == 0 { patroniDeploymentName := fmt.Sprintf("pg-%s-node", r.cluster.ClusterName) - foundDeployment, err := r.helper.ResourceManager.GetDeploymentsByNameRegExp(patroniDeploymentName) + foundDeployment, err := r.helper.GetDeploymentsByNameRegExp(patroniDeploymentName) if err != nil { return err } @@ -172,7 +172,7 @@ func (r *PatroniReconciler) 
Reconcile() error { } else { - if statefulCount, err = r.helper.ResourceManager.GetStatefulsetCountByNameRegExp(patroniStatefulSetName); err != nil { + if statefulCount, err = r.helper.GetStatefulsetCountByNameRegExp(patroniStatefulSetName); err != nil { logger.Error("Can't get existing Patroni Deployments", zap.Error(err)) return err } @@ -190,7 +190,7 @@ func (r *PatroniReconciler) Reconcile() error { // check locale version, because different versions can affect postgres data localeVersion := r.helper.GetLocaleVersion(masterPod.Items[0].Name) - replicaPods, err := r.helper.ResourceManager.GetPodsByLabel(r.cluster.PatroniReplicasSelector) + replicaPods, err := r.helper.GetPodsByLabel(r.cluster.PatroniReplicasSelector) if err != nil { logger.Error("Can not get replica pods") return err @@ -226,7 +226,7 @@ func (r *PatroniReconciler) Reconcile() error { } } - existingStatefulsets, err := r.helper.ResourceManager.GetStatefulsetByNameRegExp(patroniStatefulSetName) + existingStatefulsets, err := r.helper.GetStatefulsetByNameRegExp(patroniStatefulSetName) if err != nil { return err } @@ -255,7 +255,7 @@ func (r *PatroniReconciler) Reconcile() error { } // compare locale versions and run fix for collation in postres - updatedMasterPod, _ := r.helper.ResourceManager.GetPodsByLabel(r.cluster.PatroniMasterSelectors) + updatedMasterPod, _ := r.helper.GetPodsByLabel(r.cluster.PatroniMasterSelectors) newLocaleVersion := r.helper.GetLocaleVersionFromPod(updatedMasterPod.Items[0].Name) pgVersion, err := strconv.ParseInt(r.helper.GetPGVersionFromPod(updatedMasterPod.Items[0].Name), 10, 64) if err != nil { @@ -377,7 +377,7 @@ func (r *PatroniReconciler) Reconcile() error { } for _, name := range []string{"patroni-config", "patroni-leader"} { - cm, err := r.helper.ResourceManager.GetConfigMap(name) + cm, err := r.helper.GetConfigMap(name) if err != nil { if errors.IsNotFound(err) { continue @@ -394,7 +394,7 @@ func (r *PatroniReconciler) Reconcile() error { cm.Annotations[k] = v } 
} - if _, err := r.helper.ResourceManager.CreateOrUpdateConfigMap(cm); err != nil { + if _, err := r.helper.CreateOrUpdateConfigMap(cm); err != nil { logger.Error("failed to annotate Patroni ConfigMap", zap.Error(err)) return err } @@ -420,37 +420,37 @@ func (r *PatroniReconciler) processPatroniServices(cr *v1.PatroniCore, patroniSp } else { pgService := reconcileService(r.cluster.PostgresServiceName, r.cluster.PatroniLabels, r.cluster.PatroniMasterSelectors, deployment.GetPortsForPatroniService(r.cluster.ClusterName), false) - if err := r.helper.ResourceManager.CreateOrUpdateService(pgService); err != nil { + if err := r.helper.CreateOrUpdateService(pgService); err != nil { logger.Error(fmt.Sprintf("Cannot create service %s", pgService.Name), zap.Error(err)) return err } pgReadOnlyService := reconcileService(r.cluster.PostgresServiceName+"-ro", r.cluster.PatroniLabels, r.cluster.PatroniReplicasSelector, deployment.GetPortsForPatroniService(r.cluster.ClusterName), false) - if err := r.helper.ResourceManager.CreateServiceIfNotExists(pgReadOnlyService); err != nil { + if err := r.helper.CreateServiceIfNotExists(pgReadOnlyService); err != nil { logger.Error(fmt.Sprintf("Cannot create service %s", pgReadOnlyService.Name), zap.Error(err)) return err } patroniApiService := reconcileService(r.cluster.PostgresServiceName+"-api", r.cluster.PatroniLabels, r.cluster.PatroniCommonLabels, deployment.GetPortsForPatroniService(r.cluster.ClusterName), false) - if err := r.helper.ResourceManager.CreateServiceIfNotExists(patroniApiService); err != nil { + if err := r.helper.CreateServiceIfNotExists(patroniApiService); err != nil { logger.Error(fmt.Sprintf("Cannot create service %s", pgService.Name), zap.Error(err)) return err } if cr.Spec.PgBackRest != nil { pgBackRestService := deployment.GetPgBackRestService(r.cluster.PatroniMasterSelectors, false) - if err := r.helper.ResourceManager.CreateOrUpdateService(pgBackRestService); err != nil { + if err := 
r.helper.CreateOrUpdateService(pgBackRestService); err != nil { logger.Error(fmt.Sprintf("Cannot create service %s", pgBackRestService.Name), zap.Error(err)) return err } if cr.Spec.PgBackRest.BackupFromStandby { pgBackRestStandbyService := deployment.GetPgBackRestService(r.cluster.PatroniReplicasSelector, true) - if err := r.helper.ResourceManager.CreateOrUpdateService(pgBackRestStandbyService); err != nil { + if err := r.helper.CreateOrUpdateService(pgBackRestStandbyService); err != nil { logger.Error(fmt.Sprintf("Cannot create service %s", pgBackRestStandbyService.Name), zap.Error(err)) return err } } pgBackRestHeadless := deployment.GetBackrestHeadless() - if err := r.helper.ResourceManager.CreateOrUpdateService(pgBackRestHeadless); err != nil { + if err := r.helper.CreateOrUpdateService(pgBackRestHeadless); err != nil { logger.Error(fmt.Sprintf("Cannot create service %s", pgBackRestHeadless.Name), zap.Error(err)) return err } @@ -469,13 +469,13 @@ func (r *PatroniReconciler) processPatroniStatefulset(cr *v1.PatroniCore, deploy patroniSpec := cr.Spec.Patroni pvc := storage.NewPvc(fmt.Sprintf("%s-data-%v", opUtil.GetPatroniClusterName(cr.Spec.Patroni.ClusterName), deploymentIdx), patroniSpec.Storage, deploymentIdx) - if err := r.helper.ResourceManager.CreatePvcIfNotExists(pvc); err != nil { + if err := r.helper.CreatePvcIfNotExists(pvc); err != nil { logger.Error(fmt.Sprintf("Cannot create pvc %s", pvc.Name), zap.Error(err)) return err } if patroniSpec.PgWalStorage != nil { pvc := storage.NewPvc(fmt.Sprintf("%s-wals-data-%v", opUtil.GetPatroniClusterName(cr.Spec.Patroni.ClusterName), deploymentIdx), patroniSpec.PgWalStorage, deploymentIdx) - if err := r.helper.ResourceManager.CreatePvcIfNotExists(pvc); err != nil { + if err := r.helper.CreatePvcIfNotExists(pvc); err != nil { logger.Error(fmt.Sprintf("Cannot create pvc %s", pvc.Name), zap.Error(err)) return err } @@ -484,7 +484,7 @@ func (r *PatroniReconciler) processPatroniStatefulset(cr *v1.PatroniCore, deploy 
pgBackrestStorage := cr.Spec.PgBackRest.Rwx pgBackrestStorage.AccessModes = []string{"ReadWriteMany"} pvc = storage.NewPvc("pgbackrest-backups", pgBackrestStorage, 1) - if err := r.helper.ResourceManager.CreatePvcIfNotExists(pvc); err != nil { + if err := r.helper.CreatePvcIfNotExists(pvc); err != nil { logger.Error(fmt.Sprintf("Cannot create pvc %s", pvc.Name), zap.Error(err)) return err } @@ -511,7 +511,7 @@ func (r *PatroniReconciler) processPatroniStatefulset(cr *v1.PatroniCore, deploy return err } - if err := r.helper.ResourceManager.CreateOrUpdateStatefulset(patroniDeployment, true); err != nil { + if err := r.helper.CreateOrUpdateStatefulset(patroniDeployment, true); err != nil { logger.Error(fmt.Sprintf("Cannot create or update deployment %s", patroniDeployment.Name), zap.Error(err)) return err } @@ -519,14 +519,15 @@ func (r *PatroniReconciler) processPatroniStatefulset(cr *v1.PatroniCore, deploy } func (r *PatroniReconciler) createEndpointsForEtcdAsDcs() error { - pgEndpoint := reconcileEndpoint(r.cluster.PostgresServiceName, r.cluster.PatroniLabels) - if err := r.helper.ResourceManager.CreateEndpointIfNotExists(pgEndpoint); err != nil { - logger.Error(fmt.Sprintf("Cannot create endpoint %s", pgEndpoint.Name), zap.Error(err)) + pgEndpointSlice := reconcileEndpointSlice(r.cluster.PostgresServiceName, r.cluster.PatroniLabels) + if err := r.helper.CreateEndpointSliceIfNotExists(pgEndpointSlice); err != nil { + logger.Error(fmt.Sprintf("Cannot create EndpointSlice %s", pgEndpointSlice.Name), zap.Error(err)) return err } - pgReadOnlyEndpoint := reconcileEndpoint(r.cluster.PostgresServiceName, r.cluster.PatroniLabels) - if err := r.helper.ResourceManager.CreateEndpointIfNotExists(pgReadOnlyEndpoint); err != nil { - logger.Error(fmt.Sprintf("Cannot create endpoint %s", pgReadOnlyEndpoint.Name), zap.Error(err)) + + pgReadOnlyEndpointSlice := reconcileEndpointSlice(r.cluster.PostgresServiceName+"-ro", r.cluster.PatroniLabels) + if err := 
r.helper.CreateEndpointSliceIfNotExists(pgReadOnlyEndpointSlice); err != nil { + logger.Error(fmt.Sprintf("Cannot create EndpointSlice %s", pgReadOnlyEndpointSlice.Name), zap.Error(err)) return err } return nil @@ -535,13 +536,13 @@ func (r *PatroniReconciler) createEndpointsForEtcdAsDcs() error { func (r *PatroniReconciler) createServicesForEtcdAsDcs() error { pgService := reconcileService(r.cluster.PostgresServiceName, r.cluster.PatroniLabels, r.cluster.PatroniMasterSelectors, deployment.GetPortsForPatroniService(r.cluster.ClusterName), true) - if err := r.helper.ResourceManager.CreateOrUpdateService(pgService); err != nil { + if err := r.helper.CreateOrUpdateService(pgService); err != nil { logger.Error(fmt.Sprintf("Cannot create service %s", pgService.Name), zap.Error(err)) return err } pgReadOnlyService := reconcileService(r.cluster.PatroniReplicasServiceName+"-ro", r.cluster.PatroniLabels, r.cluster.PatroniReplicasSelector, deployment.GetPortsForPatroniService(r.cluster.ClusterName), true) - if err := r.helper.ResourceManager.CreateOrUpdateService(pgReadOnlyService); err != nil { + if err := r.helper.CreateOrUpdateService(pgReadOnlyService); err != nil { logger.Error(fmt.Sprintf("Cannot create service %s", pgReadOnlyService.Name), zap.Error(err)) return err } @@ -631,7 +632,9 @@ func (r *PatroniReconciler) fixCollationVersionForDB(pgClient *pgClient.Postgres } if len(brokenIndNames) > 0 { - fixBrokenIndexes(pgClient, db, brokenIndNames) + if err = fixBrokenIndexes(pgClient, db, brokenIndNames); err != nil { + return + } } if pgVersion >= 15 { @@ -682,25 +685,27 @@ func findBrokenIndexes(pgClient *pgClient.PostgresClient, db string) ([]string, logger.Debug(fmt.Sprintf("check broken indexes for db: %s", db)) brokenIndexQuery := ` - SELECT + SELECT n.nspname AS schema_name, c.relname AS index_name - FROM + FROM pg_index i - JOIN + JOIN pg_class c ON i.indexrelid = c.oid - JOIN + JOIN pg_namespace n ON c.relnamespace = n.oid - WHERE + WHERE n.nspname NOT IN 
('pg_catalog', 'information_schema') AND i.indisvalid = false AND ( c.relname like '%_ccnew%' or c.relname like '%_ccold%') - ORDER BY + ORDER BY schema_name, index_name;` conn, err := pgClient.GetConnectionToDb(db) if err != nil { return nil, err } - defer conn.Close(context.Background()) + defer func() { + _ = conn.Close(context.Background()) + }() rows, err := conn.Query(context.Background(), brokenIndexQuery) if err != nil { @@ -744,12 +749,17 @@ func (r *PatroniReconciler) getCollationsForRefresh(pgClient *pgClient.PostgresC if err != nil { return nil, err } - defer conn.Close(context.Background()) + defer func() { + err := conn.Close(context.Background()) + if err != nil { + logger.Error("error during closing connection", zap.Error(err)) + } + }() rows, err := conn.Query(context.Background(), `SELECT distinct c.collname AS "Collation" - FROM pg_depend d - JOIN pg_collation c ON (refclassid = 'pg_collation'::regclass AND refobjid = c.oid) - WHERE c.collversion <> pg_collation_actual_version(c.oid) or c.collversion is null;`) + FROM pg_depend d + JOIN pg_collation c ON (refclassid = 'pg_collation'::regclass AND refobjid = c.oid) + WHERE c.collversion <> pg_collation_actual_version(c.oid) or c.collversion is null;`) if err != nil { logger.Error(fmt.Sprintf("error during fetching collations for database %s", db)) return nil, err @@ -772,7 +782,7 @@ func (r *PatroniReconciler) processPgWalStorageExternal() error { logger.Info("Start pg_wal copying process") - replicaPods, err := r.helper.ResourceManager.GetPodsByLabel(r.cluster.PatroniReplicasSelector) + replicaPods, err := r.helper.GetPodsByLabel(r.cluster.PatroniReplicasSelector) if err != nil { logger.Warn("Can not get replica pods to execute pg_wal copying command") } @@ -789,7 +799,7 @@ func (r *PatroniReconciler) processPgWalStorageExternal() error { } } - masterPod, err := r.helper.ResourceManager.GetPodsByLabel(r.cluster.PatroniMasterSelectors) + masterPod, err := 
r.helper.GetPodsByLabel(r.cluster.PatroniMasterSelectors) if err != nil { logger.Warn("Can not get master pod to execute pg_wal copying command") return err @@ -836,7 +846,7 @@ func (r PatroniReconciler) checkSymlinkAlreadyExist(podName, podIdentity string) func (r *PatroniReconciler) preparePgbackRest(cr *v1.PatroniCore, patroniConfigMap *corev1.ConfigMap) error { // Prepare pgbackrest configuration CM pgBackRestCm := deployment.GetPgBackRestCM(cr) - if _, err := r.helper.ResourceManager.CreateOrUpdateConfigMap(pgBackRestCm); err != nil { + if _, err := r.helper.CreateOrUpdateConfigMap(pgBackRestCm); err != nil { logger.Error(fmt.Sprintf("Cannot create or update config map %s", "pgbackrest-config"), zap.Error(err)) return err } diff --git a/pkg/reconciler/replication_controller.go b/pkg/reconciler/replication_controller.go index 889b4dcf..a4a22991 100644 --- a/pkg/reconciler/replication_controller.go +++ b/pkg/reconciler/replication_controller.go @@ -75,11 +75,11 @@ func (r *RCReconciler) Reconcile() error { rcDeployment.Spec.Template.Spec.Containers[0].VolumeMounts = append(rcDeployment.Spec.Template.Spec.Containers[0].VolumeMounts, opUtil.GetTlsSecretVolumeMount()) rcDeployment.Spec.Template.Spec.Volumes = append(rcDeployment.Spec.Template.Spec.Volumes, opUtil.GetTlsSecretVolume(cr.Spec.Tls.CertificateSecretName)) - rcDeployment.Spec.Template.Spec.Containers[0].LivenessProbe.ProbeHandler.HTTPGet.Scheme = "HTTPS" - rcDeployment.Spec.Template.Spec.Containers[0].LivenessProbe.ProbeHandler.HTTPGet.Port = intstr.IntOrString{IntVal: replicationcontroller.HttpsPort} + rcDeployment.Spec.Template.Spec.Containers[0].LivenessProbe.HTTPGet.Scheme = "HTTPS" + rcDeployment.Spec.Template.Spec.Containers[0].LivenessProbe.HTTPGet.Port = intstr.IntOrString{IntVal: replicationcontroller.HttpsPort} - rcDeployment.Spec.Template.Spec.Containers[0].ReadinessProbe.ProbeHandler.HTTPGet.Scheme = "HTTPS" - 
rcDeployment.Spec.Template.Spec.Containers[0].ReadinessProbe.ProbeHandler.HTTPGet.Port = intstr.IntOrString{IntVal: replicationcontroller.HttpsPort} + rcDeployment.Spec.Template.Spec.Containers[0].ReadinessProbe.HTTPGet.Scheme = "HTTPS" + rcDeployment.Spec.Template.Spec.Containers[0].ReadinessProbe.HTTPGet.Port = intstr.IntOrString{IntVal: replicationcontroller.HttpsPort} rcDeployment.Spec.Template.Spec.Containers[0].Ports = append(rcDeployment.Spec.Template.Spec.Containers[0].Ports, replicationcontroller.GetTLSContainerPort()) // update RC service srv.Spec.Ports = append(srv.Spec.Ports, replicationcontroller.GetTLSPort()) diff --git a/pkg/tests/tests.go b/pkg/tests/tests.go index f3594d5b..0ee01787 100644 --- a/pkg/tests/tests.go +++ b/pkg/tests/tests.go @@ -21,12 +21,11 @@ import ( "github.com/Netcracker/pgskipper-operator/pkg/deployerrors" "github.com/Netcracker/pgskipper-operator/pkg/deployment" "github.com/Netcracker/pgskipper-operator/pkg/helper" - "github.com/Netcracker/pgskipper-operator/pkg/util" opUtil "github.com/Netcracker/pgskipper-operator/pkg/util" "go.uber.org/zap" ) -var logger = util.GetLogger() +var logger = opUtil.GetLogger() type Creator struct { cr *qubershipv1.PatroniServices @@ -73,7 +72,7 @@ func (r *Creator) CreateTestsPods() error { logger.Info("Policies is not empty, setting them to Test Pod") integrationTestsPod.Spec.Tolerations = cr.Spec.Policies.Tolerations } - if err := r.helper.ResourceManager.CreatePod(integrationTestsPod); err != nil { + if err := r.helper.CreatePod(integrationTestsPod); err != nil { return err } } diff --git a/pkg/upgrade/upgrade.go b/pkg/upgrade/upgrade.go index 68e3f3ee..3d46a6c0 100644 --- a/pkg/upgrade/upgrade.go +++ b/pkg/upgrade/upgrade.go @@ -133,13 +133,13 @@ func (u *Upgrade) UpdateUpgradeToDone() error { func (u *Upgrade) CleanInitializeKey(clusterName string) error { var cm *corev1.ConfigMap cmName := fmt.Sprintf("%s-config", clusterName) - cm, err := u.helper.ResourceManager.GetConfigMap(cmName) + cm, 
err := u.helper.GetConfigMap(cmName) if err != nil { logger.Error(fmt.Sprintf("Can't get configmap %s-config", cmName), zap.Error(err)) return err } - cm.ObjectMeta.Annotations["initialize"] = "" - delete(cm.ObjectMeta.Annotations, "initialize") + cm.Annotations["initialize"] = "" + delete(cm.Annotations, "initialize") if err = u.client.Update(context.TODO(), cm); err != nil { logger.Error(fmt.Sprintf("Could not update %s config map", cmName), zap.Error(err)) return err @@ -150,7 +150,7 @@ func (u *Upgrade) CleanInitializeKey(clusterName string) error { func (u *Upgrade) GetInitDbArgs(patroniTemplate string, configMapKey string) (string, error) { var cm *corev1.ConfigMap cmName := patroniTemplate - cm, err := u.helper.ResourceManager.GetConfigMap(cmName) + cm, err := u.helper.GetConfigMap(cmName) if err != nil { logger.Error(fmt.Sprintf("Can't get configmap %s", cmName), zap.Error(err)) return "", err @@ -203,7 +203,12 @@ func (u *Upgrade) IfAbsTimeIsUsed(pgC *pgClient.PostgresClient, db string) bool if err != nil { return false } - defer conn.Close(context.Background()) + defer func() { + err := conn.Close(context.Background()) + if err != nil { + logger.Warn("Error closing connection", zap.Error(err)) + } + }() checkForAbsTimeQuery := "SELECT 1 FROM information_schema.columns WHERE data_type = 'abstime' AND table_schema <> 'pg_catalog';" rows, err := conn.Query(context.Background(), checkForAbsTimeQuery) @@ -237,7 +242,7 @@ func (u *Upgrade) applyCleanerInitContainer(leaderName string, patroniSpec *v1.P var err error cleanerInitContainer := u.GetCleanerInitContainer(patroniSpec.DockerImage) patroniDeploymentName := fmt.Sprintf("pg-%s-node", cluster.ClusterName) - if deploymentList, err = u.helper.ResourceManager.GetStatefulsetByNameRegExp(patroniDeploymentName); err != nil { + if deploymentList, err = u.helper.GetStatefulsetByNameRegExp(patroniDeploymentName); err != nil { logger.Error("Can't get Patroni Deployments", zap.Error(err)) return err } @@ -251,7 +256,7 
@@ func (u *Upgrade) applyCleanerInitContainer(leaderName string, patroniSpec *v1.P dep.Spec.Template.Spec.Containers[0].Image = patroniSpec.DockerImage dep.Spec.Replicas = &replicas - if err := u.helper.ResourceManager.CreateOrUpdateStatefulset(dep, true); err != nil { + if err := u.helper.CreateOrUpdateStatefulset(dep, true); err != nil { logger.Error("Can't update Patroni deployment", zap.Error(err)) return err } @@ -322,7 +327,7 @@ func (u *Upgrade) CheckForPreparedTransactions(pgHost string) error { func (u *Upgrade) ProceedUpgrade(cr *v1.PatroniCore, cluster *v1.PatroniClusterSettings) error { - masterPod, err := u.helper.ResourceManager.GetPodsByLabel(cluster.PatroniMasterSelectors) + masterPod, err := u.helper.GetPodsByLabel(cluster.PatroniMasterSelectors) if err != nil || len(masterPod.Items) == 0 { logger.Error("Can't get Patroni Leader for pg_dumpall execution, failing major upgrade", zap.Error(err)) return err @@ -380,7 +385,7 @@ func (u *Upgrade) ProceedUpgrade(cr *v1.PatroniCore, cluster *v1.PatroniClusterS } //deleting powa pod - if err := u.helper.ResourceManager.DeletePodsByLabel(powaUILabels); err != nil { + if err := u.helper.DeletePodsByLabel(powaUILabels); err != nil { return err } @@ -392,7 +397,7 @@ func (u *Upgrade) ProceedUpgrade(cr *v1.PatroniCore, cluster *v1.PatroniClusterS } // wait until all patroni pods will power off - patroniPods, err := u.helper.ResourceManager.GetNamespacePodListBySelectors(cluster.PatroniCommonLabels) + patroniPods, err := u.helper.GetNamespacePodListBySelectors(cluster.PatroniCommonLabels) if err != nil { return err } @@ -424,7 +429,7 @@ func (u *Upgrade) ProceedUpgrade(cr *v1.PatroniCore, cluster *v1.PatroniClusterS upgradePod.Spec.SecurityContext = patroniDeployment.Spec.Template.Spec.SecurityContext // create pod and wait till completed - if err := u.helper.ResourceManager.CreatePod(upgradePod); err != nil { + if err := u.helper.CreatePod(upgradePod); err != nil { return err } @@ -442,7 +447,7 @@ func (u 
*Upgrade) ProceedUpgrade(cr *v1.PatroniCore, cluster *v1.PatroniClusterS } // upgrade completed, apply patroni deployment - if err := u.helper.ResourceManager.CreateOrUpdateStatefulset(patroniDeployment, true); err != nil { + if err := u.helper.CreateOrUpdateStatefulset(patroniDeployment, true); err != nil { logger.Error("Can't update Patroni deployment", zap.Error(err)) return err } @@ -460,7 +465,7 @@ func (u *Upgrade) ProceedUpgrade(cr *v1.PatroniCore, cluster *v1.PatroniClusterS } // Store pg version after upgrade - updatedMasterPod, err := u.helper.ResourceManager.GetPodsByLabel(cluster.PatroniMasterSelectors) + updatedMasterPod, err := u.helper.GetPodsByLabel(cluster.PatroniMasterSelectors) if err != nil { logger.Info("Can not get master pod") } @@ -597,7 +602,7 @@ func (u *Upgrade) getPgVersionContainer(targetDockerImage string) []corev1.Conta func (u *Upgrade) ScalePowaDeployment(replicas int32) error { powaDeploymentName := "powa-ui" - deploymentsToUpdate, err := u.helper.ResourceManager.GetDeploymentsByNameRegExp(powaDeploymentName) + deploymentsToUpdate, err := u.helper.GetDeploymentsByNameRegExp(powaDeploymentName) if err != nil { return err } @@ -609,7 +614,7 @@ func (u *Upgrade) ScalePowaDeployment(replicas int32) error { for _, dep := range deploymentsToUpdate { logger.Info(fmt.Sprintf("Scale %v to %v", dep.Name, replicas)) dep.Spec.Replicas = &replicas - if err := u.helper.ResourceManager.CreateOrUpdateDeployment(dep, true); err != nil { + if err := u.helper.CreateOrUpdateDeployment(dep, true); err != nil { logger.Error("Can't update powa-ui deployment", zap.Error(err)) return err } @@ -643,7 +648,7 @@ func (u *Upgrade) RunUpgradePatroniPod(cr *v1.PatroniCore, cluster *v1.PatroniCl } } // create pod and wait till completed - if err := u.helper.ResourceManager.CreatePod(upgradeCheckPod); err != nil { + if err := u.helper.CreatePod(upgradeCheckPod); err != nil { return nil, err } if err := u.waitTillPodIsRunning(upgradeCheckPod); err != nil { @@ 
-688,7 +693,7 @@ func (u *Upgrade) getUpgradeCheckPod(cr *v1.PatroniCore) *corev1.Pod { func (u *Upgrade) CheckUpgrade(cr *v1.PatroniCore, cluster *v1.PatroniClusterSettings) bool { - masterPod, err := u.helper.ResourceManager.GetPodsByLabel(cluster.PatroniMasterSelectors) + masterPod, err := u.helper.GetPodsByLabel(cluster.PatroniMasterSelectors) if err != nil || len(masterPod.Items) == 0 { logger.Error("Can't check is major upgrade required. Master pod is not available", zap.Error(err)) return false @@ -697,7 +702,7 @@ func (u *Upgrade) CheckUpgrade(cr *v1.PatroniCore, cluster *v1.PatroniClusterSet u.helper.StoreDataToCM("pg-version", currentVersion) upgradeCheckPod, _ := u.RunUpgradePatroniPod(cr, cluster) targetVersion := u.helper.GetPGVersionFromPod(upgradeCheckPod.Name) - if err := u.helper.ResourceManager.DeletePod(upgradeCheckPod); err != nil { + if err := u.helper.DeletePod(upgradeCheckPod); err != nil { logger.Warn("Can't delete pg-upgrade-check-pod", zap.Error(err)) } diff --git a/tests/docker/pip.conf b/tests/docker/pip.conf index 9e012ed3..74347759 100644 --- a/tests/docker/pip.conf +++ b/tests/docker/pip.conf @@ -1,4 +1,4 @@ [global] index-url = https://pypi.org/simple -trusted-host = pypi.org \ No newline at end of file +trusted-host = pypi.org diff --git a/tests/docker/requirements.txt b/tests/docker/requirements.txt index d2fb3c0a..cd3d3b9a 100644 --- a/tests/docker/requirements.txt +++ b/tests/docker/requirements.txt @@ -35,4 +35,4 @@ typing_extensions==4.2.0 urllib3==1.26.17 websocket-client==1.2.3 Werkzeug==3.0.6 -zipp==3.8.0 \ No newline at end of file +zipp==3.8.0 diff --git a/tests/docker/uid_entrypoint b/tests/docker/uid_entrypoint old mode 100644 new mode 100755 index 1e3d79cd..b4aabd29 --- a/tests/docker/uid_entrypoint +++ b/tests/docker/uid_entrypoint @@ -1,17 +1,16 @@ -#!/bin/sh +#!/bin/bash cur_user=$(id -u) -if [[ "$cur_user" != "26" ]] -then - if ! 
whoami &> /dev/null; then - if [ -w /etc/passwd ]; then - echo "${USER_NAME:-postgres}:x:$(id -u):0:${USER_NAME:-postgres} user:${HOME}:/sbin/nologin" >> /etc/passwd - fi - fi +if [[ "$cur_user" != "26" ]]; then + if ! whoami &>/dev/null; then + if [ -w /etc/passwd ]; then + echo "${USER_NAME:-postgres}:x:$(id -u):0:${USER_NAME:-postgres} user:${HOME}:/sbin/nologin" >>/etc/passwd + fi + fi fi if [[ "${STATEFULSET}" =~ ^[Tt]rue$ ]]; then - echo "Copying statefulset specific robot tests" - cp -R /test_runs/statefulset/* /test_runs/ + echo "Copying statefulset specific robot tests" + cp -R /test_runs/statefulset/* /test_runs/ fi -exec "$@" \ No newline at end of file +exec "$@" diff --git a/tests/robot/check_dbaas_adapter_api/check_dbaas_adapter_backup_api.robot b/tests/robot/check_dbaas_adapter_api/check_dbaas_adapter_backup_api.robot index 3c897570..163b320d 100644 --- a/tests/robot/check_dbaas_adapter_api/check_dbaas_adapter_backup_api.robot +++ b/tests/robot/check_dbaas_adapter_api/check_dbaas_adapter_backup_api.robot @@ -67,4 +67,4 @@ Check Evict Backup By Dbaas Adapter Wait Until Keyword Succeeds ${RETRY_TIME} ${RETRY_INTERVAL} ... 
Check Backup Status By Dbaas Adapter ${trackId} Check Eviction Backup By Dbaas Adapter ${trackId} - [Teardown] Delete Test DB ${db_name} \ No newline at end of file + [Teardown] Delete Test DB ${db_name} diff --git a/tests/robot/check_dbaas_adapter_api/check_dbaas_adapter_multiple_users.robot b/tests/robot/check_dbaas_adapter_api/check_dbaas_adapter_multiple_users.robot index 163e5c69..05316ba5 100644 --- a/tests/robot/check_dbaas_adapter_api/check_dbaas_adapter_multiple_users.robot +++ b/tests/robot/check_dbaas_adapter_api/check_dbaas_adapter_multiple_users.robot @@ -22,4 +22,4 @@ Check Multiple Users Creating By Dbaas Adapter Should Contain str(${resp_conne_properties}) 'role': 'streaming' Should Contain str(${resp_conne_properties}) 'role': 'rw' Should Contain str(${resp_conne_properties}) 'role': 'ro' - [Teardown] Delete Test DB ${db_name} \ No newline at end of file + [Teardown] Delete Test DB ${db_name} diff --git a/tests/robot/check_full_backup_api/check_encrypted_backup.robot b/tests/robot/check_full_backup_api/check_encrypted_backup.robot index 93d4ca0d..21b4962e 100644 --- a/tests/robot/check_full_backup_api/check_encrypted_backup.robot +++ b/tests/robot/check_full_backup_api/check_encrypted_backup.robot @@ -39,4 +39,4 @@ Check Encryption For Encrypted Backups ${daemon_pod}= Get Pod Daemon Log ${daemon_pod} ${exit_code}= Check Archive Is Accessible lastSuccessfulId daemon_pod - Should Be Equal ${exit_code} ${1} \ No newline at end of file + Should Be Equal ${exit_code} ${1} diff --git a/tests/robot/check_full_backup_api/check_encryption_switching.robot b/tests/robot/check_full_backup_api/check_encryption_switching.robot index 29218ef0..f7934838 100644 --- a/tests/robot/check_full_backup_api/check_encryption_switching.robot +++ b/tests/robot/check_full_backup_api/check_encryption_switching.robot @@ -44,4 +44,4 @@ Get Encryption Status Return State ${state} ${key_source} ${key_name} Run Keyword If '${state}' == "Off" Turn Encryption Off - Run Keyword If 
'${state}' == "On" Turn Encryption On ${key_source} ${key_name} \ No newline at end of file + Run Keyword If '${state}' == "On" Turn Encryption On ${key_source} ${key_name} diff --git a/tests/robot/check_full_backup_api_no_metric_file/check_full_backup_creation.robot b/tests/robot/check_full_backup_api_no_metric_file/check_full_backup_creation.robot index 3b69b645..8b6eb737 100644 --- a/tests/robot/check_full_backup_api_no_metric_file/check_full_backup_creation.robot +++ b/tests/robot/check_full_backup_api_no_metric_file/check_full_backup_creation.robot @@ -8,4 +8,4 @@ Check Daemon Full Backup Creation With Previous Backup Without Metric File [Documentation] ... This test validates if daemon pod can perform full backup with previous backup without metric file ... - Check Backup Api With Broken Metric File \ No newline at end of file + Check Backup Api With Broken Metric File diff --git a/tests/robot/check_full_backup_eviction/check_full_backup_eviction.robot b/tests/robot/check_full_backup_eviction/check_full_backup_eviction.robot index b17c6ebb..8c29ac6e 100644 --- a/tests/robot/check_full_backup_eviction/check_full_backup_eviction.robot +++ b/tests/robot/check_full_backup_eviction/check_full_backup_eviction.robot @@ -8,4 +8,4 @@ Check Daemon Eviction API For Full Backups [Documentation] ... This test validates if daemon can evict backup via REST API ... 
- Check Evict Api \ No newline at end of file + Check Evict Api diff --git a/tests/robot/check_granular_api/check_granular_delete_backup_auth_api.robot b/tests/robot/check_granular_api/check_granular_delete_backup_auth_api.robot index 6d774440..1e89cac7 100644 --- a/tests/robot/check_granular_api/check_granular_delete_backup_auth_api.robot +++ b/tests/robot/check_granular_api/check_granular_delete_backup_auth_api.robot @@ -15,4 +15,4 @@ Check Backup Requests Status Endpoint Given Check /backups Endpoint For Granular Backups When Create Backup And Wait Till Complete And Delete Granular Backup - Then Backup Not Exist \ No newline at end of file + Then Backup Not Exist diff --git a/tests/robot/check_granular_api/check_granular_restore_status_auth_api.robot b/tests/robot/check_granular_api/check_granular_restore_status_auth_api.robot index da826bcd..b43ec26c 100644 --- a/tests/robot/check_granular_api/check_granular_restore_status_auth_api.robot +++ b/tests/robot/check_granular_api/check_granular_restore_status_auth_api.robot @@ -167,4 +167,4 @@ Check Enabled Auth not existing Create Session postgres_backup_daemon ${scheme}://postgres-backup-daemon:9000 auth=${auth} ${resp}= Get On Session postgres_backup_daemon url=/backup/status/restore-42 expected_status=404 - Should Be Equal ${resp.status_code} ${404} \ No newline at end of file + Should Be Equal ${resp.status_code} ${404} diff --git a/tests/robot/check_granular_api/check_granular_restore_with_owner_auth.robot b/tests/robot/check_granular_api/check_granular_restore_with_owner_auth.robot index 4eca48df..74df7f7f 100644 --- a/tests/robot/check_granular_api/check_granular_restore_with_owner_auth.robot +++ b/tests/robot/check_granular_api/check_granular_restore_with_owner_auth.robot @@ -117,4 +117,4 @@ Check Enabled Auth With Owner Of DB Check That Role Exists Check That Table Contains Values ${resp}= Get On Session postgres_backup_daemon url=/delete/${backup_id}?namespace=${name_space} - Should Be Equal 
${resp.status_code} ${200} \ No newline at end of file + Should Be Equal ${resp.status_code} ${200} diff --git a/tests/robot/check_manual_switchover/check_manual_switchover.robot b/tests/robot/check_manual_switchover/check_manual_switchover.robot index fd0b4405..a23853bd 100644 --- a/tests/robot/check_manual_switchover/check_manual_switchover.robot +++ b/tests/robot/check_manual_switchover/check_manual_switchover.robot @@ -16,4 +16,4 @@ Manual Switchover Via Patroni REST [Tags] patroni full check_manual_switchover Given Patroni Cluster Is Healthy When Manual Switchover Via Patroni REST Is Called - Then Patroni Cluster Is Healthy \ No newline at end of file + Then Patroni Cluster Is Healthy diff --git a/tests/robot/check_terminate_backup_api/keywords.robot b/tests/robot/check_terminate_backup_api/keywords.robot index 45428b67..0a303ec6 100644 --- a/tests/robot/check_terminate_backup_api/keywords.robot +++ b/tests/robot/check_terminate_backup_api/keywords.robot @@ -37,4 +37,4 @@ Prepare Auth ${POSTGRES_USER}= Get Environment Variable POSTGRES_USER default=postgres ${PG_ROOT_PASSWORD}= Get Environment Variable PG_ROOT_PASSWORD ${auth}= Create List ${POSTGRES_USER} ${PG_ROOT_PASSWORD} - reterun ${auth} \ No newline at end of file + reterun ${auth} diff --git a/tests/robot/check_wal_archiving_is_working/check_wal_archiving_is_working.robot b/tests/robot/check_wal_archiving_is_working/check_wal_archiving_is_working.robot index cad47052..507dde9b 100644 --- a/tests/robot/check_wal_archiving_is_working/check_wal_archiving_is_working.robot +++ b/tests/robot/check_wal_archiving_is_working/check_wal_archiving_is_working.robot @@ -19,4 +19,4 @@ Check That After Switching Of WAL File, File Exists In Backup Storage [Tags] backup full Stability-tests check_wal_archiving Given Backup Daemon Is Working When WAL Archive Switched - Then New WAL Files Are Presented On Storage \ No newline at end of file + Then New WAL Files Are Presented On Storage diff --git 
a/tests/robot/patroni_rest_api_auth/check_rest_api_auth.robot b/tests/robot/patroni_rest_api_auth/check_rest_api_auth.robot index 11c011e0..17338e11 100644 --- a/tests/robot/patroni_rest_api_auth/check_rest_api_auth.robot +++ b/tests/robot/patroni_rest_api_auth/check_rest_api_auth.robot @@ -18,4 +18,4 @@ check patroni REST authentication ... Check if patroni REST authentication working -*** Keywords *** \ No newline at end of file +*** Keywords ***