diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ce95bb1c6..a5fd35fdc 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,7 +2,7 @@ We warmly welcome and greatly appreciate contributions from the community. By participating you agree to the [code of -conduct](https://github.com/greenplum-db/gpbackup/blob/master/CODE-OF-CONDUCT.md). +conduct](https://github.com/greenplum-db/gpbackup/blob/main/CODE-OF-CONDUCT.md). Overall, we follow GPDB's comprehensive contribution policy. Please refer to it [here](https://github.com/greenplum-db/gpdb#contributing) for details. @@ -22,7 +22,7 @@ for details. * Try and follow similar coding styles as found throughout the code base. * Make commits as logical units for ease of reviewing. -* Rebase with master often to stay in sync with upstream. +* Rebase with main often to stay in sync with upstream. * Add new tests to cover your code. We use [Ginkgo](http://onsi.github.io/ginkgo/) and [Gomega](https://onsi.github.io/gomega/) for testing. @@ -44,9 +44,9 @@ git commit --fixup -- or -- git commit --squash ``` -* Once approved, before merging into master squash your fixups with: +* Once approved, before merging into main squash your fixups with: ``` -git rebase -i --autosquash origin/master +git rebase -i --autosquash origin/main git push --force-with-lease $USER ``` diff --git a/Makefile b/Makefile index 7f3dcf652..2effdbfcc 100644 --- a/Makefile +++ b/Makefile @@ -27,15 +27,25 @@ DEBUG=-gcflags=all="-N -l" CUSTOM_BACKUP_DIR ?= "/tmp" helper_path ?= $(BIN_DIR)/$(HELPER) +# Prefer gpsync as the newer utility, fall back to gpscp if not present (older installs) +ifeq (, $(shell which gpsync)) +COPYUTIL=gpscp +else +COPYUTIL=gpsync +endif + depend : go mod download -$(GINKGO) : # v1.14.0 is compatible with centos6 default gcc version - go install github.com/onsi/ginkgo/v2/ginkgo@v2.2.0 +$(GINKGO) : + go install github.com/onsi/ginkgo/v2/ginkgo@v2.8.4 $(GOIMPORTS) : go install golang.org/x/tools/cmd/goimports@latest +$(GOSQLITE) : + go install github.com/mattn/go-sqlite3 + format : $(GOIMPORTS) @goimports -w $(shell find . 
-type f -name '*.go' -not -path "./vendor/*") @@ -56,7 +66,7 @@ unit_all_gpdb_versions : $(GINKGO) TEST_GPDB_VERSION=4.3.999 ginkgo $(GINKGO_FLAGS) $(SUBDIRS_HAS_UNIT) 2>&1 TEST_GPDB_VERSION=5.999.0 ginkgo $(GINKGO_FLAGS) $(SUBDIRS_HAS_UNIT) 2>&1 TEST_GPDB_VERSION=6.999.0 ginkgo $(GINKGO_FLAGS) $(SUBDIRS_HAS_UNIT) 2>&1 - TEST_GPDB_VERSION=7.999.0 ginkgo $(GINKGO_FLAGS) $(SUBDIRS_HAS_UNIT) 2>&1 # GPDB master + TEST_GPDB_VERSION=7.999.0 ginkgo $(GINKGO_FLAGS) $(SUBDIRS_HAS_UNIT) 2>&1 # GPDB main integration : $(GINKGO) ginkgo $(GINKGO_FLAGS) integration 2>&1 @@ -64,20 +74,20 @@ integration : $(GINKGO) test : build unit integration end_to_end : $(GINKGO) - ginkgo $(GINKGO_FLAGS) --slow-spec-threshold=10s end_to_end -- --custom_backup_dir $(CUSTOM_BACKUP_DIR) 2>&1 + ginkgo $(GINKGO_FLAGS) --timeout=3h --poll-progress-after=0s end_to_end -- --custom_backup_dir $(CUSTOM_BACKUP_DIR) 2>&1 coverage : @./show_coverage.sh -build : - $(GO_BUILD) -tags '$(BACKUP)' -o $(BIN_DIR)/$(BACKUP) -ldflags "-X $(BACKUP_VERSION_STR)" - $(GO_BUILD) -tags '$(RESTORE)' -o $(BIN_DIR)/$(RESTORE) -ldflags "-X $(RESTORE_VERSION_STR)" - $(GO_BUILD) -tags '$(HELPER)' -o $(BIN_DIR)/$(HELPER) -ldflags "-X $(HELPER_VERSION_STR)" +build : $(GOSQLITE) + CGO_ENABLED=1 $(GO_BUILD) -tags '$(BACKUP)' -o $(BIN_DIR)/$(BACKUP) --ldflags '-X $(BACKUP_VERSION_STR)' + CGO_ENABLED=1 $(GO_BUILD) -tags '$(RESTORE)' -o $(BIN_DIR)/$(RESTORE) --ldflags '-X $(RESTORE_VERSION_STR)' + CGO_ENABLED=1 $(GO_BUILD) -tags '$(HELPER)' -o $(BIN_DIR)/$(HELPER) --ldflags '-X $(HELPER_VERSION_STR)' debug : - $(GO_BUILD) -tags '$(BACKUP)' -o $(BIN_DIR)/$(BACKUP) -ldflags "-X $(BACKUP_VERSION_STR)" $(DEBUG) - $(GO_BUILD) -tags '$(RESTORE)' -o $(BIN_DIR)/$(RESTORE) -ldflags "-X $(RESTORE_VERSION_STR)" $(DEBUG) - $(GO_BUILD) -tags '$(HELPER)' -o $(BIN_DIR)/$(HELPER) -ldflags "-X $(HELPER_VERSION_STR)" $(DEBUG) + CGO_ENABLED=1 $(GO_BUILD) -tags '$(BACKUP)' -o $(BIN_DIR)/$(BACKUP) -ldflags "-X $(BACKUP_VERSION_STR)" $(DEBUG) + CGO_ENABLED=1 $(GO_BUILD) -tags '$(RESTORE)' -o $(BIN_DIR)/$(RESTORE) -ldflags "-X $(RESTORE_VERSION_STR)" $(DEBUG) + CGO_ENABLED=1 $(GO_BUILD) -tags '$(HELPER)' -o $(BIN_DIR)/$(HELPER) -ldflags "-X $(HELPER_VERSION_STR)" $(DEBUG) build_linux : env GOOS=linux GOARCH=amd64 $(GO_BUILD) -tags '$(BACKUP)' -o $(BACKUP) -ldflags "-X $(BACKUP_VERSION_STR)" @@ -88,7 +98,7 @@ install : cp $(BIN_DIR)/$(BACKUP) $(BIN_DIR)/$(RESTORE) $(GPHOME)/bin @psql -X -t -d template1 -c 'select distinct hostname from gp_segment_configuration where content != -1' > /tmp/seg_hosts 2>/dev/null; \ if [ $$? -eq 0 ]; then \ - gpscp -f /tmp/seg_hosts $(helper_path) =:$(GPHOME)/bin/$(HELPER); \ + $(COPYUTIL) -f /tmp/seg_hosts $(helper_path) =:$(GPHOME)/bin/$(HELPER); \ if [ $$? -eq 0 ]; then \ echo 'Successfully copied gpbackup_helper to $(GPHOME) on all segments'; \ else \ diff --git a/README.md b/README.md index edacb235c..f1a3cbdef 100644 --- a/README.md +++ b/README.md @@ -5,6 +5,7 @@ ## Pre-Requisites The project requires the Go Programming language version 1.11 or higher. Follow the directions [here](https://golang.org/doc/) for installation, usage and configuration instructions. +The project also has a dependency on `sqlite3`. This is installed by default on many platforms, but you must install it on your system if it is not present. 
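+
+For illustration only (this sketch is not part of gpbackup, and the path and table below are hypothetical): the dependency exists because backup history is now stored in a SQLite database accessed through the `github.com/mattn/go-sqlite3` driver, which is also why the build now sets `CGO_ENABLED=1`:
+
+```go
+package main
+
+import (
+	"database/sql"
+	"fmt"
+	"log"
+
+	// Registers the "sqlite3" driver; building it requires CGO_ENABLED=1 and a C toolchain.
+	_ "github.com/mattn/go-sqlite3"
+)
+
+func main() {
+	// Hypothetical path; gpbackup derives the real location from the backup directory.
+	db, err := sql.Open("sqlite3", "/tmp/gpbackup_history.db")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	// Minimal stand-in for the kind of table the history database contains.
+	if _, err := db.Exec(`CREATE TABLE IF NOT EXISTS backups (timestamp TEXT PRIMARY KEY)`); err != nil {
+		log.Fatal(err)
+	}
+
+	var count int
+	if err := db.QueryRow(`SELECT count(*) FROM backups`).Scan(&count); err != nil {
+		log.Fatal(err)
+	}
+	fmt.Printf("history entries: %d\n", count)
+}
+```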
## Downloading diff --git a/arenadata/run_gpbackup_tests.bash b/arenadata/run_gpbackup_tests.bash index ccea91d1d..94c23daea 100644 --- a/arenadata/run_gpbackup_tests.bash +++ b/arenadata/run_gpbackup_tests.bash @@ -9,11 +9,11 @@ make -C gpdb_src/contrib/dummy_seclabel/ install gpdb_src/concourse/scripts/setup_gpadmin_user.bash make_cluster +wget https://golang.org/dl/go1.20.5.linux-amd64.tar.gz -O - | tar -C /opt -xz; + su - gpadmin -c " source /usr/local/greenplum-db-devel/greenplum_path.sh; source ~/gpdb_src/gpAux/gpdemo/gpdemo-env.sh; gpconfig -c shared_preload_libraries -v dummy_seclabel; gpstop -ar; -wget https://golang.org/dl/go1.17.6.linux-amd64.tar.gz; -tar -C ~/ -xzf go1.17.6.linux-amd64.tar.gz; -PATH=$PATH:~/go/bin GOPATH=~/go make depend build install test end_to_end -C go/src/github.com/greenplum-db/gpbackup/" +PATH=$PATH:/opt/go/bin:~/go/bin GOPATH=~/go make depend build install test end_to_end -C /home/gpadmin/go/src/github.com/greenplum-db/gpbackup" diff --git a/backup/backup.go b/backup/backup.go index f1f6bb5d0..1d6d2d3b1 100644 --- a/backup/backup.go +++ b/backup/backup.go @@ -382,22 +382,39 @@ func DoTeardown() { if statErr != nil { // Even if this isn't os.IsNotExist, don't try to write a report file in case of further errors return } - historyFilename := globalFPInfo.GetBackupHistoryFilePath() + historyDBName := globalFPInfo.GetBackupHistoryDatabasePath() + historyFileLegacyName := globalFPInfo.GetBackupHistoryFilePath() reportFilename := globalFPInfo.GetBackupReportFilePath() configFilename := globalFPInfo.GetConfigFilePath() time.Sleep(time.Second) // We sleep for 1 second to ensure multiple backups do not start within the same second. + // Check if legacy history file is still present, log warning if so. Only log if we're planning to use history db. + var err error + if _, err = os.Stat(historyFileLegacyName); err == nil && !MustGetFlagBool(options.NO_HISTORY) { + gplog.Warn("Legacy gpbackup_history file %s is still present. Please run 'gpbackup_manager migrate-history' to add entries from that file to the history database.", historyFileLegacyName) + } + if backupReport != nil { if !backupFailed { backupReport.BackupConfig.Status = history.BackupStatusSucceed } backupReport.ConstructBackupParamsString() backupReport.BackupConfig.SegmentCount = len(globalCluster.ContentIDs) - 1 - err := history.WriteBackupHistory(historyFilename, &backupReport.BackupConfig) - if err != nil { - gplog.Error(fmt.Sprintf("%v", err)) + + if !MustGetFlagBool(options.NO_HISTORY) { + historyDB, err := history.InitializeHistoryDatabase(historyDBName) + if err != nil { + gplog.Error(fmt.Sprintf("%v", err)) + } else { + err = history.StoreBackupHistory(historyDB, &backupReport.BackupConfig) + historyDB.Close() + if err != nil { + gplog.Error(fmt.Sprintf("%v", err)) + } + } } + history.WriteConfigFile(&backupReport.BackupConfig, configFilename) if backupReport.BackupConfig.EndTime == "" { backupReport.BackupConfig.EndTime = history.CurrentTimestamp() @@ -447,7 +464,7 @@ func DoCleanup(backupFailed bool) { // If the terminate query is sent via a connection with an active COPY command, and the COPY's pipe is cleaned up, the COPY query will hang. 
// This results in the DoCleanup function passed to the signal handler to never return, blocking the os.Exit call if wasTerminated { - // It is possible for the COPY command to become orphaned if an agent process is killed + // It is possible for the COPY command to become orphaned if an agent process is stopped utils.TerminateHangingCopySessions(connectionPool, globalFPInfo, fmt.Sprintf("gpbackup_%s", globalFPInfo.Timestamp)) } if backupFailed { diff --git a/backup/data.go b/backup/data.go index feb134729..19262eed2 100644 --- a/backup/data.go +++ b/backup/data.go @@ -51,7 +51,7 @@ func AddTableDataEntriesToTOC(tables []Table, rowsCopiedMaps []map[uint32]int64) } } attributes := ConstructTableAttributesList(table.ColumnDefs) - globalTOC.AddMasterDataEntry(table.Schema, table.Name, table.Oid, attributes, rowsCopied, table.PartitionLevelInfo.RootName, table.DistPolicy) + globalTOC.AddCoordinatorDataEntry(table.Schema, table.Name, table.Oid, attributes, rowsCopied, table.PartitionLevelInfo.RootName, table.DistPolicy) } } } @@ -144,7 +144,7 @@ func backupDataForAllTables(tables []Table) []map[uint32]int64 { rowsCopiedMaps := make([]map[uint32]int64, connectionPool.NumConns) /* * We break when an interrupt is received and rely on - * TerminateHangingCopySessions to kill any COPY statements + * TerminateHangingCopySessions to halt any COPY statements * in progress if they don't finish on their own. */ tasks := make(chan Table, len(tables)) @@ -337,7 +337,9 @@ func GetBackupDataSet(tables []Table) ([]Table, int64) { // the lock, the call will fail instead of block. Return the failure for handling. func LockTableNoWait(dataTable Table, connNum int) error { var lockMode string - if connectionPool.Version.AtLeast("6.21.0") { + if connectionPool.Version.AtLeast("7") { + lockMode = `IN ACCESS SHARE MODE NOWAIT COORDINATOR ONLY` + } else if connectionPool.Version.AtLeast("6.21.0") { lockMode = `IN ACCESS SHARE MODE NOWAIT MASTER ONLY` } else { lockMode = `IN ACCESS SHARE MODE NOWAIT` diff --git a/backup/data_test.go b/backup/data_test.go index 89a74037c..2743d1328 100644 --- a/backup/data_test.go +++ b/backup/data_test.go @@ -54,7 +54,7 @@ var _ = Describe("backup/data tests", func() { It("adds an entry for a regular table to the TOC", func() { tables := []backup.Table{table} backup.AddTableDataEntriesToTOC(tables, rowsCopiedMaps) - expectedDataEntries := []toc.MasterDataEntry{{Schema: "public", Name: "table", Oid: 1, AttributeString: "(a)"}} + expectedDataEntries := []toc.CoordinatorDataEntry{{Schema: "public", Name: "table", Oid: 1, AttributeString: "(a)"}} Expect(tocfile.DataEntries).To(Equal(expectedDataEntries)) }) It("does not add an entry for an external table to the TOC", func() { diff --git a/backup/incremental.go b/backup/incremental.go index 66d0765d0..9aa6ab8e1 100644 --- a/backup/incremental.go +++ b/backup/incremental.go @@ -1,10 +1,11 @@ package backup import ( + "fmt" "path" "github.com/greenplum-db/gp-common-go-libs/gplog" - "github.com/greenplum-db/gp-common-go-libs/iohelper" + "github.com/greenplum-db/gp-common-go-libs/operating" "github.com/greenplum-db/gpbackup/history" "github.com/greenplum-db/gpbackup/options" "github.com/greenplum-db/gpbackup/toc" @@ -43,13 +44,12 @@ func GetTargetBackupTimestamp() string { func GetLatestMatchingBackupTimestamp() string { latestTimestamp := "" - var contents *history.History var latestMatchingBackupHistoryEntry *history.BackupConfig - var err error - if iohelper.FileExistsAndIsReadable(globalFPInfo.GetBackupHistoryFilePath()) { - contents, _, 
err = history.NewHistory(globalFPInfo.GetBackupHistoryFilePath()) - gplog.FatalOnError(err) - latestMatchingBackupHistoryEntry = GetLatestMatchingBackupConfig(contents, &backupReport.BackupConfig) + + historyDBPath := globalFPInfo.GetBackupHistoryDatabasePath() + _, err := operating.System.Stat(historyDBPath) + if err == nil { + latestMatchingBackupHistoryEntry = GetLatestMatchingBackupConfig(historyDBPath, &backupReport.BackupConfig) } if latestMatchingBackupHistoryEntry == nil { @@ -62,10 +62,54 @@ func GetLatestMatchingBackupTimestamp() string { return latestTimestamp } -func GetLatestMatchingBackupConfig(history *history.History, currentBackupConfig *history.BackupConfig) *history.BackupConfig { - for _, backupConfig := range history.BackupConfigs { - if matchesIncrementalFlags(&backupConfig, currentBackupConfig) && !backupConfig.Failed() { - return &backupConfig +func GetLatestMatchingBackupConfig(historyDBPath string, currentBackupConfig *history.BackupConfig) *history.BackupConfig { + // get list of timestamps for backups that match filterable flags, most recent first, then + // iterate through them querying and checking one at a time. this is necessary due to the + // impracticality of checking the include and exclude sets directly in a query + + historyDB, _ := history.InitializeHistoryDatabase(historyDBPath) + defer historyDB.Close() + + whereClause := fmt.Sprintf(`backup_dir = '%s' AND database_name = '%s' AND leaf_partition_data = %v + AND plugin = '%s' AND single_data_file = %v AND compressed = %v AND date_deleted = ''`, + MustGetFlagString(options.BACKUP_DIR), + currentBackupConfig.DatabaseName, + MustGetFlagBool(options.LEAF_PARTITION_DATA), + currentBackupConfig.Plugin, + MustGetFlagBool(options.SINGLE_DATA_FILE), + currentBackupConfig.Compressed) + + getBackupTimestampsQuery := fmt.Sprintf(` + SELECT timestamp + FROM backups + WHERE %s + ORDER BY timestamp DESC`, whereClause) + timestampRows, err := historyDB.Query(getBackupTimestampsQuery) + if err != nil { + gplog.Error(err.Error()) + return nil + } + defer timestampRows.Close() + + timestamps := make([]string, 0) + for timestampRows.Next() { + var timestamp string + err = timestampRows.Scan(&timestamp) + if err != nil { + gplog.Error(err.Error()) + return nil + } + timestamps = append(timestamps, timestamp) + } + + for _, ts := range timestamps { + backupConfig, err := history.GetBackupConfig(ts, historyDB) + if err != nil { + gplog.Error(err.Error()) + return nil + } + if !backupConfig.Failed() && matchesIncrementalFlags(backupConfig, currentBackupConfig) { + return backupConfig } } diff --git a/backup/incremental_test.go b/backup/incremental_test.go index 2a5b1183f..76d01762d 100644 --- a/backup/incremental_test.go +++ b/backup/incremental_test.go @@ -1,6 +1,8 @@ package backup_test import ( + "os" + "github.com/greenplum-db/gp-common-go-libs/operating" "github.com/greenplum-db/gp-common-go-libs/structmatcher" "github.com/greenplum-db/gp-common-go-libs/testhelper" @@ -79,39 +81,80 @@ var _ = Describe("backup/incremental tests", func() { }) Describe("GetLatestMatchingBackupConfig", func() { - contents := history.History{BackupConfigs: []history.BackupConfig{ - {DatabaseName: "test2", Timestamp: "timestamp4", Status: history.BackupStatusFailed}, - {DatabaseName: "test1", Timestamp: "timestamp3"}, - {DatabaseName: "test2", Timestamp: "timestamp2"}, - {DatabaseName: "test1", Timestamp: "timestamp1"}, - }} - It("Should return the latest backup's timestamp with matching Dbname", func() { - currentBackupConfig := history.BackupConfig{DatabaseName: "test1"}
+ historyDBPath := "/tmp/hist.db" + contents := []history.BackupConfig{ + { + DatabaseName: "test2", + Timestamp: "timestamp4", + Status: history.BackupStatusFailed, + ExcludeRelations: []string{}, + ExcludeSchemas: []string{}, + IncludeRelations: []string{}, + IncludeSchemas: []string{}, + RestorePlan: []history.RestorePlanEntry{}, + }, + { + DatabaseName: "test1", + Timestamp: "timestamp3", + ExcludeRelations: []string{}, + ExcludeSchemas: []string{}, + IncludeRelations: []string{}, + IncludeSchemas: []string{}, + RestorePlan: []history.RestorePlanEntry{}}, + { + DatabaseName: "test2", + Timestamp: "timestamp2", + ExcludeRelations: []string{}, + ExcludeSchemas: []string{}, + IncludeRelations: []string{}, + IncludeSchemas: []string{}, + RestorePlan: []history.RestorePlanEntry{}, + }, + { + DatabaseName: "test1", + Timestamp: "timestamp1", + ExcludeRelations: []string{}, + ExcludeSchemas: []string{}, + IncludeRelations: []string{}, + IncludeSchemas: []string{}, + RestorePlan: []history.RestorePlanEntry{}, + }, + } + BeforeEach(func() { + os.Remove(historyDBPath) + historyDB, _ := history.InitializeHistoryDatabase(historyDBPath) + for _, backupConfig := range contents { + history.StoreBackupHistory(historyDB, &backupConfig) + } + historyDB.Close() + }) - latestBackupHistoryEntry := backup.GetLatestMatchingBackupConfig(&contents, &currentBackupConfig) + AfterEach(func() { + os.Remove(historyDBPath) + }) - structmatcher.ExpectStructsToMatch(contents.BackupConfigs[1], latestBackupHistoryEntry) + It("Should return the latest backup's timestamp with matching Dbname", func() { + currentBackupConfig := history.BackupConfig{DatabaseName: "test1"} + latestBackupHistoryEntry := backup.GetLatestMatchingBackupConfig(historyDBPath, &currentBackupConfig) + // endtime is set dynamically on storage, so force it to match + contents[1].EndTime = latestBackupHistoryEntry.EndTime + structmatcher.ExpectStructsToMatch(contents[1], latestBackupHistoryEntry) }) It("Should return the latest matching backup's timestamp that did not fail", func() { currentBackupConfig := history.BackupConfig{DatabaseName: "test2"} - - latestBackupHistoryEntry := backup.GetLatestMatchingBackupConfig(&contents, &currentBackupConfig) - - structmatcher.ExpectStructsToMatch(contents.BackupConfigs[2], latestBackupHistoryEntry) + latestBackupHistoryEntry := backup.GetLatestMatchingBackupConfig(historyDBPath, &currentBackupConfig) + contents[2].EndTime = latestBackupHistoryEntry.EndTime + structmatcher.ExpectStructsToMatch(contents[2], latestBackupHistoryEntry) }) It("should return nil with no matching Dbname", func() { currentBackupConfig := history.BackupConfig{DatabaseName: "test3"} - - latestBackupHistoryEntry := backup.GetLatestMatchingBackupConfig(&contents, &currentBackupConfig) - + latestBackupHistoryEntry := backup.GetLatestMatchingBackupConfig(historyDBPath, &currentBackupConfig) Expect(latestBackupHistoryEntry).To(BeNil()) }) It("should return nil with an empty history", func() { currentBackupConfig := history.BackupConfig{} - - latestBackupHistoryEntry := backup.
- GetLatestMatchingBackupConfig(&history.History{BackupConfigs: []history.BackupConfig{}}, &currentBackupConfig) - + os.Remove(historyDBPath) + latestBackupHistoryEntry := backup.GetLatestMatchingBackupConfig(historyDBPath, &currentBackupConfig) Expect(latestBackupHistoryEntry).To(BeNil()) }) }) diff --git a/backup/metadata_globals.go b/backup/metadata_globals.go index 025c7329d..4f1f50bf2 100644 --- a/backup/metadata_globals.go +++ b/backup/metadata_globals.go @@ -12,7 +12,7 @@ import ( /* * This file contains structs and functions related to backing up global cluster - * metadata on the master that needs to be restored before data is restored, + * metadata on the coordinator that needs to be restored before data is restored, * such as roles and database configuration. */ @@ -106,14 +106,25 @@ func PrintResetResourceGroupStatements(metadataFile *utils.FileWithByteCount, to * failing case is that default_group has memory_limit=100 and admin_group * has memory_limit=0, but this should not happen in real world. */ - defSettings := []struct { + + type DefSetting struct { name string setting string - }{ - {"admin_group", "SET CPU_RATE_LIMIT 1"}, - {"admin_group", "SET MEMORY_LIMIT 1"}, - {"default_group", "SET CPU_RATE_LIMIT 1"}, - {"default_group", "SET MEMORY_LIMIT 1"}, + } + defSettings := make([]DefSetting, 0) + + if connectionPool.Version.Before("7") { + defSettings = append(defSettings, DefSetting{"admin_group", "SET CPU_RATE_LIMIT 1"}) + defSettings = append(defSettings, DefSetting{"admin_group", "SET MEMORY_LIMIT 1"}) + defSettings = append(defSettings, DefSetting{"default_group", "SET CPU_RATE_LIMIT 1"}) + defSettings = append(defSettings, DefSetting{"default_group", "SET MEMORY_LIMIT 1"}) + } else { // GPDB7+ + defSettings = append(defSettings, DefSetting{"admin_group", "SET CPU_HARD_QUOTA_LIMIT 1"}) + defSettings = append(defSettings, DefSetting{"admin_group", "SET CPU_SOFT_PRIORITY 100"}) + defSettings = append(defSettings, DefSetting{"default_group", "SET CPU_HARD_QUOTA_LIMIT 1"}) + defSettings = append(defSettings, DefSetting{"default_group", "SET CPU_SOFT_PRIORITY 100"}) + defSettings = append(defSettings, DefSetting{"system_group", "SET CPU_HARD_QUOTA_LIMIT 1"}) + defSettings = append(defSettings, DefSetting{"system_group", "SET CPU_SOFT_PRIORITY 100"}) } for _, prepare := range defSettings { @@ -125,7 +136,64 @@ } } -func PrintCreateResourceGroupStatements(metadataFile *utils.FileWithByteCount, toc *toc.TOC, resGroups []ResourceGroup, resGroupMetadata MetadataMap) { +func PrintCreateResourceGroupStatementsAtLeast7(metadataFile *utils.FileWithByteCount, toc *toc.TOC, resGroups []ResourceGroupAtLeast7, resGroupMetadata MetadataMap) { + for _, resGroup := range resGroups { + var start uint64 + section, entry := resGroup.GetMetadataEntry() + if resGroup.Name == "default_group" || resGroup.Name == "admin_group" || resGroup.Name == "system_group" { + resGroupList := []struct { + setting string + value string + }{ + {"CPU_SOFT_PRIORITY", resGroup.CpuSoftPriority}, + {"CONCURRENCY", resGroup.Concurrency}, + } + for _, property := range resGroupList { + start = metadataFile.ByteCount + metadataFile.MustPrintf("\n\nALTER RESOURCE GROUP %s SET %s %s;", resGroup.Name, property.setting, property.value) + + toc.AddMetadataEntry(section, entry, start, metadataFile.ByteCount) + } + + /* special handling for cpu properties */ + // TODO -- why do we handle these separately? + // TODO -- is this still necessary for 7?
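+ // A leading "-" in cpu_hard_quota_limit indicates the group runs in cpuset mode rather than cpu-rate mode, so exactly one of the two settings is emitted below.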
+ start = metadataFile.ByteCount + if !strings.HasPrefix(resGroup.CpuHardQuotaLimit, "-") { + /* cpu rate mode */ + metadataFile.MustPrintf("\n\nALTER RESOURCE GROUP %s SET CPU_HARD_QUOTA_LIMIT %s;", resGroup.Name, resGroup.CpuHardQuotaLimit) + } else { + /* cpuset mode */ + metadataFile.MustPrintf("\n\nALTER RESOURCE GROUP %s SET CPUSET '%s';", resGroup.Name, resGroup.Cpuset) + } + + toc.AddMetadataEntry(section, entry, start, metadataFile.ByteCount) + PrintObjectMetadata(metadataFile, toc, resGroupMetadata[resGroup.GetUniqueID()], resGroup, "") + } else { + start = metadataFile.ByteCount + attributes := make([]string, 0) + + /* special handling for cpu properties */ + // TODO -- why do we handle these separately? + // TODO -- is this still necessary for 7? + if !strings.HasPrefix(resGroup.CpuHardQuotaLimit, "-") { + /* cpu rate mode */ + attributes = append(attributes, fmt.Sprintf("CPU_HARD_QUOTA_LIMIT=%s", resGroup.CpuHardQuotaLimit)) + } else { + /* cpuset mode */ + attributes = append(attributes, fmt.Sprintf("CPUSET='%s'", resGroup.Cpuset)) + } + attributes = append(attributes, fmt.Sprintf("CPU_SOFT_PRIORITY=%s", resGroup.CpuSoftPriority)) + attributes = append(attributes, fmt.Sprintf("CONCURRENCY=%s", resGroup.Concurrency)) + metadataFile.MustPrintf("\n\nCREATE RESOURCE GROUP %s WITH (%s);", resGroup.Name, strings.Join(attributes, ", ")) + + toc.AddMetadataEntry(section, entry, start, metadataFile.ByteCount) + PrintObjectMetadata(metadataFile, toc, resGroupMetadata[resGroup.GetUniqueID()], resGroup, "") + } + } +} + +func PrintCreateResourceGroupStatementsBefore7(metadataFile *utils.FileWithByteCount, toc *toc.TOC, resGroups []ResourceGroupBefore7, resGroupMetadata MetadataMap) { for _, resGroup := range resGroups { // temporarily special case for 5x resource groups #temp5xResGroup @@ -194,9 +262,9 @@ * - "0": vmtracker (default) */ if resGroup.MemoryAuditor == "1" { - attributes = append(attributes, fmt.Sprintf("MEMORY_AUDITOR=cgroup")) - } else if connectionPool.Version.AtLeast("5.8.0"){ - attributes = append(attributes, fmt.Sprintf("MEMORY_AUDITOR=vmtracker")) + attributes = append(attributes, "MEMORY_AUDITOR=cgroup") + } else if connectionPool.Version.AtLeast("5.8.0") { + attributes = append(attributes, "MEMORY_AUDITOR=vmtracker") } attributes = append(attributes, fmt.Sprintf("MEMORY_LIMIT=%s", resGroup.MemoryLimit)) diff --git a/backup/metadata_globals_test.go b/backup/metadata_globals_test.go index a0b746337..1c6fdd7ed 100644 --- a/backup/metadata_globals_test.go +++ b/backup/metadata_globals_test.go @@ -141,21 +141,21 @@ GRANT TEMPORARY,CONNECT ON DATABASE testdb TO testrole;`, var emptyResGroupMetadata = backup.MetadataMap{} It("prints resource groups", func() { testhelper.SetDBVersion(connectionPool, "5.9.0") - someGroup := backup.ResourceGroup{Oid: 1, Name: "some_group", CPURateLimit: "10", MemoryLimit: "20", Concurrency: "15", MemorySharedQuota: "25", MemorySpillRatio: "30"} - someGroup2 := backup.ResourceGroup{Oid: 2, Name: "some_group2", CPURateLimit: "20", MemoryLimit: "30", Concurrency: "25", MemorySharedQuota: "35", MemorySpillRatio: "10"} - resGroups := []backup.ResourceGroup{someGroup, someGroup2} + someGroup := backup.ResourceGroupBefore7{ResourceGroup: backup.ResourceGroup{Oid: 1, Name: "some_group", Concurrency: "15"}, CPURateLimit: "10", MemoryLimit: "20", MemorySharedQuota: "25", MemorySpillRatio: "30"} + someGroup2 := 
backup.ResourceGroupBefore7{ResourceGroup: backup.ResourceGroup{Oid: 2, Name: "some_group2", Concurrency: "25"}, CPURateLimit: "20", MemoryLimit: "30", MemorySharedQuota: "35", MemorySpillRatio: "10"} + resGroups := []backup.ResourceGroupBefore7{someGroup, someGroup2} - backup.PrintCreateResourceGroupStatements(backupfile, tocfile, resGroups, emptyResGroupMetadata) + backup.PrintCreateResourceGroupStatementsBefore7(backupfile, tocfile, resGroups, emptyResGroupMetadata) testutils.ExpectEntry(tocfile.GlobalEntries, 0, "", "", "some_group", "RESOURCE GROUP") testutils.AssertBufferContents(tocfile.GlobalEntries, buffer, `CREATE RESOURCE GROUP some_group WITH (CPU_RATE_LIMIT=10, MEMORY_AUDITOR=vmtracker, MEMORY_LIMIT=20, MEMORY_SHARED_QUOTA=25, MEMORY_SPILL_RATIO=30, CONCURRENCY=15);`, `CREATE RESOURCE GROUP some_group2 WITH (CPU_RATE_LIMIT=20, MEMORY_AUDITOR=vmtracker, MEMORY_LIMIT=30, MEMORY_SHARED_QUOTA=35, MEMORY_SPILL_RATIO=10, CONCURRENCY=25);`) }) It("prints ALTER statement for default_group resource group", func() { - defaultGroup := backup.ResourceGroup{Oid: 1, Name: "default_group", CPURateLimit: "10", MemoryLimit: "20", Concurrency: "15", MemorySharedQuota: "25", MemorySpillRatio: "30"} - resGroups := []backup.ResourceGroup{defaultGroup} + defaultGroup := backup.ResourceGroupBefore7{ResourceGroup: backup.ResourceGroup{Oid: 1, Name: "default_group", Concurrency: "15"}, CPURateLimit: "10", MemoryLimit: "20", MemorySharedQuota: "25", MemorySpillRatio: "30"} + resGroups := []backup.ResourceGroupBefore7{defaultGroup} - backup.PrintCreateResourceGroupStatements(backupfile, tocfile, resGroups, emptyResGroupMetadata) + backup.PrintCreateResourceGroupStatementsBefore7(backupfile, tocfile, resGroups, emptyResGroupMetadata) testutils.ExpectEntry(tocfile.GlobalEntries, 0, "", "", "default_group", "RESOURCE GROUP") testutils.AssertBufferContents(tocfile.GlobalEntries, buffer, `ALTER RESOURCE GROUP default_group SET MEMORY_LIMIT 20;`, @@ -166,12 +166,12 @@ GRANT TEMPORARY,CONNECT ON DATABASE testdb TO testrole;`, }) It("prints memory_auditor resource groups", func() { testhelper.SetDBVersion(connectionPool, "5.8.0") - someGroup := backup.ResourceGroup{Oid: 1, Name: "some_group", CPURateLimit: "10", MemoryLimit: "20", Concurrency: "15", MemorySharedQuota: "25", MemorySpillRatio: "30"} - someGroup2 := backup.ResourceGroup{Oid: 2, Name: "some_group2", CPURateLimit: "10", MemoryLimit: "30", Concurrency: "0", MemorySharedQuota: "35", MemorySpillRatio: "10", MemoryAuditor: "1"} - someGroup3 := backup.ResourceGroup{Oid: 3, Name: "some_group3", CPURateLimit: "10", MemoryLimit: "30", Concurrency: "25", MemorySharedQuota: "35", MemorySpillRatio: "10", MemoryAuditor: "0"} - resGroups := []backup.ResourceGroup{someGroup, someGroup2, someGroup3} + someGroup := backup.ResourceGroupBefore7{ResourceGroup: backup.ResourceGroup{Oid: 1, Name: "some_group", Concurrency: "15"}, CPURateLimit: "10", MemoryLimit: "20", MemorySharedQuota: "25", MemorySpillRatio: "30"} + someGroup2 := backup.ResourceGroupBefore7{ResourceGroup: backup.ResourceGroup{Oid: 2, Name: "some_group2", Concurrency: "0"}, CPURateLimit: "10", MemoryLimit: "30", MemorySharedQuota: "35", MemorySpillRatio: "10", MemoryAuditor: "1"} + someGroup3 := backup.ResourceGroupBefore7{ResourceGroup: backup.ResourceGroup{Oid: 3, Name: "some_group3", Concurrency: "25"}, CPURateLimit: "10", MemoryLimit: "30", MemorySharedQuota: "35", MemorySpillRatio: "10", MemoryAuditor: "0"} + resGroups := []backup.ResourceGroupBefore7{someGroup, someGroup2, someGroup3} - 
backup.PrintCreateResourceGroupStatements(backupfile, tocfile, resGroups, emptyResGroupMetadata) + backup.PrintCreateResourceGroupStatementsBefore7(backupfile, tocfile, resGroups, emptyResGroupMetadata) testutils.ExpectEntry(tocfile.GlobalEntries, 0, "", "", "some_group", "RESOURCE GROUP") testutils.AssertBufferContents(tocfile.GlobalEntries, buffer, `CREATE RESOURCE GROUP some_group WITH (CPU_RATE_LIMIT=10, MEMORY_AUDITOR=vmtracker, MEMORY_LIMIT=20, MEMORY_SHARED_QUOTA=25, MEMORY_SPILL_RATIO=30, CONCURRENCY=15);`, @@ -180,11 +180,11 @@ GRANT TEMPORARY,CONNECT ON DATABASE testdb TO testrole;`, }) It("prints cpuset resource groups", func() { testhelper.SetDBVersion(connectionPool, "5.9.0") - someGroup := backup.ResourceGroup{Oid: 1, Name: "some_group", CPURateLimit: "10", MemoryLimit: "20", Concurrency: "15", MemorySharedQuota: "25", MemorySpillRatio: "30"} - someGroup2 := backup.ResourceGroup{Oid: 2, Name: "some_group2", CPURateLimit: "-1", Cpuset: "0-3", MemoryLimit: "30", Concurrency: "25", MemorySharedQuota: "35", MemorySpillRatio: "10"} - resGroups := []backup.ResourceGroup{someGroup, someGroup2} + someGroup := backup.ResourceGroupBefore7{ResourceGroup: backup.ResourceGroup{Oid: 1, Name: "some_group", Concurrency: "15"}, CPURateLimit: "10", MemoryLimit: "20", MemorySharedQuota: "25", MemorySpillRatio: "30"} + someGroup2 := backup.ResourceGroupBefore7{ResourceGroup: backup.ResourceGroup{Oid: 2, Name: "some_group2", Concurrency: "25", Cpuset: "0-3"}, CPURateLimit: "-1", MemoryLimit: "30", MemorySharedQuota: "35", MemorySpillRatio: "10"} + resGroups := []backup.ResourceGroupBefore7{someGroup, someGroup2} - backup.PrintCreateResourceGroupStatements(backupfile, tocfile, resGroups, emptyResGroupMetadata) + backup.PrintCreateResourceGroupStatementsBefore7(backupfile, tocfile, resGroups, emptyResGroupMetadata) testutils.ExpectEntry(tocfile.GlobalEntries, 0, "", "", "some_group", "RESOURCE GROUP") testutils.AssertBufferContents(tocfile.GlobalEntries, buffer, `CREATE RESOURCE GROUP some_group WITH (CPU_RATE_LIMIT=10, MEMORY_AUDITOR=vmtracker, MEMORY_LIMIT=20, MEMORY_SHARED_QUOTA=25, MEMORY_SPILL_RATIO=30, CONCURRENCY=15);`, @@ -194,13 +194,13 @@ GRANT TEMPORARY,CONNECT ON DATABASE testdb TO testrole;`, It("prints memory_spill_ratio resource groups in new syntax", func() { testhelper.SetDBVersion(connectionPool, "5.2.0") - defaultGroup := backup.ResourceGroup{Oid: 1, Name: "default_group", CPURateLimit: "10", MemoryLimit: "20", Concurrency: "15", MemorySharedQuota: "25", MemorySpillRatio: "30 MB"} - adminGroup := backup.ResourceGroup{Oid: 2, Name: "admin_group", CPURateLimit: "10", MemoryLimit: "20", Concurrency: "15", MemorySharedQuota: "25", MemorySpillRatio: "30"} - someGroup := backup.ResourceGroup{Oid: 3, Name: "some_group", CPURateLimit: "20", MemoryLimit: "30", Concurrency: "25", MemorySharedQuota: "35", MemorySpillRatio: "40 MB"} - someGroup2 := backup.ResourceGroup{Oid: 4, Name: "some_group2", CPURateLimit: "20", MemoryLimit: "30", Concurrency: "25", MemorySharedQuota: "35", MemorySpillRatio: "40"} - resGroups := []backup.ResourceGroup{defaultGroup, adminGroup, someGroup, someGroup2} + defaultGroup := backup.ResourceGroupBefore7{ResourceGroup: backup.ResourceGroup{Oid: 1, Name: "default_group", Concurrency: "15"}, CPURateLimit: "10", MemoryLimit: "20", MemorySharedQuota: "25", MemorySpillRatio: "30 MB"} + adminGroup := backup.ResourceGroupBefore7{ResourceGroup: backup.ResourceGroup{Oid: 2, Name: "admin_group", Concurrency: "15"}, CPURateLimit: "10", MemoryLimit: "20", MemorySharedQuota: 
"25", MemorySpillRatio: "30"} + someGroup := backup.ResourceGroupBefore7{ResourceGroup: backup.ResourceGroup{Oid: 3, Name: "some_group", Concurrency: "25"}, CPURateLimit: "20", MemoryLimit: "30", MemorySharedQuota: "35", MemorySpillRatio: "40 MB"} + someGroup2 := backup.ResourceGroupBefore7{ResourceGroup: backup.ResourceGroup{Oid: 4, Name: "some_group2", Concurrency: "25"}, CPURateLimit: "20", MemoryLimit: "30", MemorySharedQuota: "35", MemorySpillRatio: "40"} + resGroups := []backup.ResourceGroupBefore7{defaultGroup, adminGroup, someGroup, someGroup2} - backup.PrintCreateResourceGroupStatements(backupfile, tocfile, resGroups, emptyResGroupMetadata) + backup.PrintCreateResourceGroupStatementsBefore7(backupfile, tocfile, resGroups, emptyResGroupMetadata) testutils.ExpectEntry(tocfile.GlobalEntries, 0, "", "", "default_group", "RESOURCE GROUP") testutils.AssertBufferContents(tocfile.GlobalEntries, buffer, `ALTER RESOURCE GROUP default_group SET MEMORY_LIMIT 20;`, @@ -219,11 +219,11 @@ GRANT TEMPORARY,CONNECT ON DATABASE testdb TO testrole;`, It("prints correct CREATE RESOURCE GROUP syntax for old resource groups on GPDB 5.8", func() { // Memory Auditor reslimittype was added in GPDB 5.8. Make sure the older resource group object will have the proper default. testhelper.SetDBVersion(connectionPool, "5.8.0") - resGroup52 := backup.ResourceGroup{Oid: 3, Name: "resGroup52", CPURateLimit: "20", MemoryLimit: "30", Concurrency: "25", MemorySharedQuota: "35", MemorySpillRatio: "40"} - resGroup58 := backup.ResourceGroup{Oid: 4, Name: "resGroup58", CPURateLimit: "20", MemoryLimit: "30", Concurrency: "25", MemorySharedQuota: "35", MemorySpillRatio: "40", MemoryAuditor: "1"} - resGroups := []backup.ResourceGroup{resGroup52, resGroup58} + resGroup52 := backup.ResourceGroupBefore7{ResourceGroup: backup.ResourceGroup{Oid: 3, Name: "resGroup52", Concurrency: "25"}, CPURateLimit: "20", MemoryLimit: "30", MemorySharedQuota: "35", MemorySpillRatio: "40"} + resGroup58 := backup.ResourceGroupBefore7{ResourceGroup: backup.ResourceGroup{Oid: 4, Name: "resGroup58", Concurrency: "25"}, CPURateLimit: "20", MemoryLimit: "30", MemorySharedQuota: "35", MemorySpillRatio: "40", MemoryAuditor: "1"} + resGroups := []backup.ResourceGroupBefore7{resGroup52, resGroup58} - backup.PrintCreateResourceGroupStatements(backupfile, tocfile, resGroups, emptyResGroupMetadata) + backup.PrintCreateResourceGroupStatementsBefore7(backupfile, tocfile, resGroups, emptyResGroupMetadata) testutils.AssertBufferContents(tocfile.GlobalEntries, buffer, `CREATE RESOURCE GROUP resGroup52 WITH (CPU_RATE_LIMIT=20, MEMORY_AUDITOR=vmtracker, MEMORY_LIMIT=30, MEMORY_SHARED_QUOTA=35, MEMORY_SPILL_RATIO=40, CONCURRENCY=25);`, `CREATE RESOURCE GROUP resGroup58 WITH (CPU_RATE_LIMIT=20, MEMORY_AUDITOR=cgroup, MEMORY_LIMIT=30, MEMORY_SHARED_QUOTA=35, MEMORY_SPILL_RATIO=40, CONCURRENCY=25);`) @@ -232,12 +232,12 @@ GRANT TEMPORARY,CONNECT ON DATABASE testdb TO testrole;`, // Cpuset reslimittype was added in GPDB 5.9. Make sure the older resource group objects // will have the proper default. In this case, you either have cpu_rate_limit or cpuset. 
testhelper.SetDBVersion(connectionPool, "5.9.0") - resGroup52 := backup.ResourceGroup{Oid: 3, Name: "resGroup52", CPURateLimit: "20", MemoryLimit: "30", Concurrency: "25", MemorySharedQuota: "35", MemorySpillRatio: "40"} - resGroup58 := backup.ResourceGroup{Oid: 4, Name: "resGroup58", CPURateLimit: "20", MemoryLimit: "30", Concurrency: "25", MemorySharedQuota: "35", MemorySpillRatio: "40", MemoryAuditor: "1"} - resGroup59 := backup.ResourceGroup{Oid: 5, Name: "resGroup59", CPURateLimit: "-1", MemoryLimit: "30", Concurrency: "25", MemorySharedQuota: "35", MemorySpillRatio: "40", MemoryAuditor: "1", Cpuset: "1"} - resGroups := []backup.ResourceGroup{resGroup52, resGroup58, resGroup59} + resGroup52 := backup.ResourceGroupBefore7{ResourceGroup: backup.ResourceGroup{Oid: 3, Name: "resGroup52", Concurrency: "25"}, CPURateLimit: "20", MemoryLimit: "30", MemorySharedQuota: "35", MemorySpillRatio: "40"} + resGroup58 := backup.ResourceGroupBefore7{ResourceGroup: backup.ResourceGroup{Oid: 4, Name: "resGroup58", Concurrency: "25"}, CPURateLimit: "20", MemoryLimit: "30", MemorySharedQuota: "35", MemorySpillRatio: "40", MemoryAuditor: "1"} + resGroup59 := backup.ResourceGroupBefore7{ResourceGroup: backup.ResourceGroup{Oid: 5, Name: "resGroup59", Concurrency: "25", Cpuset: "1"}, CPURateLimit: "-1", MemoryLimit: "30", MemorySharedQuota: "35", MemorySpillRatio: "40", MemoryAuditor: "1"} + resGroups := []backup.ResourceGroupBefore7{resGroup52, resGroup58, resGroup59} - backup.PrintCreateResourceGroupStatements(backupfile, tocfile, resGroups, emptyResGroupMetadata) + backup.PrintCreateResourceGroupStatementsBefore7(backupfile, tocfile, resGroups, emptyResGroupMetadata) testutils.AssertBufferContents(tocfile.GlobalEntries, buffer, `CREATE RESOURCE GROUP resGroup52 WITH (CPU_RATE_LIMIT=20, MEMORY_AUDITOR=vmtracker, MEMORY_LIMIT=30, MEMORY_SHARED_QUOTA=35, MEMORY_SPILL_RATIO=40, CONCURRENCY=25);`, `CREATE RESOURCE GROUP resGroup58 WITH (CPU_RATE_LIMIT=20, MEMORY_AUDITOR=cgroup, MEMORY_LIMIT=30, MEMORY_SHARED_QUOTA=35, MEMORY_SPILL_RATIO=40, CONCURRENCY=25);`, @@ -248,11 +248,22 @@ GRANT TEMPORARY,CONNECT ON DATABASE testdb TO testrole;`, It("prints prepare resource groups", func() { backup.PrintResetResourceGroupStatements(backupfile, tocfile) testutils.ExpectEntry(tocfile.GlobalEntries, 0, "", "", "admin_group", "RESOURCE GROUP") - testutils.AssertBufferContents(tocfile.GlobalEntries, buffer, - `ALTER RESOURCE GROUP admin_group SET CPU_RATE_LIMIT 1;`, - `ALTER RESOURCE GROUP admin_group SET MEMORY_LIMIT 1;`, - `ALTER RESOURCE GROUP default_group SET CPU_RATE_LIMIT 1;`, - `ALTER RESOURCE GROUP default_group SET MEMORY_LIMIT 1;`) + if connectionPool.Version.Before("7") { + testutils.AssertBufferContents(tocfile.GlobalEntries, buffer, + `ALTER RESOURCE GROUP admin_group SET CPU_RATE_LIMIT 1;`, + `ALTER RESOURCE GROUP admin_group SET MEMORY_LIMIT 1;`, + `ALTER RESOURCE GROUP default_group SET CPU_RATE_LIMIT 1;`, + `ALTER RESOURCE GROUP default_group SET MEMORY_LIMIT 1;`) + } else { // GPDB7+ + testutils.AssertBufferContents(tocfile.GlobalEntries, buffer, + `ALTER RESOURCE GROUP admin_group SET CPU_HARD_QUOTA_LIMIT 1;`, + `ALTER RESOURCE GROUP admin_group SET CPU_SOFT_PRIORITY 100;`, + `ALTER RESOURCE GROUP default_group SET CPU_HARD_QUOTA_LIMIT 1;`, + `ALTER RESOURCE GROUP default_group SET CPU_SOFT_PRIORITY 100;`, + `ALTER RESOURCE GROUP system_group SET CPU_HARD_QUOTA_LIMIT 1;`, + `ALTER RESOURCE GROUP system_group SET CPU_SOFT_PRIORITY 100;`) + + } }) }) Describe("PrintCreateRoleStatements", func() { diff --git 
a/backup/postdata.go b/backup/postdata.go index f30bd8661..4a9226810 100644 --- a/backup/postdata.go +++ b/backup/postdata.go @@ -2,7 +2,7 @@ package backup /* * This file contains structs and functions related to backing up "post-data" metadata - * on the master, which is any metadata that needs to be restored after data is + * on the coordinator, which is any metadata that needs to be restored after data is * restored, such as indexes and rules. */ diff --git a/backup/predata_externals.go b/backup/predata_externals.go index b9182aada..5bf05d0dc 100644 --- a/backup/predata_externals.go +++ b/backup/predata_externals.go @@ -2,7 +2,7 @@ package backup /* * This file contains structs and functions related to backing up metadata on the - * master for objects that connect to external data (external tables and external + * coordinator for objects that connect to external data (external tables and external * protocols). */ diff --git a/backup/predata_externals_test.go b/backup/predata_externals_test.go index 8bbef4197..9dc0f19fa 100644 --- a/backup/predata_externals_test.go +++ b/backup/predata_externals_test.go @@ -172,10 +172,10 @@ DISTRIBUTED RANDOMLY;`) FORMAT 'TEXT' ENCODING 'UTF-8'`) }) - It("prints a CREATE block for a table with EXECUTE ON MASTER", func() { - extTableDef.ExecLocation = "MASTER_ONLY" + It("prints a CREATE block for a table with EXECUTE ON COORDINATOR", func() { + extTableDef.ExecLocation = "COORDINATOR_ONLY" backup.PrintExternalTableStatements(backupfile, tableName, extTableDef) - testhelper.ExpectRegexp(buffer, `EXECUTE 'hostname' ON MASTER + testhelper.ExpectRegexp(buffer, `EXECUTE 'hostname' ON COORDINATOR FORMAT 'TEXT' ENCODING 'UTF-8'`) }) @@ -231,15 +231,15 @@ ENCODING 'UTF-8'`) extTableDef.URIs = []string{"file://host:port/path/file"} }) - It("prints a CREATE block for an S3 table with ON MASTER", func() { + It("prints a CREATE block for an S3 table with ON COORDINATOR", func() { extTableDef.Protocol = backup.S3 extTableDef.Location = sql.NullString{String: "s3://s3_endpoint:port/bucket_name/s3_prefix", Valid: true} extTableDef.URIs = []string{"s3://s3_endpoint:port/bucket_name/s3_prefix"} - extTableDef.ExecLocation = "MASTER_ONLY" + extTableDef.ExecLocation = "COORDINATOR_ONLY" backup.PrintExternalTableStatements(backupfile, tableName, extTableDef) testhelper.ExpectRegexp(buffer, `LOCATION ( 's3://s3_endpoint:port/bucket_name/s3_prefix' -) ON MASTER +) ON COORDINATOR FORMAT 'TEXT' ENCODING 'UTF-8'`) }) diff --git a/backup/predata_operators.go b/backup/predata_operators.go index e32bbeb4b..1a1c552d9 100644 --- a/backup/predata_operators.go +++ b/backup/predata_operators.go @@ -2,7 +2,7 @@ package backup /* * This file contains structs and functions related to backing up metadata on the - * master for objects that don't fall under any other predata categorization, + * coordinator for objects that don't fall under any other predata categorization, * such as procedural languages and constraints, that needs to be restored * before data is restored.
*/ diff --git a/backup/predata_relations.go b/backup/predata_relations.go index 1456ed3e0..75beadb87 100644 --- a/backup/predata_relations.go +++ b/backup/predata_relations.go @@ -2,7 +2,7 @@ package backup /* * This file contains structs and functions related to backing up relation - * (sequence, table, and view) metadata on the master. + * (sequence, table, and view) metadata on the coordinator. */ import ( diff --git a/backup/predata_shared.go b/backup/predata_shared.go index 7a5e6fe28..951b1829a 100644 --- a/backup/predata_shared.go +++ b/backup/predata_shared.go @@ -3,7 +3,7 @@ package backup /* * This file contains structs and functions related to backing up metadata shared * among many or all object types (privileges, owners, and comments) on the - * master that needs to be restored before data is restored. + * coordinator that needs to be restored before data is restored. */ import ( diff --git a/backup/predata_textsearch.go b/backup/predata_textsearch.go index 91d570741..1fd25010a 100644 --- a/backup/predata_textsearch.go +++ b/backup/predata_textsearch.go @@ -2,7 +2,7 @@ package backup /* * This file contains structs and functions related to backing up metadata on the - * master for objects relating to built-in text search that needs to be restored + * coordinator for objects relating to built-in text search that needs to be restored * before data is restored. * * Text search is not supported in GPDB 4.3, so none of these structs or functions diff --git a/backup/predata_types.go b/backup/predata_types.go index 5b9249ceb..292bc0601 100644 --- a/backup/predata_types.go +++ b/backup/predata_types.go @@ -2,7 +2,7 @@ package backup /* * This file contains structs and functions related to backing up type - * metadata on the master that needs to be restored before data is restored. + * metadata on the coordinator that needs to be restored before data is restored. */ import ( diff --git a/backup/queries_externals.go b/backup/queries_externals.go index 466fc6951..aa19ee63e 100644 --- a/backup/queries_externals.go +++ b/backup/queries_externals.go @@ -76,7 +76,7 @@ func GetExternalTableDefinitions(connectionPool *dbconn.DBConn) map[uint32]Exter // Cannot use unnest() in CASE statements anymore in GPDB 7+ so convert // it to a LEFT JOIN LATERAL. We do not use LEFT JOIN LATERAL for GPDB 6 // because the CASE unnest() logic is more performant. - version7Query := ` + atLeast7Query := ` SELECT e.reloid AS oid, ljl_unnest AS location, array_to_string(e.execlocation, ',') AS execlocation, @@ -100,8 +100,8 @@ func GetExternalTableDefinitions(connectionPool *dbconn.DBConn) map[uint32]Exter query = version5Query } else if connectionPool.Version.Is("6") { query = version6Query - } else if connectionPool.Version.Is("7") { - query = version7Query + } else if connectionPool.Version.AtLeast("7") { + query = atLeast7Query } results := make([]ExternalTableDefinition, 0) diff --git a/backup/queries_functions.go b/backup/queries_functions.go index 76a420700..d2dbd6f73 100644 --- a/backup/queries_functions.go +++ b/backup/queries_functions.go @@ -103,9 +103,7 @@ func GetFunctionsAllVersions(connectionPool *dbconn.DBConn) []Function { func GetFunctions(connectionPool *dbconn.DBConn) []Function { excludeImplicitFunctionsClause := "" - locationAtts := "'a' AS proexeclocation," if connectionPool.Version.AtLeast("6") { - locationAtts = "proiswindow,proexeclocation,proleakproof," // This excludes implicitly created functions. 
Currently this is only range type functions excludeImplicitFunctionsClause = ` AND NOT EXISTS ( @@ -113,13 +111,20 @@ func GetFunctions(connectionPool *dbconn.DBConn) []Function { WHERE classid = 'pg_proc'::regclass::oid AND objid = p.oid AND deptype = 'i')` } - var query string - if connectionPool.Version.AtLeast("7") { + + locationAtts := "" + if connectionPool.Version.Before("6") { + locationAtts = "'a' AS proexeclocation," + } else if connectionPool.Version.Is("6") { + locationAtts = "proiswindow,proexeclocation,proleakproof," + } else { locationAtts = "proexeclocation,proleakproof,proparallel," - query = fmt.Sprintf(` + } + + before7Query := fmt.Sprintf(` SELECT p.oid, quote_ident(nspname) AS schema, - quote_ident(p.proname) AS name, + quote_ident(proname) AS name, proretset, coalesce(prosrc, '') AS functionbody, coalesce(probin, '') AS binarypath, @@ -135,32 +140,22 @@ func GetFunctions(connectionPool *dbconn.DBConn) []Function { procost, prorows, prodataaccess, - prokind, - prosupport, - l.lanname AS language, - coalesce(array_to_string(ARRAY(SELECT 'FOR TYPE ' || nm.nspname || '.' || typ.typname - from - unnest(p.protrftypes) as trf_unnest - left join pg_type typ - on trf_unnest = typ.oid - left join pg_namespace nm - on typ.typnamespace = nm.oid - ), ', '), '') AS transformtypes + l.lanname AS language FROM pg_proc p JOIN pg_catalog.pg_language l ON p.prolang = l.oid LEFT JOIN pg_namespace n ON p.pronamespace = n.oid WHERE %s - AND prokind <> 'a' + AND proisagg = 'f' AND %s%s ORDER BY nspname, proname, identargs`, locationAtts, - SchemaFilterClause("n"), - ExtensionFilterClause("p"), - excludeImplicitFunctionsClause) - } else { - query = fmt.Sprintf(` + SchemaFilterClause("n"), + ExtensionFilterClause("p"), + excludeImplicitFunctionsClause) + + atLeast7Query := fmt.Sprintf(` SELECT p.oid, quote_ident(nspname) AS schema, - quote_ident(proname) AS name, + quote_ident(p.proname) AS name, proretset, coalesce(prosrc, '') AS functionbody, coalesce(probin, '') AS binarypath, @@ -176,17 +171,33 @@ func GetFunctions(connectionPool *dbconn.DBConn) []Function { procost, prorows, prodataaccess, - l.lanname AS language + prokind, + prosupport, + l.lanname AS language, + coalesce(array_to_string(ARRAY(SELECT 'FOR TYPE ' || nm.nspname || '.' 
|| typ.typname + from + unnest(p.protrftypes) as trf_unnest + left join pg_type typ + on trf_unnest = typ.oid + left join pg_namespace nm + on typ.typnamespace = nm.oid + ), ', '), '') AS transformtypes FROM pg_proc p JOIN pg_catalog.pg_language l ON p.prolang = l.oid LEFT JOIN pg_namespace n ON p.pronamespace = n.oid WHERE %s - AND proisagg = 'f' + AND prokind <> 'a' AND %s%s ORDER BY nspname, proname, identargs`, locationAtts, - SchemaFilterClause("n"), - ExtensionFilterClause("p"), - excludeImplicitFunctionsClause) + SchemaFilterClause("n"), + ExtensionFilterClause("p"), + excludeImplicitFunctionsClause) + + query := "" + if connectionPool.Version.Before("7") { + query = before7Query + } else { + query = atLeast7Query } results := make([]Function, 0) @@ -542,7 +553,7 @@ func GetAggregates(connectionPool *dbconn.DBConn) []Aggregate { AND %s`, SchemaFilterClause("n"), ExtensionFilterClause("p")) - version7Query := fmt.Sprintf(` + atLeast7Query := fmt.Sprintf(` SELECT p.oid, quote_ident(n.nspname) AS schema, p.proname AS name, @@ -590,7 +601,7 @@ func GetAggregates(connectionPool *dbconn.DBConn) []Aggregate { } else if connectionPool.Version.Is("6") { query = version6query } else { - query = version7Query + query = atLeast7Query } err := connectionPool.Select(&aggregates, query) gplog.FatalOnError(err) @@ -656,13 +667,13 @@ func (info FunctionInfo) GetMetadataEntry() (string, toc.MetadataEntry) { } func GetFunctionOidToInfoMap(connectionPool *dbconn.DBConn) map[uint32]FunctionInfo { - version4query := ` + before5Query := ` SELECT p.oid, quote_ident(n.nspname) AS schema, quote_ident(p.proname) AS name FROM pg_proc p LEFT JOIN pg_namespace n ON p.pronamespace = n.oid` - query := ` + atLeast5Query := ` SELECT p.oid, quote_ident(n.nspname) AS schema, quote_ident(p.proname) AS name, @@ -675,7 +686,7 @@ func GetFunctionOidToInfoMap(connectionPool *dbconn.DBConn) map[uint32]FunctionI funcMap := make(map[uint32]FunctionInfo) var err error if connectionPool.Version.Before("5") { - err = connectionPool.Select(&results, version4query) + err = connectionPool.Select(&results, before5Query) arguments, _ := GetFunctionArgsAndIdentArgs(connectionPool) for i := range results { results[i].Arguments.String = arguments[results[i].Oid] @@ -684,7 +695,7 @@ func GetFunctionOidToInfoMap(connectionPool *dbconn.DBConn) map[uint32]FunctionI results[i].IdentArgs.Valid = true // Hardcode for GPDB 4.3 to fit sql.NullString } } else { - err = connectionPool.Select(&results, query) + err = connectionPool.Select(&results, atLeast5Query) } gplog.FatalOnError(err) for _, funcInfo := range results { @@ -867,7 +878,7 @@ func (pl ProceduralLanguage) FQN() string { func GetProceduralLanguages(connectionPool *dbconn.DBConn) []ProceduralLanguage { results := make([]ProceduralLanguage, 0) // Languages are owned by the bootstrap superuser, OID 10 - version4query := ` + before5Query := ` SELECT oid, quote_ident(l.lanname) AS name, pg_get_userbyid(10) AS owner, @@ -879,7 +890,7 @@ func GetProceduralLanguages(connectionPool *dbconn.DBConn) []ProceduralLanguage FROM pg_language l WHERE l.lanispl='t' AND l.lanname != 'plpgsql'` - query := fmt.Sprintf(` + atLeast5Query := fmt.Sprintf(` SELECT oid, quote_ident(l.lanname) AS name, pg_get_userbyid(l.lanowner) AS owner, @@ -892,12 +903,15 @@ func GetProceduralLanguages(connectionPool *dbconn.DBConn) []ProceduralLanguage WHERE l.lanispl='t' AND l.lanname != 'plpgsql' AND %s`, ExtensionFilterClause("l")) - var err error + + query := "" if connectionPool.Version.Before("5") { - err = 
connectionPool.Select(&results, version4query) + query = before5Query } else { - err = connectionPool.Select(&results, query) + query = atLeast5Query } + + err := connectionPool.Select(&results, query) gplog.FatalOnError(err) return results } @@ -933,7 +947,7 @@ func (trf Transform) FQN() string { func GetTransforms(connectionPool *dbconn.DBConn) []Transform { results := make([]Transform, 0) - query := fmt.Sprintf(` + query := ` SELECT trf.oid, quote_ident(ns.nspname) AS typnamespace, quote_ident(tp.typname) AS typname, @@ -943,7 +957,7 @@ func GetTransforms(connectionPool *dbconn.DBConn) []Transform { FROM pg_transform trf JOIN pg_type tp ON trf.trftype=tp.oid JOIN pg_namespace ns ON tp.typnamespace = ns.oid - JOIN pg_language l ON trf.trflang=l.oid;`) + JOIN pg_language l ON trf.trflang=l.oid;` err := connectionPool.Select(&results, query) gplog.FatalOnError(err) @@ -1177,7 +1191,7 @@ func (se StatisticExt) FQN() string { func GetExtendedStatistics(connectionPool *dbconn.DBConn) []StatisticExt { results := make([]StatisticExt, 0) - query := fmt.Sprintf(` + query := ` SELECT se.oid, stxname AS name, regexp_replace(pg_catalog.pg_get_statisticsobjdef(se.oid), '(.* FROM ).*', '\1' || quote_ident(c.relnamespace::regnamespace::text) || '.' || quote_ident(c.relname)) AS definition, @@ -1186,7 +1200,7 @@ func GetExtendedStatistics(connectionPool *dbconn.DBConn) []StatisticExt { quote_ident(c.relnamespace::regnamespace::text) AS tableschema, quote_ident(c.relname) AS tablename FROM pg_catalog.pg_statistic_ext se - JOIN pg_catalog.pg_class c ON se.stxrelid = c.oid;`) + JOIN pg_catalog.pg_class c ON se.stxrelid = c.oid;` err := connectionPool.Select(&results, query) gplog.FatalOnError(err) return results diff --git a/backup/queries_globals.go b/backup/queries_globals.go index f33e0a28f..3ed13d7eb 100644 --- a/backup/queries_globals.go +++ b/backup/queries_globals.go @@ -183,15 +183,10 @@ func GetResourceQueues(connectionPool *dbconn.DBConn) []ResourceQueue { } type ResourceGroup struct { - Oid uint32 - Name string - Concurrency string - CPURateLimit string - MemoryLimit string - MemorySharedQuota string - MemorySpillRatio string - MemoryAuditor string - Cpuset string + Oid uint32 `db:"oid"` + Name string `db:"name"` + Concurrency string `db:"concurrency"` + Cpuset string `db:"cpuset"` } func (rg ResourceGroup) GetMetadataEntry() (string, toc.MetadataEntry) { @@ -214,11 +209,29 @@ func (rg ResourceGroup) FQN() string { return rg.Name } -func GetResourceGroups(connectionPool *dbconn.DBConn) []ResourceGroup { - selectClause := "" - // This is when pg_dumpall was changed to use the actual values - if connectionPool.Version.AtLeast("5.2.0") { - selectClause += ` +type ResourceGroupBefore7 struct { + ResourceGroup // embedded common rg fields+methods + CPURateLimit string `db:"cpuratelimit"` + MemoryLimit string `db:"memorylimit"` + MemorySharedQuota string `db:"memorysharedquota"` + MemorySpillRatio string `db:"memoryspillratio"` + MemoryAuditor string `db:"memoryauditor"` +} + +type ResourceGroupAtLeast7 struct { + ResourceGroup // embedded common rg fields+methods + CpuHardQuotaLimit string `db:"cpu_hard_quota_limit"` + CpuSoftPriority string `db:"cpu_soft_priority"` +} + +func GetResourceGroups[T ResourceGroupBefore7 | ResourceGroupAtLeast7](connectionPool *dbconn.DBConn) []T { + var query string + + if connectionPool.Version.Before("7") { + before7SelectClause := "" + // This is when pg_dumpall was changed to use the actual values + if connectionPool.Version.AtLeast("5.2.0") { + before7SelectClause 
+= ` SELECT g.oid, quote_ident(g.rsgname) AS name, t1.value AS concurrency, @@ -226,8 +239,8 @@ func GetResourceGroups(connectionPool *dbconn.DBConn) []ResourceGroup { t3.value AS memorylimit, t4.value AS memorysharedquota, t5.value AS memoryspillratio` - } else { // GPDB 5.0.0 and 5.1.0 - selectClause += ` + } else { // GPDB 5.0.0 and 5.1.0 + before7SelectClause += ` SELECT g.oid, quote_ident(g.rsgname) AS name, t1.proposed AS concurrency, @@ -235,50 +248,54 @@ func GetResourceGroups(connectionPool *dbconn.DBConn) []ResourceGroup { t3.proposed AS memorylimit, t4.proposed AS memorysharedquota, t5.proposed AS memoryspillratio` - } - - fromClause := ` - FROM pg_resgroup g - JOIN pg_resgroupcapability t1 ON t1.resgroupid = g.oid - JOIN pg_resgroupcapability t2 ON t2.resgroupid = g.oid - JOIN pg_resgroupcapability t3 ON t3.resgroupid = g.oid - JOIN pg_resgroupcapability t4 ON t4.resgroupid = g.oid - JOIN pg_resgroupcapability t5 ON t5.resgroupid = g.oid` - - whereClause := ` - WHERE t1.reslimittype = 1 AND - t2.reslimittype = 2 AND - t3.reslimittype = 3 AND - t4.reslimittype = 4 AND - t5.reslimittype = 5` - - // The reslimittype 6 (memoryauditor) was introduced in GPDB - // 5.8.0. Default the value to '0' (vmtracker) since there could - // be a resource group created before 5.8.0 which will not have - // this memoryauditor field defined. - if connectionPool.Version.AtLeast("5.8.0") { - selectClause += `, - coalesce(t6.value, '0') AS memoryauditor` - - fromClause += ` - LEFT JOIN pg_resgroupcapability t6 ON t6.resgroupid = g.oid AND t6.reslimittype = 6` - } + } - // The reslimittype 7 (cpuset) was introduced in GPDB - // 5.9.0. Default the value to '-1' since there could be a - // resource group created before 5.9.0 which will not have this - // cpuset field defined. - if connectionPool.Version.AtLeast("5.9.0") { - selectClause += `, - coalesce(t7.value, '-1') AS cpuset` + before7FromClause := ` + FROM pg_resgroup g + JOIN pg_resgroupcapability t1 ON t1.resgroupid = g.oid AND t1.reslimittype = 1 + JOIN pg_resgroupcapability t2 ON t2.resgroupid = g.oid AND t2.reslimittype = 2 + JOIN pg_resgroupcapability t3 ON t3.resgroupid = g.oid AND t3.reslimittype = 3 + JOIN pg_resgroupcapability t4 ON t4.resgroupid = g.oid AND t4.reslimittype = 4 + JOIN pg_resgroupcapability t5 ON t5.resgroupid = g.oid AND t5.reslimittype = 5` + + // The reslimittype 6 (memoryauditor) was introduced in GPDB + // 5.8.0. Default the value to '0' (vmtracker) since there could + // be a resource group created before 5.8.0 which will not have + // this memoryauditor field defined. + if connectionPool.Version.AtLeast("5.8.0") { + before7SelectClause += `, coalesce(t6.value, '0') AS memoryauditor` + before7FromClause += ` LEFT JOIN pg_resgroupcapability t6 ON t6.resgroupid = g.oid AND t6.reslimittype = 6` + } - fromClause += ` - LEFT JOIN pg_resgroupcapability t7 ON t7.resgroupid = g.oid AND t7.reslimittype = 7` + // The reslimittype 7 (cpuset) was introduced in GPDB + // 5.9.0. Default the value to '-1' since there could be a + // resource group created before 5.9.0 which will not have this + // cpuset field defined. 
+		if connectionPool.Version.AtLeast("5.9.0") {
+			before7SelectClause += `, coalesce(t7.value, '-1') AS cpuset`
+			before7FromClause += ` LEFT JOIN pg_resgroupcapability t7 ON t7.resgroupid = g.oid AND t7.reslimittype = 7`
+		}
+		query = fmt.Sprintf(`%s %s;`, before7SelectClause, before7FromClause)
+	} else { // GPDB7+
+		// Resource groups were heavily reworked for GPDB7
+		// See: https://github.com/greenplum-db/gpdb/commit/483adea86b50c1759460a6265b3d8e3f4198d92e
+		query = `
+		SELECT
+			g.oid AS oid,
+			g.rsgname AS name,
+			t1.value AS concurrency,
+			t2.value AS cpu_hard_quota_limit,
+			t3.value AS cpu_soft_priority,
+			t4.value AS cpuset
+		FROM pg_resgroup g
+		JOIN pg_resgroupcapability t1 ON g.oid = t1.resgroupid AND t1.reslimittype = 1
+		JOIN pg_resgroupcapability t2 ON g.oid = t2.resgroupid AND t2.reslimittype = 2
+		JOIN pg_resgroupcapability t3 ON g.oid = t3.resgroupid AND t3.reslimittype = 3
+		LEFT JOIN pg_resgroupcapability t4 ON g.oid = t4.resgroupid AND t4.reslimittype = 4`
 	}
 
-	results := make([]ResourceGroup, 0)
-	query := fmt.Sprintf(`%s %s %s;`, selectClause, fromClause, whereClause)
-	err := connectionPool.Select(&results, query)
+	results := make([]T, 0)
+	err := connectionPool.Select(&results, query) // T is concrete at each call site, so sqlx deserializes through that struct's db tags as usual
 	gplog.FatalOnError(err)
 	return results
 }
@@ -518,8 +535,8 @@ func GetRoleMembers(connectionPool *dbconn.DBConn) []RoleMember {
 			WHEN pg_get_userbyid(pga.grantor) like 'unknown (OID='||pga.grantor||')' THEN ''
 			ELSE quote_ident(pg_get_userbyid(pga.grantor))`
 	}
-
-	if connectionPool.Version.AtLeast("7"){
+
+	if connectionPool.Version.AtLeast("7") {
 		whereClause = fmt.Sprintf(`WHERE roleid >= %d`, FIRST_NORMAL_OBJECT_ID)
 	} else {
 		whereClause = ``
@@ -570,7 +587,7 @@ func (t Tablespace) FQN() string {
 }
 
 func GetTablespaces(connectionPool *dbconn.DBConn) []Tablespace {
-	before6query := `
+	before6Query := `
 	SELECT t.oid,
 		quote_ident(t.spcname) AS tablespace,
 		quote_ident(f.fsname) AS filelocation
 	FROM pg_tablespace t
 	JOIN pg_filespace f ON t.spcfsoid = f.oid
 	WHERE spcname != 'pg_default'
 		AND spcname != 'pg_global'`
 
-	query := `
+	atLeast6Query := `
 	SELECT oid,
 		quote_ident(spcname) AS tablespace,
 		'''' || pg_catalog.pg_tablespace_location(oid)::text || '''' AS filelocation,
@@ -590,9 +607,9 @@
 	results := make([]Tablespace, 0)
 	var err error
 	if connectionPool.Version.Before("6") {
-		err = connectionPool.Select(&results, before6query)
+		err = connectionPool.Select(&results, before6Query)
 	} else {
-		err = connectionPool.Select(&results, query)
+		err = connectionPool.Select(&results, atLeast6Query)
 		for i := 0; i < len(results); i++ {
 			results[i].SegmentLocations = GetSegmentTablespaces(connectionPool, results[i].Oid)
 		}
@@ -611,7 +628,7 @@ func GetSegmentTablespaces(connectionPool *dbconn.DBConn, Oid uint32) []string {
 	return dbconn.MustSelectStringSlice(connectionPool, query)
 }
 
-//Potentially expensive query
+// Potentially expensive query
 func GetDBSize(connectionPool *dbconn.DBConn) string {
 	size := struct{ DBSize string }{}
 	sizeQuery := fmt.Sprintf("SELECT pg_size_pretty(pg_database_size('%s')) as dbsize",
diff --git a/backup/queries_incremental.go b/backup/queries_incremental.go
index 0bbbb47c1..a3e2d7fa0 100644
--- a/backup/queries_incremental.go
+++ b/backup/queries_incremental.go
@@ -34,9 +34,8 @@ func getAllModCounts(connectionPool *dbconn.DBConn) map[string]int64 {
} func getAOSegTableFQNs(connectionPool *dbconn.DBConn) map[string]string { - var query string - if connectionPool.Version.AtLeast("7") { - query = fmt.Sprintf(` + + before7Query := fmt.Sprintf(` SELECT seg.aotablefqn, 'pg_aoseg.' || quote_ident(aoseg_c.relname) AS aosegtablefqn FROM pg_class aoseg_c @@ -45,16 +44,15 @@ func getAOSegTableFQNs(connectionPool *dbconn.DBConn) map[string]string { aotables.aotablefqn FROM pg_appendonly pg_ao JOIN (SELECT c.oid, - quote_ident(n.nspname) || '.' || quote_ident(c.relname) AS aotablefqn + quote_ident(n.nspname)|| '.' || quote_ident(c.relname) AS aotablefqn FROM pg_class c JOIN pg_namespace n ON c.relnamespace = n.oid - JOIN pg_am a ON c.relam = a.oid - WHERE a.amname in ('ao_row', 'ao_column') + WHERE relstorage IN ( 'ao', 'co' ) AND %s ) aotables ON pg_ao.relid = aotables.oid ) seg ON aoseg_c.oid = seg.segrelid`, relationAndSchemaFilterClause()) - } else { - query = fmt.Sprintf(` + + atLeast7Query := fmt.Sprintf(` SELECT seg.aotablefqn, 'pg_aoseg.' || quote_ident(aoseg_c.relname) AS aosegtablefqn FROM pg_class aoseg_c @@ -63,13 +61,20 @@ func getAOSegTableFQNs(connectionPool *dbconn.DBConn) map[string]string { aotables.aotablefqn FROM pg_appendonly pg_ao JOIN (SELECT c.oid, - quote_ident(n.nspname)|| '.' || quote_ident(c.relname) AS aotablefqn + quote_ident(n.nspname) || '.' || quote_ident(c.relname) AS aotablefqn FROM pg_class c JOIN pg_namespace n ON c.relnamespace = n.oid - WHERE relstorage IN ( 'ao', 'co' ) + JOIN pg_am a ON c.relam = a.oid + WHERE a.amname in ('ao_row', 'ao_column') AND %s ) aotables ON pg_ao.relid = aotables.oid ) seg ON aoseg_c.oid = seg.segrelid`, relationAndSchemaFilterClause()) + + query := "" + if connectionPool.Version.Before("7") { + query = before7Query + } else { + query = atLeast7Query } results := make([]struct { @@ -86,41 +91,43 @@ func getAOSegTableFQNs(connectionPool *dbconn.DBConn) map[string]string { } func getModCount(connectionPool *dbconn.DBConn, aosegtablefqn string) int64 { - var modCountQuery string - if connectionPool.Version.AtLeast("7") { - // In GPDB 7+, the master no longer stores AO segment data so we must - // query the modcount from the segments. Unfortunately, this does give a - // false positive if a VACUUM FULL compaction happens on the AO table. - modCountQuery = fmt.Sprintf(` - SELECT COALESCE(pg_catalog.sum(modcount), 0) AS modcount FROM gp_dist_random('%s')`, aosegtablefqn) + + before7Query := fmt.Sprintf(`SELECT COALESCE(pg_catalog.sum(modcount), 0) AS modcount FROM %s`, + aosegtablefqn) + + // In GPDB 7+, the coordinator no longer stores AO segment data so we must + // query the modcount from the segments. Unfortunately, this does give a + // false positive if a VACUUM FULL compaction happens on the AO table. + atLeast7Query := fmt.Sprintf(`SELECT COALESCE(pg_catalog.sum(modcount), 0) AS modcount FROM gp_dist_random('%s')`, + aosegtablefqn) + + query := "" + if connectionPool.Version.Before("7") { + query = before7Query } else { - modCountQuery = fmt.Sprintf(` - SELECT COALESCE(pg_catalog.sum(modcount), 0) AS modcount FROM %s`, aosegtablefqn) + query = atLeast7Query } var results []struct { Modcount int64 } - err := connectionPool.Select(&results, modCountQuery) + err := connectionPool.Select(&results, query) gplog.FatalOnError(err) return results[0].Modcount } func getLastDDLTimestamps(connectionPool *dbconn.DBConn) map[string]string { - var query string - if connectionPool.Version.AtLeast("7") { - query = fmt.Sprintf(` + before7Query := fmt.Sprintf(` SELECT quote_ident(aoschema) || '.' 
|| quote_ident(aorelname) as aotablefqn, lastddltimestamp FROM ( SELECT c.oid AS aooid, n.nspname AS aoschema, c.relname AS aorelname FROM pg_class c - JOIN pg_namespace n ON c.relnamespace = n.oid - JOIN pg_am a ON c.relam = a.oid - WHERE a.amname in ('ao_row', 'ao_column') - AND %s + JOIN pg_namespace n ON c.relnamespace = n.oid + WHERE c.relstorage IN ('ao', 'co') + AND %s ) aotables JOIN ( SELECT lo.objid, MAX(lo.statime) AS lastddltimestamp @@ -129,17 +136,18 @@ func getLastDDLTimestamps(connectionPool *dbconn.DBConn) map[string]string { GROUP BY lo.objid ) lastop ON aotables.aooid = lastop.objid`, relationAndSchemaFilterClause()) - } else { - query = fmt.Sprintf(` + + atLeast7Query := fmt.Sprintf(` SELECT quote_ident(aoschema) || '.' || quote_ident(aorelname) as aotablefqn, lastddltimestamp FROM ( SELECT c.oid AS aooid, n.nspname AS aoschema, c.relname AS aorelname FROM pg_class c - JOIN pg_namespace n ON c.relnamespace = n.oid - WHERE c.relstorage IN ('ao', 'co') - AND %s + JOIN pg_namespace n ON c.relnamespace = n.oid + JOIN pg_am a ON c.relam = a.oid + WHERE a.amname in ('ao_row', 'ao_column') + AND %s ) aotables JOIN ( SELECT lo.objid, MAX(lo.statime) AS lastddltimestamp @@ -148,6 +156,12 @@ func getLastDDLTimestamps(connectionPool *dbconn.DBConn) map[string]string { GROUP BY lo.objid ) lastop ON aotables.aooid = lastop.objid`, relationAndSchemaFilterClause()) + + query := "" + if connectionPool.Version.Before("7") { + query = before7Query + } else { + query = atLeast7Query } var results []struct { diff --git a/backup/queries_operators.go b/backup/queries_operators.go index 47211f49e..b503c5588 100644 --- a/backup/queries_operators.go +++ b/backup/queries_operators.go @@ -59,7 +59,7 @@ func (o Operator) FQN() string { func GetOperators(connectionPool *dbconn.DBConn) []Operator { results := make([]Operator, 0) - version4query := fmt.Sprintf(` + before5Query := fmt.Sprintf(` SELECT o.oid AS oid, quote_ident(n.nspname) AS schema, oprname AS name, @@ -75,7 +75,7 @@ func GetOperators(connectionPool *dbconn.DBConn) []Operator { JOIN pg_namespace n on n.oid = o.oprnamespace WHERE %s AND oprcode != 0`, SchemaFilterClause("n")) - masterQuery := fmt.Sprintf(` + atLeast5Query := fmt.Sprintf(` SELECT o.oid AS oid, quote_ident(n.nspname) AS schema, oprname AS name, @@ -93,12 +93,14 @@ func GetOperators(connectionPool *dbconn.DBConn) []Operator { WHERE %s AND oprcode != 0 AND %s`, SchemaFilterClause("n"), ExtensionFilterClause("o")) - var err error + query := "" if connectionPool.Version.Before("5") { - err = connectionPool.Select(&results, version4query) + query = before5Query } else { - err = connectionPool.Select(&results, masterQuery) + query = atLeast5Query } + + err := connectionPool.Select(&results, query) gplog.FatalOnError(err) return results } @@ -194,7 +196,7 @@ func GetOperatorClasses(connectionPool *dbconn.DBConn) []OperatorClass { * PrintCreateOperatorClassStatement to not print FAMILY if the class and * family have the same schema and name will work for both versions. 
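+	 * For illustration (a hypothetical example, not taken from a real catalog):
+	 * an operator class created as
+	 *     CREATE OPERATOR CLASS public.custom_int_ops FOR TYPE int4 USING btree AS ...
+	 * without an explicit FAMILY clause is placed in an implicit operator family
+	 * that shares the class's schema and name (public.custom_int_ops), so
+	 * omitting FAMILY from the printed DDL round-trips correctly in that case.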
*/ - version4query := fmt.Sprintf(` + before5Query := fmt.Sprintf(` SELECT c.oid AS oid, quote_ident(cls_ns.nspname) AS schema, quote_ident(opcname) AS name, @@ -208,7 +210,7 @@ func GetOperatorClasses(connectionPool *dbconn.DBConn) []OperatorClass { JOIN pg_catalog.pg_namespace cls_ns ON cls_ns.oid = opcnamespace WHERE %s`, SchemaFilterClause("cls_ns")) - masterQuery := fmt.Sprintf(` + atLeast5Query := fmt.Sprintf(` SELECT c.oid AS oid, quote_ident(cls_ns.nspname) AS schema, quote_ident(opcname) AS name, @@ -226,12 +228,14 @@ func GetOperatorClasses(connectionPool *dbconn.DBConn) []OperatorClass { AND %s`, SchemaFilterClause("cls_ns"), ExtensionFilterClause("c")) - var err error + query := "" if connectionPool.Version.Before("5") { - err = connectionPool.Select(&results, version4query) + query = before5Query } else { - err = connectionPool.Select(&results, masterQuery) + query = atLeast5Query } + + err := connectionPool.Select(&results, query) gplog.FatalOnError(err) operators := GetOperatorClassOperators(connectionPool) @@ -256,15 +260,15 @@ type OperatorClassOperator struct { func GetOperatorClassOperators(connectionPool *dbconn.DBConn) map[uint32][]OperatorClassOperator { results := make([]OperatorClassOperator, 0) - version4query := fmt.Sprintf(` + before5Query := ` SELECT amopclaid AS classoid, amopstrategy AS strategynumber, amopopr::pg_catalog.regoperator AS operator, amopreqcheck AS recheck FROM pg_catalog.pg_amop - ORDER BY amopstrategy`) + ORDER BY amopstrategy` - version5query := fmt.Sprintf(` + version5Query := ` SELECT refobjid AS classoid, amopstrategy AS strategynumber, amopopr::pg_catalog.regoperator AS operator, @@ -273,9 +277,9 @@ func GetOperatorClassOperators(connectionPool *dbconn.DBConn) map[uint32][]Opera JOIN pg_catalog.pg_depend d ON d.objid = ao.oid WHERE refclassid = 'pg_catalog.pg_opclass'::pg_catalog.regclass AND classid = 'pg_catalog.pg_amop'::pg_catalog.regclass - ORDER BY amopstrategy`) + ORDER BY amopstrategy` - masterQuery := fmt.Sprintf(` + atLeast6Query := ` SELECT refobjid AS classoid, amopstrategy AS strategynumber, amopopr::pg_catalog.regoperator AS operator, @@ -286,15 +290,18 @@ func GetOperatorClassOperators(connectionPool *dbconn.DBConn) map[uint32][]Opera LEFT JOIN pg_namespace ns ON ns.oid = opf.opfnamespace WHERE refclassid = 'pg_catalog.pg_opclass'::pg_catalog.regclass AND classid = 'pg_catalog.pg_amop'::pg_catalog.regclass - ORDER BY amopstrategy`) - var err error + ORDER BY amopstrategy` + + query := "" if connectionPool.Version.Before("5") { - err = connectionPool.Select(&results, version4query) - } else if connectionPool.Version.Before("6") { - err = connectionPool.Select(&results, version5query) + query = before5Query + } else if connectionPool.Version.Is("5") { + query = version5Query } else { - err = connectionPool.Select(&results, masterQuery) + query = atLeast6Query } + + err := connectionPool.Select(&results, query) gplog.FatalOnError(err) operators := make(map[uint32][]OperatorClassOperator) @@ -314,14 +321,14 @@ type OperatorClassFunction struct { func GetOperatorClassFunctions(connectionPool *dbconn.DBConn) map[uint32][]OperatorClassFunction { results := make([]OperatorClassFunction, 0) - version4query := fmt.Sprintf(` + before5Query := ` SELECT amopclaid AS classoid, amprocnum AS supportnumber, amproc::regprocedure AS functionname FROM pg_catalog.pg_amproc - ORDER BY amprocnum`) + ORDER BY amprocnum` - masterQuery := fmt.Sprintf(` + atLeast5Query := ` SELECT refobjid AS classoid, amprocnum AS supportnumber, 
amproclefttype::regtype, @@ -331,14 +338,16 @@ func GetOperatorClassFunctions(connectionPool *dbconn.DBConn) map[uint32][]Opera JOIN pg_catalog.pg_depend d ON d.objid = ap.oid WHERE refclassid = 'pg_catalog.pg_opclass'::pg_catalog.regclass AND classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass - ORDER BY amprocnum`) + ORDER BY amprocnum` - var err error + query := "" if connectionPool.Version.Before("5") { - err = connectionPool.Select(&results, version4query) + query = before5Query } else { - err = connectionPool.Select(&results, masterQuery) + query = atLeast5Query } + + err := connectionPool.Select(&results, query) gplog.FatalOnError(err) functions := make(map[uint32][]OperatorClassFunction) diff --git a/backup/queries_postdata.go b/backup/queries_postdata.go index 91f9440bf..9adbb51ae 100644 --- a/backup/queries_postdata.go +++ b/backup/queries_postdata.go @@ -82,14 +82,15 @@ func (i IndexDefinition) FQN() string { * e.g. comments on implicitly created indexes */ func GetIndexes(connectionPool *dbconn.DBConn) []IndexDefinition { - var query string + implicitIndexStr := "" if connectionPool.Version.Before("6") { indexOidList := ConstructImplicitIndexOidList(connectionPool) - implicitIndexStr := "" + if indexOidList != "" { implicitIndexStr = fmt.Sprintf("OR i.indexrelid IN (%s)", indexOidList) } - query = fmt.Sprintf(` + } + before6Query := fmt.Sprintf(` SELECT DISTINCT i.indexrelid AS oid, quote_ident(ic.relname) AS name, quote_ident(n.nspname) AS owningschema, @@ -111,10 +112,9 @@ func GetIndexes(connectionPool *dbconn.DBConn) []IndexDefinition { AND NOT EXISTS (SELECT 1 FROM pg_partition_rule r WHERE r.parchildrelid = c.oid) AND %s ORDER BY name`, - implicitIndexStr, relationAndSchemaFilterClause(), ExtensionFilterClause("c")) + implicitIndexStr, relationAndSchemaFilterClause(), ExtensionFilterClause("c")) - } else if connectionPool.Version.Is("6") { - query = fmt.Sprintf(` + version6Query := fmt.Sprintf(` SELECT DISTINCT i.indexrelid AS oid, quote_ident(ic.relname) AS name, quote_ident(n.nspname) AS owningschema, @@ -140,10 +140,9 @@ func GetIndexes(connectionPool *dbconn.DBConn) []IndexDefinition { AND NOT EXISTS (SELECT 1 FROM pg_partition_rule r WHERE r.parchildrelid = c.oid) AND %s ORDER BY name`, - relationAndSchemaFilterClause(), ExtensionFilterClause("c")) // The index itself does not have a dependency on the extension, but the index's table does + relationAndSchemaFilterClause(), ExtensionFilterClause("c")) // The index itself does not have a dependency on the extension, but the index's table does - } else { - query = fmt.Sprintf(` + atLeast7Query := fmt.Sprintf(` SELECT DISTINCT i.indexrelid AS oid, coalesce(inh.inhparent, '0') AS parentindex, quote_ident(ic.relname) AS name, @@ -172,7 +171,15 @@ func GetIndexes(connectionPool *dbconn.DBConn) []IndexDefinition { AND i.indexrelid >= %d AND %s ORDER BY name`, - relationAndSchemaFilterClause(), FIRST_NORMAL_OBJECT_ID, ExtensionFilterClause("c")) + relationAndSchemaFilterClause(), FIRST_NORMAL_OBJECT_ID, ExtensionFilterClause("c")) + + query := "" + if connectionPool.Version.Before("6") { + query = before6Query + } else if connectionPool.Version.Is("6") { + query = version6Query + } else { + query = atLeast7Query } resultIndexes := make([]IndexDefinition, 0) @@ -500,7 +507,7 @@ type RLSPolicy struct { } func GetPolicies(connectionPool *dbconn.DBConn) []RLSPolicy { - query := fmt.Sprintf(` + query := ` SELECT p.oid as oid, quote_ident(p.polname) as name, @@ -516,7 +523,7 @@ func GetPolicies(connectionPool *dbconn.DBConn) 
[]RLSPolicy { coalesce(pg_catalog.pg_get_expr(polwithcheck, polrelid), '') AS withcheck FROM pg_catalog.pg_policy p JOIN pg_catalog.pg_class c ON p.polrelid = c.oid - ORDER BY p.polname`) + ORDER BY p.polname` results := make([]RLSPolicy, 0) err := connectionPool.Select(&results, query) diff --git a/backup/queries_relations.go b/backup/queries_relations.go index d47169c1d..ce0db2efa 100644 --- a/backup/queries_relations.go +++ b/backup/queries_relations.go @@ -226,9 +226,7 @@ type SequenceDefinition struct { } func GetAllSequences(connectionPool *dbconn.DBConn) []Sequence { - var query string - if connectionPool.Version.AtLeast("7") { - query = fmt.Sprintf(` + atLeast7Query := fmt.Sprintf(` SELECT n.oid AS schemaoid, c.oid AS oid, quote_ident(n.nspname) AS schema, @@ -253,9 +251,9 @@ func GetAllSequences(connectionPool *dbconn.DBConn) []Sequence { AND %s AND %s ORDER BY n.nspname, c.relname`, - relationAndSchemaFilterClause(), ExtensionFilterClause("c")) - } else { - query = fmt.Sprintf(` + relationAndSchemaFilterClause(), ExtensionFilterClause("c")) + + before7Query := fmt.Sprintf(` SELECT n.oid AS schemaoid, c.oid AS oid, quote_ident(n.nspname) AS schema, @@ -274,8 +272,15 @@ func GetAllSequences(connectionPool *dbconn.DBConn) []Sequence { AND %s AND %s ORDER BY n.nspname, c.relname`, - relationAndSchemaFilterClause(), ExtensionFilterClause("c")) + relationAndSchemaFilterClause(), ExtensionFilterClause("c")) + + query := "" + if connectionPool.Version.Before("7") { + query = before7Query + } else { + query = atLeast7Query } + results := make([]Sequence, 0) err := connectionPool.Select(&results, query) gplog.FatalOnError(err) @@ -307,9 +312,23 @@ func GetAllSequences(connectionPool *dbconn.DBConn) []Sequence { } func GetSequenceDefinition(connectionPool *dbconn.DBConn, seqName string) SequenceDefinition { - var query string - if connectionPool.Version.AtLeast("7") { - query = fmt.Sprintf(` + startValQuery := "" + if connectionPool.Version.AtLeast("6") { + startValQuery = "start_value AS startval," + } + + before7Query := fmt.Sprintf(` + SELECT last_value AS lastval, + %s + increment_by AS increment, + max_value AS maxval, + min_value AS minval, + cache_value AS cacheval, + is_cycled AS iscycled, + is_called AS iscalled + FROM %s`, startValQuery, seqName) + + atLeast7Query := fmt.Sprintf(` SELECT s.seqstart AS startval, r.last_value AS lastval, pg_catalog.format_type(s.seqtypid, NULL) AS type, @@ -321,22 +340,14 @@ func GetSequenceDefinition(connectionPool *dbconn.DBConn, seqName string) Sequen r.is_called AS iscalled FROM %s r JOIN pg_sequence s ON s.seqrelid = '%s'::regclass::oid;`, seqName, seqName) + + query := "" + if connectionPool.Version.Before("7") { + query = before7Query } else { - startValQuery := "" - if connectionPool.Version.AtLeast("6") { - startValQuery = "start_value AS startval," - } - query = fmt.Sprintf(` - SELECT last_value AS lastval, - %s - increment_by AS increment, - max_value AS maxval, - min_value AS minval, - cache_value AS cacheval, - is_cycled AS iscycled, - is_called AS iscalled - FROM %s`, startValQuery, seqName) + query = atLeast7Query } + result := SequenceDefinition{} err := connectionPool.Get(&result, query) gplog.FatalOnError(err) @@ -382,7 +393,6 @@ func (v View) ObjectType() string { } // This function retrieves both regular views and materialized views. -// Materialized views were introduced in GPDB 7 and backported to GPDB 6.2. 
 func GetAllViews(connectionPool *dbconn.DBConn) []View {
 	// When querying the view definition using pg_get_viewdef(), the pg function
@@ -395,31 +405,45 @@ func GetAllViews(connectionPool *dbconn.DBConn) []View {
 		defer connectionPool.MustExec("ROLLBACK TO SAVEPOINT gpbackup_get_views")
 	}
 
-	selectClause := `
+	before6Query := fmt.Sprintf(`
 	SELECT
 		c.oid AS oid,
 		quote_ident(n.nspname) AS schema,
 		quote_ident(c.relname) AS name,
-		pg_get_viewdef(c.oid) AS definition`
-	if connectionPool.Version.AtLeast("6") {
-		selectClause += `,
+		pg_get_viewdef(c.oid) AS definition
+	FROM pg_class c
+	LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
+	LEFT JOIN pg_tablespace t ON t.oid = c.reltablespace
+	WHERE c.relkind IN ('m', 'v')
+		AND %s
+		AND %s`, relationAndSchemaFilterClause(), ExtensionFilterClause("c"))
+
+	// Materialized views were introduced in GPDB 7 and backported to GPDB 6.2.
+	// Reloptions and tablespace were added to pg_class in GPDB 6.
+	atLeast6Query := fmt.Sprintf(`
+	SELECT
+		c.oid AS oid,
+		quote_ident(n.nspname) AS schema,
+		quote_ident(c.relname) AS name,
+		pg_get_viewdef(c.oid) AS definition,
 		coalesce(' WITH (' || array_to_string(c.reloptions, ', ') || ')', '') AS options,
 		coalesce(quote_ident(t.spcname), '') AS tablespace,
-		c.relkind='m' AS ismaterialized`
-	}
-
-	fromClause := `
+		c.relkind='m' AS ismaterialized
 	FROM pg_class c
 	LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
-	LEFT JOIN pg_tablespace t ON t.oid = c.reltablespace`
-
-	whereClause := fmt.Sprintf(`
+	LEFT JOIN pg_tablespace t ON t.oid = c.reltablespace
 	WHERE c.relkind IN ('m', 'v')
 		AND %s
 		AND %s`, relationAndSchemaFilterClause(), ExtensionFilterClause("c"))
 
+	query := ""
+	if connectionPool.Version.Before("6") {
+		query = before6Query
+	} else {
+		query = atLeast6Query
+	}
+
 	results := make([]View, 0)
-	query := selectClause + fromClause + whereClause
 	err := connectionPool.Select(&results, query)
 	gplog.FatalOnError(err)
@@ -470,7 +494,9 @@ func LockTables(connectionPool *dbconn.DBConn, tables []Relation) {
 	lastBatchSize := len(tables) % batchSize
 	tableBatches := GenerateTableBatches(tables, batchSize)
 	currentBatchSize := batchSize
-	if connectionPool.Version.AtLeast("6.21.0") {
+	if connectionPool.Version.AtLeast("7") {
+		lockMode = `IN ACCESS SHARE MODE COORDINATOR ONLY`
+	} else if connectionPool.Version.AtLeast("6.21.0") {
 		lockMode = `IN ACCESS SHARE MODE MASTER ONLY`
 	} else {
 		lockMode = `IN ACCESS SHARE MODE`
diff --git a/backup/queries_statistics.go b/backup/queries_statistics.go
index d258a479a..4f9a4eac7 100644
--- a/backup/queries_statistics.go
+++ b/backup/queries_statistics.go
@@ -152,7 +152,7 @@ func GetTupleStatistics(connectionPool *dbconn.DBConn, tables []Table) map[uint3
 		WHERE %s
 		AND quote_ident(n.nspname) || '.' 
|| quote_ident(c.relname) IN (%s) ORDER BY n.nspname, c.relname`, - SchemaFilterClause("n"), utils.SliceToQuotedString(tablenames)) + SchemaFilterClause("n"), utils.SliceToQuotedString(tablenames)) results := make([]TupleStatistic, 0) err := connectionPool.Select(&results, query) diff --git a/backup/queries_table_defs.go b/backup/queries_table_defs.go index 2af002b7c..5b7b1ace1 100644 --- a/backup/queries_table_defs.go +++ b/backup/queries_table_defs.go @@ -154,23 +154,7 @@ type PartitionLevelInfo struct { } func GetPartitionTableMap(connectionPool *dbconn.DBConn) map[uint32]PartitionLevelInfo { - var query string - if connectionPool.Version.AtLeast("7") { - query = ` - SELECT c.oid, - CASE WHEN p.partrelid IS NOT NULL AND c.relispartition = false THEN '' - ELSE rc.relname - END AS rootname, - CASE WHEN p.partrelid IS NOT NULL AND c.relispartition = false THEN 'p' - WHEN p.partrelid IS NOT NULL AND c.relispartition = true THEN 'i' - ELSE 'l' - END AS level - FROM pg_class c - LEFT JOIN pg_partitioned_table p ON c.oid = p.partrelid - LEFT JOIN pg_class rc ON pg_partition_root(c.oid) = rc.oid - WHERE c.relispartition = true OR c.relkind = 'p'` - } else { - query = ` + before7Query := ` SELECT pc.oid AS oid, 'p' AS level, '' AS rootname @@ -186,6 +170,26 @@ func GetPartitionTableMap(connectionPool *dbconn.DBConn) map[uint32]PartitionLev JOIN (SELECT parrelid AS relid, max(parlevel) AS pl FROM pg_partition GROUP BY parrelid) AS levels ON p.parrelid = levels.relid WHERE r.parchildrelid != 0` + + atLeast7Query := ` + SELECT c.oid, + CASE WHEN p.partrelid IS NOT NULL AND c.relispartition = false THEN '' + ELSE rc.relname + END AS rootname, + CASE WHEN p.partrelid IS NOT NULL AND c.relispartition = false THEN 'p' + WHEN p.partrelid IS NOT NULL AND c.relispartition = true THEN 'i' + ELSE 'l' + END AS level + FROM pg_class c + LEFT JOIN pg_partitioned_table p ON c.oid = p.partrelid + LEFT JOIN pg_class rc ON pg_partition_root(c.oid) = rc.oid + WHERE c.relispartition = true OR c.relkind = 'p'` + + query := "" + if connectionPool.Version.Before("7") { + query = before7Query + } else { + query = atLeast7Query } results := make([]PartitionLevelInfo, 0) @@ -239,8 +243,9 @@ func GetColumnDefinitions(connectionPool *dbconn.DBConn) map[uint32][]ColumnDefi // Include child partitions that are also external tables gplog.Verbose("Getting column definitions") results := make([]ColumnDefinition, 0) - selectClause := ` - SELECT a.attrelid, + + before6Query := fmt.Sprintf(` + SELECT a.attrelid, a.attnum, quote_ident(a.attname) AS name, a.attnotnull, @@ -250,54 +255,42 @@ func GetColumnDefinitions(connectionPool *dbconn.DBConn) map[uint32][]ColumnDefi a.attstattarget, CASE WHEN a.attstorage != t.typstorage THEN a.attstorage ELSE '' END AS storagetype, coalesce('('||pg_catalog.pg_get_expr(ad.adbin, ad.adrelid)||')', '') AS defaultval, - coalesce(d.description, '') AS comment` - fromClause := ` + coalesce(d.description, '') AS comment FROM pg_catalog.pg_attribute a JOIN pg_class c ON a.attrelid = c.oid JOIN pg_namespace n ON c.relnamespace = n.oid LEFT JOIN pg_catalog.pg_attrdef ad ON a.attrelid = ad.adrelid AND a.attnum = ad.adnum LEFT JOIN pg_catalog.pg_type t ON a.atttypid = t.oid LEFT JOIN pg_catalog.pg_attribute_encoding e ON e.attrelid = a.attrelid AND e.attnum = a.attnum - LEFT JOIN pg_description d ON d.objoid = a.attrelid AND d.classoid = 'pg_class'::regclass AND d.objsubid = a.attnum` - - partitionRuleExcludeClause := "" - if connectionPool.Version.Before("7") { - // In GPDB7+ we do not want to exclude 
child partitions, they function as separate tables.
-		partitionRuleExcludeClause = `
+	LEFT JOIN pg_description d ON d.objoid = a.attrelid AND d.classoid = 'pg_class'::regclass AND d.objsubid = a.attnum
+	WHERE %s
 		AND NOT EXISTS (
 			SELECT 1 FROM
 			(SELECT parchildrelid FROM pg_partition_rule EXCEPT SELECT reloid FROM pg_exttable) par
-			WHERE par.parchildrelid = c.oid)`
-	}
-	whereClause := fmt.Sprintf(`
-	WHERE %s
-	%s
+			WHERE par.parchildrelid = c.oid)
 		AND c.reltype <> 0
 		AND a.attnum > 0::pg_catalog.int2
 		AND a.attisdropped = 'f'
-	ORDER BY a.attrelid, a.attnum`, relationAndSchemaFilterClause(), partitionRuleExcludeClause)
-
-	if connectionPool.Version.AtLeast("6") {
-		// Cannot use unnest() in CASE statements anymore in GPDB 7+ so convert
-		// it to a LEFT JOIN LATERAL. We do not use LEFT JOIN LATERAL for GPDB 6
-		// because the CASE unnest() logic is more performant.
-		aclCols := "''"
-		aclLateralJoin := ""
-		if connectionPool.Version.AtLeast("7") {
-			aclLateralJoin =
-				`LEFT JOIN LATERAL unnest(a.attacl) ljl_unnest ON a.attacl IS NOT NULL AND array_length(a.attacl, 1) != 0`
-			aclCols = "ljl_unnest"
-			// Generated columns
-			selectClause += `, a.attgenerated`
-		} else {
-			aclCols = `CASE
-				WHEN a.attacl IS NULL THEN NULL
-				WHEN array_upper(a.attacl, 1) = 0 THEN a.attacl[0]
-				ELSE unnest(a.attacl) END`
-		}
+	ORDER BY a.attrelid, a.attnum`, relationAndSchemaFilterClause())
 
-		selectClause += fmt.Sprintf(`,
-		%s AS privileges,
+	// GPDB 6 adds multiple additional column attributes and options, including
+	// fdwoptions and security labels. This query captures all of them.
+	version6Query := fmt.Sprintf(`
+	SELECT a.attrelid,
+		a.attnum,
+		quote_ident(a.attname) AS name,
+		a.attnotnull,
+		a.atthasdef,
+		pg_catalog.format_type(t.oid,a.atttypmod) AS type,
+		coalesce(pg_catalog.array_to_string(e.attoptions, ','), '') AS encoding,
+		a.attstattarget,
+		CASE WHEN a.attstorage != t.typstorage THEN a.attstorage ELSE '' END AS storagetype,
+		coalesce('('||pg_catalog.pg_get_expr(ad.adbin, ad.adrelid)||')', '') AS defaultval,
+		coalesce(d.description, '') AS comment,
+		CASE
+			WHEN a.attacl IS NULL THEN NULL
+			WHEN array_upper(a.attacl, 1) = 0 THEN a.attacl[0]
+			ELSE unnest(a.attacl) END AS privileges,
 		CASE
 			WHEN a.attacl IS NULL THEN ''
 			WHEN array_upper(a.attacl, 1) = 0 THEN 'Empty'
@@ -307,17 +300,81 @@ func GetColumnDefinitions(connectionPool *dbconn.DBConn) map[uint32][]ColumnDefi
 		coalesce(array_to_string(ARRAY(SELECT option_name || ' ' || quote_literal(option_value) FROM pg_options_to_table(attfdwoptions) ORDER BY option_name), ', '), '') AS fdwoptions,
 		CASE WHEN a.attcollation <> t.typcollation THEN quote_ident(cn.nspname) || '.' || quote_ident(coll.collname) ELSE '' END AS collation,
 		coalesce(sec.provider,'') AS securitylabelprovider,
-		coalesce(sec.label,'') AS securitylabel`, aclCols)
-
-	fromClause += fmt.Sprintf(`
+		coalesce(sec.label,'') AS securitylabel
+	FROM pg_catalog.pg_attribute a
+		JOIN pg_class c ON a.attrelid = c.oid
+		JOIN pg_namespace n ON c.relnamespace = n.oid
+		LEFT JOIN pg_catalog.pg_attrdef ad ON a.attrelid = ad.adrelid AND a.attnum = ad.adnum
+		LEFT JOIN pg_catalog.pg_type t ON a.atttypid = t.oid
+		LEFT JOIN pg_catalog.pg_attribute_encoding e ON e.attrelid = a.attrelid AND e.attnum = a.attnum
+		LEFT JOIN pg_description d ON d.objoid = a.attrelid AND d.classoid = 'pg_class'::regclass AND d.objsubid = a.attnum
 		LEFT JOIN pg_collation coll ON a.attcollation = coll.oid
 		LEFT JOIN pg_namespace cn ON coll.collnamespace = cn.oid
-		LEFT JOIN pg_seclabel sec ON sec.objoid = a.attrelid AND
-		sec.classoid = 'pg_class'::regclass AND sec.objsubid = a.attnum
-		%s`, aclLateralJoin)
+		LEFT JOIN pg_seclabel sec ON sec.objoid = a.attrelid AND sec.classoid = 'pg_class'::regclass AND sec.objsubid = a.attnum
+	WHERE %s
+		AND NOT EXISTS (
+			SELECT 1 FROM
+			(SELECT parchildrelid FROM pg_partition_rule EXCEPT SELECT reloid FROM pg_exttable) par
+			WHERE par.parchildrelid = c.oid)
+		AND c.reltype <> 0
+		AND a.attnum > 0::pg_catalog.int2
+		AND a.attisdropped = 'f'
+	ORDER BY a.attrelid, a.attnum`, relationAndSchemaFilterClause())
+
+	// In GPDB7+ we do not want to exclude child partitions; they function as
+	// separate tables. unnest() can no longer be used inside CASE statements in
+	// GPDB 7+, so it is converted to a LEFT JOIN LATERAL. We do not use LEFT
+	// JOIN LATERAL for GPDB 6 because the CASE unnest() logic is more performant.
+	atLeast7Query := fmt.Sprintf(`
+	SELECT a.attrelid,
+		a.attnum,
+		quote_ident(a.attname) AS name,
+		a.attnotnull,
+		a.atthasdef,
+		pg_catalog.format_type(t.oid,a.atttypmod) AS type,
+		coalesce(pg_catalog.array_to_string(e.attoptions, ','), '') AS encoding,
+		a.attstattarget,
+		CASE WHEN a.attstorage != t.typstorage THEN a.attstorage ELSE '' END AS storagetype,
+		coalesce('('||pg_catalog.pg_get_expr(ad.adbin, ad.adrelid)||')', '') AS defaultval,
+		coalesce(d.description, '') AS comment,
+		a.attgenerated,
+		ljl_unnest AS privileges,
+		CASE
+			WHEN a.attacl IS NULL THEN ''
+			WHEN array_upper(a.attacl, 1) = 0 THEN 'Empty'
+			ELSE ''
+		END AS kind,
+		coalesce(pg_catalog.array_to_string(a.attoptions, ','), '') AS options,
+		coalesce(array_to_string(ARRAY(SELECT option_name || ' ' || quote_literal(option_value) FROM pg_options_to_table(attfdwoptions) ORDER BY option_name), ', '), '') AS fdwoptions,
+		CASE WHEN a.attcollation <> t.typcollation THEN quote_ident(cn.nspname) || '.' 
|| quote_ident(coll.collname) ELSE '' END AS collation, + coalesce(sec.provider,'') AS securitylabelprovider, + coalesce(sec.label,'') AS securitylabel + FROM pg_catalog.pg_attribute a + JOIN pg_class c ON a.attrelid = c.oid + JOIN pg_namespace n ON c.relnamespace = n.oid + LEFT JOIN pg_catalog.pg_attrdef ad ON a.attrelid = ad.adrelid AND a.attnum = ad.adnum + LEFT JOIN pg_catalog.pg_type t ON a.atttypid = t.oid + LEFT JOIN pg_catalog.pg_attribute_encoding e ON e.attrelid = a.attrelid AND e.attnum = a.attnum + LEFT JOIN pg_description d ON d.objoid = a.attrelid AND d.classoid = 'pg_class'::regclass AND d.objsubid = a.attnum + LEFT JOIN pg_collation coll ON a.attcollation = coll.oid + LEFT JOIN pg_namespace cn ON coll.collnamespace = cn.oid + LEFT JOIN pg_seclabel sec ON sec.objoid = a.attrelid AND sec.classoid = 'pg_class'::regclass AND sec.objsubid = a.attnum + LEFT JOIN LATERAL unnest(a.attacl) ljl_unnest ON a.attacl IS NOT NULL AND array_length(a.attacl, 1) != 0 + WHERE %s + AND c.reltype <> 0 + AND a.attnum > 0::pg_catalog.int2 + AND a.attisdropped = 'f' + ORDER BY a.attrelid, a.attnum`, relationAndSchemaFilterClause()) + + query := `` + if connectionPool.Version.Before("6") { + query = before6Query + } else if connectionPool.Version.Is("6") { + query = version6Query + } else { + query = atLeast7Query } - query := fmt.Sprintf(`%s %s %s;`, selectClause, fromClause, whereClause) err := connectionPool.Select(&results, query) gplog.FatalOnError(err) resultMap := make(map[uint32][]ColumnDefinition) @@ -333,10 +390,9 @@ func GetColumnDefinitions(connectionPool *dbconn.DBConn) map[uint32][]ColumnDefi func GetDistributionPolicies(connectionPool *dbconn.DBConn) map[uint32]string { gplog.Verbose("Getting distribution policies") - var query string - if connectionPool.Version.Before("6") { - // This query is adapted from the addDistributedBy() function in pg_dump.c. - query = ` + + // This query is adapted from the addDistributedBy() function in pg_dump.c. 
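+	// For example (an illustrative sketch, not a tested fixture): a table
+	// distributed by (id, region) comes back as one row per table, e.g.
+	//     oid   | value
+	//     16412 | DISTRIBUTED BY (id, region)
+	// while tables whose gp_distribution_policy.attrnums is NULL fall into the
+	// UNION ALL branch below and yield 'DISTRIBUTED RANDOMLY'.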
+ before6Query := ` SELECT p.localoid AS oid, 'DISTRIBUTED BY (' || string_agg(quote_ident(a.attname) , ', ' ORDER BY index) || ')' AS value FROM (SELECT localoid, unnest(attrnums) AS attnum, @@ -347,11 +403,18 @@ func GetDistributionPolicies(connectionPool *dbconn.DBConn) map[uint32]string { UNION ALL SELECT p.localoid AS oid, 'DISTRIBUTED RANDOMLY' AS value FROM gp_distribution_policy p WHERE attrnums IS NULL` - } else { - query = ` + + atLeast6Query := ` SELECT localoid AS oid, pg_catalog.pg_get_table_distributedby(localoid) AS value FROM gp_distribution_policy` + + query := "" + if connectionPool.Version.Before("6") { + query = before6Query + } else { + query = atLeast6Query } + return selectAsOidToStringMap(connectionPool, query) } @@ -450,7 +513,7 @@ func GetPartitionAlteredSchema(connectionPool *dbconn.DBConn) map[uint32][]Alter } gplog.Info("Getting child partitions with altered schema") - query := fmt.Sprintf(` + query := ` SELECT pgp.parrelid AS oid, quote_ident(pgn2.nspname) AS oldschema, quote_ident(pgn.nspname) AS newschema, @@ -461,7 +524,7 @@ func GetPartitionAlteredSchema(connectionPool *dbconn.DBConn) map[uint32][]Alter JOIN pg_catalog.pg_class pgc2 ON pgp.parrelid = pgc2.oid JOIN pg_catalog.pg_namespace pgn ON pgc.relnamespace = pgn.oid JOIN pg_catalog.pg_namespace pgn2 ON pgc2.relnamespace = pgn2.oid - WHERE pgc.relnamespace != pgc2.relnamespace`) + WHERE pgc.relnamespace != pgc2.relnamespace` var results []struct { Oid uint32 AlteredPartitionRelation @@ -537,10 +600,10 @@ func GetForeignTableDefinitions(connectionPool *dbconn.DBConn) map[uint32]Foreig } query := fmt.Sprintf(` - SELECT ftrelid, fs.srvname AS ftserver, - pg_catalog.array_to_string(array( - SELECT pg_catalog.quote_ident(option_name) || ' ' || pg_catalog.quote_literal(option_value) - FROM pg_catalog.pg_options_to_table(ftoptions) ORDER BY option_name + SELECT ftrelid, quote_ident(fs.srvname) AS ftserver, + array_to_string(array( + SELECT quote_ident(option_name) || ' ' || quote_literal(option_value) + FROM pg_options_to_table(ftoptions) ORDER BY option_name ), e', ') AS ftoptions FROM pg_foreign_table ft JOIN pg_foreign_server fs ON ft.ftserver = fs.oid @@ -631,7 +694,7 @@ func GetAttachPartitionInfo(connectionPool *dbconn.DBConn) map[uint32]AttachPart return make(map[uint32]AttachPartitionInfo, 0) } - query := fmt.Sprintf(` + query := ` SELECT c.oid, quote_ident(n.nspname) || '.' 
|| quote_ident(c.relname) AS relname, @@ -641,7 +704,7 @@ func GetAttachPartitionInfo(connectionPool *dbconn.DBConn) map[uint32]AttachPart JOIN pg_namespace n ON c.relnamespace = n.oid JOIN pg_class rc ON pg_partition_root(c.oid) = rc.oid JOIN pg_namespace rn ON rc.relnamespace = rn.oid - WHERE c.relispartition = 't'`) + WHERE c.relispartition = 't'` results := make([]AttachPartitionInfo, 0) err := connectionPool.Select(&results, query) diff --git a/backup/queries_textsearch.go b/backup/queries_textsearch.go index 987c5af3d..15eca53de 100644 --- a/backup/queries_textsearch.go +++ b/backup/queries_textsearch.go @@ -116,7 +116,7 @@ func GetTextSearchTemplates(connectionPool *dbconn.DBConn) []TextSearchTemplate WHERE %s AND %s ORDER BY tmplname`, - SchemaFilterClause("n"), ExtensionFilterClause("p")) + SchemaFilterClause("n"), ExtensionFilterClause("p")) results := make([]TextSearchTemplate, 0) err := connectionPool.Select(&results, query) @@ -166,7 +166,7 @@ func GetTextSearchDictionaries(connectionPool *dbconn.DBConn) []TextSearchDictio WHERE %s AND %s ORDER BY dictname`, - SchemaFilterClause("dict_ns"), ExtensionFilterClause("d")) + SchemaFilterClause("dict_ns"), ExtensionFilterClause("d")) results := make([]TextSearchDictionary, 0) err := connectionPool.Select(&results, query) @@ -216,7 +216,7 @@ func GetTextSearchConfigurations(connectionPool *dbconn.DBConn) []TextSearchConf WHERE %s AND %s ORDER BY cfgname`, - SchemaFilterClause("cfg_ns"), ExtensionFilterClause("c")) + SchemaFilterClause("cfg_ns"), ExtensionFilterClause("c")) results := make([]struct { Schema string diff --git a/backup/queries_types.go b/backup/queries_types.go index 117e41b44..80ee91577 100644 --- a/backup/queries_types.go +++ b/backup/queries_types.go @@ -131,7 +131,7 @@ func GetBaseTypes(connectionPool *dbconn.DBConn) []BaseType { AND ut.oid IS NULL AND %s`, SchemaFilterClause("n"), ExtensionFilterClause("t")) - masterQuery := fmt.Sprintf(` + atLeast6Query := fmt.Sprintf(` SELECT t.oid, quote_ident(n.nspname) AS schema, quote_ident(t.typname) AS name, @@ -172,7 +172,7 @@ func GetBaseTypes(connectionPool *dbconn.DBConn) []BaseType { } else if connectionPool.Version.Is("5") { err = connectionPool.Select(&results, version5query) } else { - err = connectionPool.Select(&results, masterQuery) + err = connectionPool.Select(&results, atLeast6Query) } gplog.FatalOnError(err) /* @@ -253,7 +253,7 @@ type Attribute struct { func getCompositeTypeAttributes(connectionPool *dbconn.DBConn) map[uint32][]Attribute { gplog.Verbose("Getting composite type attributes") - compositeAttributeQuery := ` + before6Query := ` SELECT t.oid AS compositetypeoid, quote_ident(a.attname) AS name, pg_catalog.format_type(a.atttypid, a.atttypmod) AS type, @@ -266,31 +266,36 @@ func getCompositeTypeAttributes(connectionPool *dbconn.DBConn) map[uint32][]Attr AND c.relkind = 'c' ORDER BY t.oid, a.attnum` - if connectionPool.Version.AtLeast("6") { - compositeAttributeQuery = ` - SELECT t.oid AS compositetypeoid, - quote_ident(a.attname) AS name, - pg_catalog.format_type(a.atttypid, a.atttypmod) AS type, - coalesce(quote_literal(d.description),'') AS comment, - CASE - WHEN at.typcollation <> a.attcollation - THEN quote_ident(cn.nspname) || '.' 
|| quote_ident(coll.collname) ELSE '' - END AS collation - FROM pg_type t - JOIN pg_class c ON t.typrelid = c.oid - JOIN pg_attribute a ON t.typrelid = a.attrelid - LEFT JOIN pg_description d ON (d.objoid = a.attrelid AND d.classoid = 'pg_class'::regclass AND d.objsubid = a.attnum) - LEFT JOIN pg_type at ON at.oid = a.atttypid - LEFT JOIN pg_collation coll ON a.attcollation = coll.oid - LEFT JOIN pg_namespace cn ON coll.collnamespace = cn.oid - WHERE t.typtype = 'c' - AND c.relkind = 'c' - AND a.attisdropped = false - ORDER BY t.oid, a.attnum` + atLeast6Query := ` + SELECT t.oid AS compositetypeoid, + quote_ident(a.attname) AS name, + pg_catalog.format_type(a.atttypid, a.atttypmod) AS type, + coalesce(quote_literal(d.description),'') AS comment, + CASE + WHEN at.typcollation <> a.attcollation + THEN quote_ident(cn.nspname) || '.' || quote_ident(coll.collname) ELSE '' + END AS collation + FROM pg_type t + JOIN pg_class c ON t.typrelid = c.oid + JOIN pg_attribute a ON t.typrelid = a.attrelid + LEFT JOIN pg_description d ON (d.objoid = a.attrelid AND d.classoid = 'pg_class'::regclass AND d.objsubid = a.attnum) + LEFT JOIN pg_type at ON at.oid = a.atttypid + LEFT JOIN pg_collation coll ON a.attcollation = coll.oid + LEFT JOIN pg_namespace cn ON coll.collnamespace = cn.oid + WHERE t.typtype = 'c' + AND c.relkind = 'c' + AND a.attisdropped = false + ORDER BY t.oid, a.attnum` + + query := "" + if connectionPool.Version.Before("6") { + query = before6Query + } else { + query = atLeast6Query } results := make([]Attribute, 0) - err := connectionPool.Select(&results, compositeAttributeQuery) + err := connectionPool.Select(&results, query) gplog.FatalOnError(err) attributeMap := make(map[uint32][]Attribute) @@ -347,7 +352,7 @@ func GetDomainTypes(connectionPool *dbconn.DBConn) []Domain { AND %s ORDER BY n.nspname, t.typname`, SchemaFilterClause("n"), ExtensionFilterClause("t")) - masterQuery := fmt.Sprintf(` + atLeast6Query := fmt.Sprintf(` SELECT t.oid, quote_ident(n.nspname) AS schema, quote_ident(t.typname) AS name, @@ -368,14 +373,15 @@ func GetDomainTypes(connectionPool *dbconn.DBConn) []Domain { AND t.typtype = 'd' AND %s ORDER BY n.nspname, t.typname`, SchemaFilterClause("n"), ExtensionFilterClause("t")) - var err error + query := "" if connectionPool.Version.Before("6") { - err = connectionPool.Select(&results, before6query) + query = before6query } else { - err = connectionPool.Select(&results, masterQuery) + query = atLeast6Query } + err := connectionPool.Select(&results, query) gplog.FatalOnError(err) return results } @@ -556,29 +562,34 @@ func (c Collation) FQN() string { } func GetCollations(connectionPool *dbconn.DBConn) []Collation { - var query string - if connectionPool.Version.AtLeast("7") { - query = fmt.Sprintf(` - SELECT c.oid, - quote_ident(n.nspname) AS schema, - quote_ident(c.collname) AS name, - c.collcollate AS collate, - c.collctype AS ctype, - c.collprovider as provider, - c.collisdeterministic as IsDeterministic - FROM pg_collation c - JOIN pg_namespace n ON c.collnamespace = n.oid - WHERE %s`, SchemaFilterClause("n")) + + before7Query := fmt.Sprintf(` + SELECT c.oid, + quote_ident(n.nspname) AS schema, + quote_ident(c.collname) AS name, + c.collcollate AS collate, + c.collctype AS ctype + FROM pg_collation c + JOIN pg_namespace n ON c.collnamespace = n.oid + WHERE %s`, SchemaFilterClause("n")) + + atLeast7Query := fmt.Sprintf(` + SELECT c.oid, + quote_ident(n.nspname) AS schema, + quote_ident(c.collname) AS name, + c.collcollate AS collate, + c.collctype AS ctype, + 
c.collprovider as provider, + c.collisdeterministic as IsDeterministic + FROM pg_collation c + JOIN pg_namespace n ON c.collnamespace = n.oid + WHERE %s`, SchemaFilterClause("n")) + + query := "" + if connectionPool.Version.Before("7") { + query = before7Query } else { - query = fmt.Sprintf(` - SELECT c.oid, - quote_ident(n.nspname) AS schema, - quote_ident(c.collname) AS name, - c.collcollate AS collate, - c.collctype AS ctype - FROM pg_collation c - JOIN pg_namespace n ON c.collnamespace = n.oid - WHERE %s`, SchemaFilterClause("n")) + query = atLeast7Query } results := make([]Collation, 0) diff --git a/backup/statistics.go b/backup/statistics.go index ff1570821..1d2b30295 100644 --- a/backup/statistics.go +++ b/backup/statistics.go @@ -2,7 +2,7 @@ package backup /* * This file contains structs and functions related to backing up query planner - * statistics on the master. + * statistics on the coordinator. */ import ( diff --git a/backup/wrappers.go b/backup/wrappers.go index e62eadc1c..885925f0e 100644 --- a/backup/wrappers.go +++ b/backup/wrappers.go @@ -178,7 +178,7 @@ func createBackupLockFile(timestamp string) { func createBackupDirectoriesOnAllHosts() { remoteOutput := globalCluster.GenerateAndExecuteCommand("Creating backup directories", - cluster.ON_SEGMENTS|cluster.INCLUDE_MASTER, + cluster.ON_SEGMENTS|cluster.INCLUDE_COORDINATOR, func(contentID int) string { return fmt.Sprintf("mkdir -p %s", globalFPInfo.GetDirForContent(contentID)) }) @@ -495,11 +495,19 @@ func backupResourceGroups(metadataFile *utils.FileWithByteCount) { return } gplog.Verbose("Writing CREATE RESOURCE GROUP statements to metadata file") - resGroups := GetResourceGroups(connectionPool) - objectCounts["Resource Groups"] = len(resGroups) - resGroupMetadata := GetCommentsForObjectType(connectionPool, TYPE_RESOURCEGROUP) - PrintResetResourceGroupStatements(metadataFile, globalTOC) - PrintCreateResourceGroupStatements(metadataFile, globalTOC, resGroups, resGroupMetadata) + if connectionPool.Version.Before("7") { + resGroups := GetResourceGroups[ResourceGroupBefore7](connectionPool) + objectCounts["Resource Groups"] = len(resGroups) + resGroupMetadata := GetCommentsForObjectType(connectionPool, TYPE_RESOURCEGROUP) + PrintResetResourceGroupStatements(metadataFile, globalTOC) + PrintCreateResourceGroupStatementsBefore7(metadataFile, globalTOC, resGroups, resGroupMetadata) + } else { // GPDB7+ + resGroups := GetResourceGroups[ResourceGroupAtLeast7](connectionPool) + objectCounts["Resource Groups"] = len(resGroups) + resGroupMetadata := GetCommentsForObjectType(connectionPool, TYPE_RESOURCEGROUP) + PrintResetResourceGroupStatements(metadataFile, globalTOC) + PrintCreateResourceGroupStatementsAtLeast7(metadataFile, globalTOC, resGroups, resGroupMetadata) + } } func backupRoles(metadataFile *utils.FileWithByteCount) { diff --git a/ci/gpbackup-generated.yml b/ci/gpbackup-generated.yml index bda2d796a..4bdecaa46 100644 --- a/ci/gpbackup-generated.yml +++ b/ci/gpbackup-generated.yml @@ -12,7 +12,7 @@ ## file (example: templates/gpbackup-tpl.yml) and regenerate the pipeline ## using appropriate tool (example: gen_pipeline.py -p gpbackup-release). 
## ----------------------------------------------------------------------
-## Generated by gen_pipeline.py at: 2022-11-02 09:43:34.463191
+## Generated by gen_pipeline.py at: 2023-04-11 14:51:34.942939
 ## Template file: gpbackup-tpl.yml
 ## Pipeline Name: gpbackup
 ## Nightly Trigger: True
@@ -24,35 +24,28 @@ groups:
   jobs:
   - build_binaries
   - build_gppkgs
-  - GPDB4.3
   - GPDB5
-  - GPDB5-sles11
   - GPDB6-ubuntu
-  - ddboost_plugin_and_boostfs_tests_43
   - ddboost_plugin_and_boostfs_tests_5x
+  - scale-5x
+  - scale-6x
+  - scale-GPDB7
+  - s3_plugin_perf
+  - ddboost_plugin_perf
   - gpbackup-manager-tests
-  - 5X-head-gpbackup-fixed-test
   - GPDB6
   - GPDB6-7-seg-cluster
+  - GPDB7
   - s3_plugin_tests
   - backward-compatibility
   - ddboost_plugin_and_boostfs_tests_6x
-  - master
   - final_gate
 
-- name: GPDB4.3
-  jobs:
-  - GPDB4.3
-  - scale-43
-  - ddboost_plugin_and_boostfs_tests_43
-
 - name: GPDB5
   jobs:
   - GPDB5
-  - GPDB5-sles11
-  - scale-5x
  - ddboost_plugin_and_boostfs_tests_5x
-  - 5X-head-gpbackup-fixed-test
+  - scale-5x
   - s3_plugin_tests
   - backward-compatibility
 
@@ -66,19 +59,18 @@ groups:
   - s3_plugin_perf
   - ddboost_plugin_perf
 
-- name: Master
+- name: GPDB7
   jobs:
-  - master
-  - scale-master
+  - GPDB7
+  - scale-GPDB7
 
 - name: Scale
   jobs:
-  - scale-43
   - scale-5x
   - scale-6x
+  - scale-GPDB7
   - s3_plugin_perf
   - ddboost_plugin_perf
-  - scale-master
 
 ##### Anchors #####
 
@@ -209,28 +201,44 @@ resource_types:
     repository: pivotalcf/pivnet-resource
     tag: latest-final
 
-
+##############################################
 resources:
 ##### Docker Images #####
-- name: centos7-image
+- name: centos6-gpdb5-image
+  type: registry-image
+  source:
+    repository: gcr.io/data-gpdb-public-images/gpdb5-centos6-build-test
+    tag: latest
+
+- name: centos7-gpdb5-image
   type: registry-image
   source:
     repository: gcr.io/data-gpdb-public-images/gpdb5-centos7-build-test
     tag: latest
 
-- name: centos6-image
+- name: centos7-gpdb6-build-image
   type: registry-image
   source:
-    repository: gcr.io/data-gpdb-public-images/gpdb5-centos6-build-test
+    repository: gcr.io/data-gpdb-public-images/gpdb6-centos7-build
+    tag: latest
+
+- name: centos7-gpdb6-golang-image
+  type: registry-image
+  source:
+    repository: gcr.io/data-gpdb-public-images/gpdb6-centos7-test-golang
    tag: latest
 
-- name: sles11-image
+- name: rocky8-gpdb6-image
   type: registry-image
   source:
-    repository: gcr.io/data-gpdb-private-images/gpdb5-sles11-build-test
+    repository: gcr.io/data-gpdb-public-images/gpdb6-rocky8-test
+    tag: latest
+
+- name: rocky8-gpdb7-image
+  type: registry-image
+  source:
+    repository: gcr.io/data-gpdb-public-images/gpdb7-rocky8-test
     tag: latest
-    username: _json_key
-    password: ((data-gpdb-private-images-container-registry-readonly-service-account-key))
 
 - name: ubuntu-debian-image
   type: registry-image
@@ -244,15 +252,9 @@ resources:
     repository: gcr.io/data-gpdb-public-images/gpdb6-ubuntu18.04-test
     tag: latest
 
-##### Other Resources #####
-- name: nightly-trigger
-  type: time
-  source:
-    location: America/Los_Angeles
-    days: [Sunday, Monday, Tuesday, Wednesday, Thursday, Friday]
-    start: 6:00 AM
-    stop: 7:00 AM
+##############################################
 
+##### Source Code #####
 # TODO mark these 
as src with name change - name: gpbackup type: git @@ -291,19 +293,6 @@ resources: uri: https://github.com/greenplum-db/gpdb branch: 5X_STABLE -# We use this intermediate binary in an effort to catch possible -# inconsistencies (between a new gpdb5 bin and a fixed version of gpbackup) -# before a final release_candidate is generated & shipped -- name: bin_gpdb_5x_stable_intermediate - type: s3 - icon: amazon - source: - bucket: gpdb5-stable-concourse-builds - versioned_file: bin_gpdb_centos/bin_gpdb.tar.gz - region_name: us-west-2 - access_key_id: ((aws-bucket-access-key-id)) - secret_access_key: ((aws-bucket-secret-access-key)) - - name: gpdb6_src type: git icon: github-circle @@ -311,103 +300,45 @@ resources: uri: https://github.com/greenplum-db/gpdb branch: 6X_STABLE -- name: bin_gpdb_5x_sles11 - type: s3 - icon: amazon - source: - bucket: gpdb5-stable-concourse-builds - access_key_id: ((aws-bucket-access-key-id)) - region_name: ((dp/aws-region)) - secret_access_key: ((aws-bucket-secret-access-key)) - versioned_file: bin_gpdb_sles11/gpdb_branch_5X_STABLE/icw_green/bin_gpdb.tar.gz - -- name: bin_gpdb_6x_stable_ubuntu - type: gcs - icon: google - source: - bucket: ((dp/prod/gcs-bucket)) - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/published/gpdb6/server-rc-(.*)-ubuntu18.04_x86_64((dp/prod/rc-build-type-gcs)).tar.gz - -- name: gpdb_master_src +- name: gpdb_main_src type: git icon: github-circle source: uri: https://github.com/greenplum-db/gpdb branch: main -- name: bin_gpdb_master_centos7 - type: gcs - icon: google - source: - bucket: ((dp/prod/gcs-bucket)) - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/published/master/server-rc-(.*)-rhel7_x86_64((dp/prod/rc-build-type-gcs)).tar.gz - -# These binaries are used for backwards-compatibility testing only -- name: bin_gpbackup_1.0.0_and_1.7.1 - type: s3 - icon: amazon - source: - bucket: gpbackup-dependencies - versioned_file: gpbackup_bins_1.0.0_and_1.7.1.tar.gz - region_name: us-west-2 - access_key_id: ((aws-bucket-access-key-id)) - secret_access_key: ((aws-bucket-secret-access-key)) - -# This is specifically for sles11 images because it cannot connect to github due to TLS issues -- name: libyaml-0.1.7 - type: s3 - icon: amazon - source: - bucket: gpbackup-dependencies - versioned_file: gpbackup-dependencies/libyaml-0.1.7.tar.gz - region_name: us-west-2 - access_key_id: ((aws-bucket-access-key-id)) - secret_access_key: ((aws-bucket-secret-access-key)) - -- name: pgcrypto43 - type: s3 - icon: amazon +- name: ccp_src + type: git + icon: github-circle source: - bucket: pgcrypto43 - access_key_id: ((aws-bucket-access-key-id)) - region_name: ((dp/aws-region)) - secret_access_key: ((aws-bucket-secret-access-key)) - regexp: pgcrypto-ossv1.1_pv(.*)_gpdb4.3orca-rhel5-x86_64.gppkg + branch: ((dp/ccp-git-branch)) + private_key: ((gp-concourse-cluster-provisioner-git-key)) + uri: ((dp/ccp-git-remote)) -# Manual caching to prevent dep flakes when downloading dependencies -- name: gpbackup_1.12.1_dependencies - type: s3 - icon: amazon - source: - bucket: gpbackup-dependencies - versioned_file: gpbackup_1.12.1_dependencies.tar.gz - region_name: us-west-2 - access_key_id: ((aws-bucket-access-key-id)) - secret_access_key: ((aws-bucket-secret-access-key)) +############################################## -- name: dummy_seclabel_linux_gpdb6 +##### Binaries ##### +- name: bin_gpdb_6x_stable_ubuntu type: gcs icon: google source: - bucket: dummy_seclabel_gpdb_linux - json_key: 
((dp/prod/gcp_svc_acct_key)) - regexp: dummy_seclabel_gpdb6-v(.*).so + bucket: ((dp/prod/gcs-ci-bucket)) + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: server/published/gpdb6/server-rc-(.*)-ubuntu18.04_x86_64((dp/prod/rc-build-type-gcs)).tar.gz -- name: dummy_seclabel_linux_master +- name: bin_gpdb_7x_rhel8 type: gcs icon: google source: - bucket: dummy_seclabel_gpdb_linux - json_key: ((dp/prod/gcp_svc_acct_key)) - regexp: dummy_seclabel_gpdb7-v(.*).so + bucket: ((dp/prod/gcs-ci-bucket)) + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: server/published/main/server-rc-(.*)-rhel8_x86_64.tar.gz - name: bin_gpdb_5x_stable_centos6 type: s3 icon: amazon source: - bucket: gpdb-stable-concourse-builds + bucket: ((dp/prod/gpdb-stable-bucket-name)) versioned_file: release_candidates/bin_gpdb_centos6/gpdb5/bin_gpdb.tar.gz region_name: us-west-2 access_key_id: ((aws-bucket-access-key-id)) @@ -417,7 +348,7 @@ resources: type: s3 icon: amazon source: - bucket: gpdb-stable-concourse-builds + bucket: ((dp/prod/gpdb-stable-bucket-name)) versioned_file: release_candidates/bin_gpdb_centos7/gpdb5/bin_gpdb.tar.gz region_name: us-west-2 access_key_id: ((aws-bucket-access-key-id)) @@ -427,7 +358,7 @@ resources: type: gcs icon: google source: - bucket: ((dp/prod/gcs-bucket)) + bucket: ((dp/prod/gcs-ci-bucket)) json_key: ((concourse-gcs-resources-service-account-key)) regexp: server/published/gpdb6/server-rc-(.*)-rhel6_x86_64((dp/prod/rc-build-type-gcs)).tar.gz @@ -435,27 +366,54 @@ resources: type: gcs icon: google source: - bucket: ((dp/prod/gcs-bucket)) + bucket: ((dp/prod/gcs-ci-bucket)) json_key: ((concourse-gcs-resources-service-account-key)) regexp: server/published/gpdb6/server-rc-(.*)-rhel7_x86_64((dp/prod/rc-build-type-gcs)).tar.gz -- name: bin_gpdb_43_stable +- name: bin_gpdb_6x_rhel8 + type: gcs + source: + bucket: ((dp/prod/gcs-ci-bucket)) + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: server/published/gpdb6/server-rc-(.*)-rhel8_x86_64((dp/prod/rc-build-type-gcs)).tar.gz + +# These binaries are used for backwards-compatibility testing only +- name: bin_gpbackup_1.0.0_and_1.7.1 type: s3 icon: amazon source: - bucket: gpdb-4.3-stable-concourse - versioned_file: bin_gpdb_centos/bin_gpdb.tar.gz + bucket: gpbackup-dependencies + versioned_file: gpbackup_bins_1.0.0_and_1.7.1.tar.gz region_name: us-west-2 - access_key_id: ((gpdb4-bucket-access-key-id)) - secret_access_key: ((gpdb4-bucket-secret-access-key)) + access_key_id: ((aws-bucket-access-key-id)) + secret_access_key: ((aws-bucket-secret-access-key)) -- name: ccp_src - type: git - icon: github-circle +############################################## + +##### Other Resources ##### +- name: nightly-trigger + type: time source: - branch: ((dp/ccp-git-branch)) - private_key: ((gp-concourse-cluster-provisioner-git-key)) - uri: ((dp/ccp-git-remote)) + location: America/Los_Angeles + days: [Sunday, Monday, Tuesday, Wednesday, Thursday, Friday] + start: 6:00 AM + stop: 7:00 AM + +- name: dummy_seclabel_linux_gpdb6 + type: gcs + icon: google + source: + bucket: dummy_seclabel_gpdb_linux + json_key: ((dp/dev/gcp_svc_acct_key)) + regexp: dummy_seclabel_gpdb6-v(.*).so + +- name: dummy_seclabel_linux_main + type: gcs + icon: google + source: + bucket: dummy_seclabel_gpdb_linux + json_key: ((dp/dev/gcp_svc_acct_key)) + regexp: dummy_seclabel_gpdb7-v(.*).so - name: terraform <<: *terraform_cluster @@ -507,25 +465,45 @@ resources: regexp: pivnet_release_version/v-(.*) -- name: gpbackup-go-components - 
type: s3 - icon: amazon +- name: gpbackup-go-components-rhel6 + type: gcs + icon: google source: - access_key_id: ((aws-bucket-access-key-id)) - bucket: ((dp/prod/gpdb-stable-bucket-name)) - region_name: ((dp/aws-region)) - secret_access_key: ((aws-bucket-secret-access-key)) - versioned_file: gpbackup-go-components/go_components.tar.gz + bucket: ((dp/dev/gcs-ci-bucket)) + json_key: ((dp/dev/gcp_svc_acct_key)) + versioned_file: gpbackup/intermediates/rhel6/go_components.tar.gz + +- name: gpbackup-go-components-rhel7 + type: gcs + icon: google + source: + bucket: ((dp/dev/gcs-ci-bucket)) + json_key: ((dp/dev/gcp_svc_acct_key)) + versioned_file: gpbackup/intermediates/rhel7/go_components.tar.gz + +- name: gpbackup-go-components-rhel8 + type: gcs + icon: google + source: + bucket: ((dp/dev/gcs-ci-bucket)) + json_key: ((dp/dev/gcp_svc_acct_key)) + versioned_file: gpbackup/intermediates/rhel8/go_components.tar.gz + +- name: gpbackup-go-components-ubuntu + type: gcs + icon: google + source: + bucket: ((dp/dev/gcs-ci-bucket)) + json_key: ((dp/dev/gcp_svc_acct_key)) + versioned_file: gpbackup/intermediates/ubuntu/go_components.tar.gz - name: gppkgs - type: s3 - icon: amazon + type: gcs + icon: google source: - access_key_id: ((aws-bucket-access-key-id)) - bucket: ((dp/prod/gpdb-stable-bucket-name)) - region_name: ((dp/aws-region)) - secret_access_key: ((aws-bucket-secret-access-key)) - versioned_file: gppkgs/intermediates/gpbackup-gppkgs.tar.gz + bucket: ((dp/dev/gcs-ci-bucket)) + json_key: ((dp/dev/gcp_svc_acct_key)) + versioned_file: gpbackup/intermediates/gpbackup-gppkgs.tar.gz @@ -534,8 +512,11 @@ resources: icon: google source: bucket: gpbackup-release-licenses - json_key: ((dp/prod/gcp_svc_acct_key)) - regexp: open_source_license_VMware_Tanzu_Greenplum_Backup_and_Restore_(.*)_.*.txt + json_key: ((dp/dev/gcp_svc_acct_key)) + regexp: open_source_license_VMware_Greenplum_Backup_and_Restore_(.*)_.*.txt + +############################################## + jobs: - name: build_binaries @@ -548,75 +529,142 @@ jobs: - get: gpbackup trigger: true - get: pivnet_release_cache - - task: build-go-binaries - file: gpbackup/ci/tasks/build-go-binaries.yml - - put: gpbackup-go-components - params: - file: go_components/go_components.tar.gz + - get: centos6-gpdb5-image + - get: centos7-gpdb6-build-image + - get: rocky8-gpdb7-image + - get: ubuntu-debian-image + - in_parallel: + - do: # RHEL6 + - task: build-go-binaries-rhel6 + file: gpbackup/ci/tasks/build-go-binaries.yml + image: centos6-gpdb5-image + params: + OS: RHEL6 + - put: gpbackup-go-components-rhel6 + params: + file: go_components/go_components.tar.gz + - do: # RHEL7 + - task: build-go-binaries-rhel7 + file: gpbackup/ci/tasks/build-go-binaries.yml + image: centos7-gpdb6-build-image + params: + OS: RHEL7 + - put: gpbackup-go-components-rhel7 + params: + file: go_components/go_components.tar.gz + - do: # RHEL8 + - task: build-go-binaries-rhel8 + file: gpbackup/ci/tasks/build-go-binaries.yml + image: rocky8-gpdb7-image + params: + OS: RHEL8 + - put: gpbackup-go-components-rhel8 + params: + file: go_components/go_components.tar.gz + - do: # UBUNTU + - task: build-go-binaries-ubuntu + file: gpbackup/ci/tasks/build-go-binaries.yml + image: ubuntu-debian-image + params: + OS: UBUNTU + - put: gpbackup-go-components-ubuntu + params: + file: go_components/go_components.tar.gz - name: build_gppkgs plan: - in_parallel: - - get: centos6-image - - get: sles11-image - - get: bin_gpdb_5x_sles11 - - get: libyaml-0.1.7 + - get: rocky8-gpdb6-image + - get: gpdb_src + 
resource: gpdb6_src - get: ubuntu-debian-image - get: ubuntu-debian-test-image - get: bin_gpdb_6x_stable_ubuntu - - get: gpdb_src - resource: gpdb6_src - - get: gpbackup-go-components + - get: gpbackup-go-components-ubuntu + - get: gpbackup-go-components-rhel6 + - get: gpbackup-go-components-rhel7 + - get: gpbackup-go-components-rhel8 trigger: true passed: [build_binaries] - - get: bin_gpdb_6x_stable_centos6 + - get: bin_gpdb_6x_rhel8 - get: gpbackup passed: [build_binaries] - get: gpbackup_ddboost_plugin - trigger: true - get: pivnet_release_cache - get: gpbackup-release-license - task: gpbackup-tools-versions - image: centos6-image + image: rocky8-gpdb6-image file: gpbackup/ci/tasks/gpbackup-tools-versions.yml - in_parallel: - - do: # RHEL - - task: build-ddboost-RHEL - image: centos6-image + - do: # RHEL6 + - task: build-ddboost-RHEL6 + image: rocky8-gpdb6-image + file: gpbackup/ci/tasks/build-ddboost.yml + input_mapping: + bin_gpdb: bin_gpdb_6x_rhel8 + - task: tar-binaries-RHEL6 + image: rocky8-gpdb6-image + file: gpbackup/ci/tasks/build-os-tars.yml + input_mapping: + gpbackup-go-components: gpbackup-go-components-rhel6 + output_mapping: + gpbackup_tar: gpbackup_tar_rhel6 + - task: build_gppkgs-RHEL6 + image: rocky8-gpdb6-image + file: gpbackup/ci/tasks/build-gppkg.yml + input_mapping: + bin_gpdb: bin_gpdb_6x_rhel8 + gpbackup_tar: gpbackup_tar_rhel6 + output_mapping: + gppkgs: rhel6-gppkg + params: + OS: RHEL6 + - do: # RHEL7 + - task: build-ddboost-RHEL7 + image: rocky8-gpdb6-image file: gpbackup/ci/tasks/build-ddboost.yml input_mapping: - bin_gpdb: bin_gpdb_6x_stable_centos6 - - task: tar-binaries-RHEL - image: centos6-image + bin_gpdb: bin_gpdb_6x_rhel8 + - task: tar-binaries-RHEL7 + image: rocky8-gpdb6-image file: gpbackup/ci/tasks/build-os-tars.yml - - task: build_gppkgs-RHEL - image: centos6-image + input_mapping: + gpbackup-go-components: gpbackup-go-components-rhel7 + output_mapping: + gpbackup_tar: gpbackup_tar_rhel7 + - task: build_gppkgs-RHEL7 + image: rocky8-gpdb6-image file: gpbackup/ci/tasks/build-gppkg.yml input_mapping: - bin_gpdb: bin_gpdb_6x_stable_centos6 + bin_gpdb: bin_gpdb_6x_rhel8 + gpbackup_tar: gpbackup_tar_rhel7 output_mapping: - gppkgs: rhel-gppkg + gppkgs: rhel7-gppkg params: - OS: RHEL - - do: # SLES - - task: build-ddboost-SLES - image: sles11-image + OS: RHEL7 + - do: # RHEL8 + - task: build-ddboost-RHEL8 + image: rocky8-gpdb6-image file: gpbackup/ci/tasks/build-ddboost.yml input_mapping: - bin_gpdb: bin_gpdb_5x_sles11 - - task: tar-binaries-SLES - image: sles11-image + bin_gpdb: bin_gpdb_6x_rhel8 + - task: tar-binaries-RHEL8 + image: rocky8-gpdb6-image file: gpbackup/ci/tasks/build-os-tars.yml - - task: build_gppkgs-SLES - # We need to use centos image for doing rpm-build for sles - image: centos6-image + input_mapping: + gpbackup-go-components: gpbackup-go-components-rhel8 + output_mapping: + gpbackup_tar: gpbackup_tar_rhel8 + - task: build_gppkgs-RHEL8 + image: rocky8-gpdb6-image file: gpbackup/ci/tasks/build-gppkg.yml input_mapping: - bin_gpdb: bin_gpdb_5x_sles11 + bin_gpdb: bin_gpdb_6x_rhel8 + gpbackup_tar: gpbackup_tar_rhel8 output_mapping: - gppkgs: sles-gppkg + gppkgs: rhel8-gppkg params: - OS: SLES + OS: RHEL8 - do: # ubuntu - task: build-ddboost-ubuntu image: ubuntu-debian-image @@ -626,17 +674,22 @@ jobs: - task: tar-binaries-ubuntu image: ubuntu-debian-image file: gpbackup/ci/tasks/build-os-tars.yml + input_mapping: + gpbackup-go-components: gpbackup-go-components-ubuntu + output_mapping: + gpbackup_tar: gpbackup_tar_ubuntu - task: 
build_gppkgs-ubuntu image: ubuntu-debian-test-image file: gpbackup/ci/tasks/build-gppkg.yml input_mapping: bin_gpdb: bin_gpdb_6x_stable_ubuntu + gpbackup_tar: gpbackup_tar_ubuntu output_mapping: gppkgs: ubuntu-gppkg params: OS: ubuntu - task: tar-gppkgs - image: centos6-image + image: rocky8-gpdb6-image file: gpbackup/ci/tasks/tar-gppkgs.yml - put: gppkgs params: @@ -645,15 +698,15 @@ jobs: - name: s3_plugin_tests plan: - in_parallel: - - get: centos7-image + - get: rocky8-gpdb6-image - get: gpbackup_s3_plugin passed: [build_binaries] - get: gpbackup passed: [build_gppkgs] - get: gpdb_src - resource: gpdb5_src + resource: gpdb6_src - get: gpdb_binary - resource: bin_gpdb_5x_stable_centos6 + resource: bin_gpdb_6x_rhel8 - get: gppkgs passed: [build_gppkgs] trigger: true @@ -667,19 +720,21 @@ jobs: <<: *ccp_default_params vars: <<: *ccp_default_vars - PLATFORM: centos7 + PLATFORM: rocky8 - task: gen_cluster file: ccp_src/ci/tasks/gen_cluster.yml params: <<: *ccp_gen_cluster_default_params - PLATFORM: centos7 + PLATFORM: rocky8 - task: gpinitsystem file: ccp_src/ci/tasks/gpinitsystem.yml - task: setup-centos-env - image: centos7-image + image: rocky8-gpdb6-image + params: + default_ami_user: rocky file: gpbackup/ci/tasks/setup-centos-env.yml - task: run_tests - image: centos7-image + image: rocky8-gpdb6-image file: gpbackup/ci/tasks/s3-plugin-tests.yml params: REGION: us-west-2 @@ -696,7 +751,7 @@ jobs: - name: s3_plugin_perf plan: - in_parallel: - - get: centos7-image + - get: rocky8-gpdb7-image - get: gpbackup_s3_plugin passed: [build_binaries] - get: gpbackup @@ -718,7 +773,7 @@ jobs: <<: *ccp_default_params terraform_source: ccp_src/google-nvme-block-device/ vars: - PLATFORM: centos6 + PLATFORM: centos7 number_of_nodes: 4 segments_per_host: 4 instance_type: n1-standard-8 @@ -727,13 +782,14 @@ jobs: file: ccp_src/ci/tasks/gen_cluster.yml params: <<: *ccp_gen_cluster_default_params + PLATFORM: centos7 - task: gpinitsystem file: ccp_src/ci/tasks/gpinitsystem.yml - task: setup-centos-env - image: centos7-image + image: rocky8-gpdb7-image file: gpbackup/ci/tasks/setup-centos-env.yml - task: run_perf - image: centos7-image + image: rocky8-gpdb7-image file: gpbackup/ci/tasks/s3-plugin-perf.yml params: REGION: us-west-2 @@ -751,7 +807,7 @@ jobs: - name: gpbackup-manager-tests plan: - in_parallel: - - get: centos7-image + - get: centos7-gpdb6-golang-image - get: gpbackup_manager_src passed: [build_binaries] - get: gpbackup @@ -764,62 +820,9 @@ jobs: trigger: true passed: [build_gppkgs] - task: run_tests - image: centos7-image + image: centos7-gpdb6-golang-image file: gpbackup/ci/tasks/test-gpbackup-manager.yml -- name: ddboost_plugin_and_boostfs_tests_43 - plan: - - in_parallel: - - get: gpdb_src - resource: gpdb5_src - - get: gpdb_binary - resource: bin_gpdb_43_stable - - get: pgcrypto43 - - get: centos6-image - - get: gpbackup_ddboost_plugin - passed: [build_gppkgs] - - get: gpbackup - passed: [build_gppkgs] - - get: boostfs_installer - - get: ccp_src - - get: gppkgs - passed: [build_gppkgs] - trigger: true - - get: terraform.d - params: - unpack: true - - put: terraform - params: - <<: *ccp_default_params - terraform_source: ccp_src/google-nvme-block-device/ - vars: - instance_type: n1-standard-8 - PLATFORM: centos6 - - task: gen_cluster - params: - <<: *ccp_gen_cluster_default_params - PLATFORM: centos6 - file: ccp_src/ci/tasks/gen_cluster.yml - - task: gpinitsystem - file: ccp_src/ci/tasks/gpinitsystem.yml - - task: setup-centos-env - file: gpbackup/ci/tasks/setup-centos-env.yml - image: 
centos6-image - - task: install-boostfs - file: gpbackup/ci/tasks/boostfs-install.yml - image: centos6-image - params: - <<: *ddboost_params - - task: run_tests - image: centos6-image - file: gpbackup/ci/tasks/ddboost-plugin-tests.yml - params: - <<: *ddboost_params - on_success: - <<: *ccp_destroy_nvme - ensure: - <<: *set_failed - - name: ddboost_plugin_and_boostfs_tests_5x plan: - in_parallel: @@ -827,7 +830,7 @@ jobs: resource: gpdb5_src - get: gpdb_binary resource: bin_gpdb_5x_stable_centos6 - - get: centos6-image + - get: rocky8-gpdb7-image - get: gpbackup_ddboost_plugin passed: [build_gppkgs] - get: gpbackup @@ -846,24 +849,24 @@ jobs: terraform_source: ccp_src/google-nvme-block-device/ vars: instance_type: n1-standard-8 - PLATFORM: centos6 + PLATFORM: centos7 - task: gen_cluster params: <<: *ccp_gen_cluster_default_params - PLATFORM: centos6 + PLATFORM: centos7 file: ccp_src/ci/tasks/gen_cluster.yml - task: gpinitsystem file: ccp_src/ci/tasks/gpinitsystem.yml - task: setup-centos-env file: gpbackup/ci/tasks/setup-centos-env.yml - image: centos6-image + image: rocky8-gpdb7-image - task: install-boostfs file: gpbackup/ci/tasks/boostfs-install.yml - image: centos6-image + image: rocky8-gpdb7-image params: <<: *ddboost_params - task: run_tests - image: centos6-image + image: rocky8-gpdb7-image file: gpbackup/ci/tasks/ddboost-plugin-tests.yml params: <<: *ddboost_params @@ -878,10 +881,10 @@ jobs: - get: gpdb_src resource: gpdb6_src - get: gpdb_binary - resource: bin_gpdb_6x_stable_centos6 + resource: bin_gpdb_6x_rhel8 - get: dummy_seclabel resource: dummy_seclabel_linux_gpdb6 - - get: centos6-image + - get: rocky8-gpdb7-image - get: gpbackup_ddboost_plugin passed: [build_gppkgs] - get: gpbackup @@ -900,24 +903,27 @@ jobs: terraform_source: ccp_src/google-nvme-block-device/ vars: instance_type: n1-standard-8 - PLATFORM: centos6 + PLATFORM: rhel8 - task: gen_cluster params: <<: *ccp_gen_cluster_default_params - PLATFORM: centos6 + PLATFORM: rhel8 file: ccp_src/ci/tasks/gen_cluster.yml - task: gpinitsystem file: ccp_src/ci/tasks/gpinitsystem.yml - task: setup-centos-env file: gpbackup/ci/tasks/setup-centos-env.yml - image: centos6-image + image: rocky8-gpdb7-image + params: + default_ami_user: rhel - task: install-boostfs file: gpbackup/ci/tasks/boostfs-install.yml - image: centos6-image + image: rocky8-gpdb7-image params: + default_ami_user: rhel <<: *ddboost_params - task: run_tests - image: centos6-image + image: rocky8-gpdb7-image file: gpbackup/ci/tasks/ddboost-plugin-tests.yml params: <<: *ddboost_params @@ -929,7 +935,7 @@ jobs: - name: ddboost_plugin_perf plan: - in_parallel: - - get: centos6-image + - get: rocky8-gpdb7-image - get: gpbackup_ddboost_plugin passed: [build_gppkgs] - get: gpbackup @@ -939,7 +945,7 @@ jobs: - get: gpdb_src resource: gpdb6_src - get: gpdb_binary - resource: bin_gpdb_6x_stable_centos6 + resource: bin_gpdb_6x_stable_centos7 - get: gppkgs passed: [build_gppkgs] trigger: true @@ -954,19 +960,19 @@ jobs: instance_type: n1-standard-8 number_of_nodes: 4 segments_per_host: 4 - PLATFORM: centos6 + PLATFORM: centos7 - task: gen_cluster params: <<: *ccp_gen_cluster_default_params - PLATFORM: centos6 + PLATFORM: centos7 file: ccp_src/ci/tasks/gen_cluster.yml - task: gpinitsystem file: ccp_src/ci/tasks/gpinitsystem.yml - task: setup-centos-env - image: centos6-image + image: rocky8-gpdb7-image file: gpbackup/ci/tasks/setup-centos-env.yml - task: run_perf - image: centos6-image + image: rocky8-gpdb7-image file: gpbackup/ci/tasks/ddboost-plugin-perf.yml params: <<: 
*ddboost_params @@ -980,50 +986,6 @@ jobs: ensure: <<: *set_failed -- name: GPDB5-sles11 - plan: - - in_parallel: - - get: sles11-image - - get: gpbackup - passed: [build_gppkgs] - - get: gpdb_binary - resource: bin_gpdb_5x_sles11 - trigger: true - - get: ccp_src - - get: gpdb_src - resource: gpdb5_src - - get: gppkgs - passed: [build_gppkgs] - trigger: true - - get: terraform.d - params: - unpack: true - - put: terraform - params: - <<: *ccp_default_params - vars: - <<: *ccp_default_vars - PLATFORM: sles12 - default_image_user: root - - task: gen_cluster - params: - <<: *ccp_gen_cluster_default_params - PLATFORM: sles12 - file: ccp_src/ci/tasks/gen_cluster.yml - - task: gpinitsystem - file: ccp_src/ci/tasks/gpinitsystem.yml - - task: integration-tests - image: sles11-image - file: gpbackup/ci/tasks/gpbackup-tests.yml - params: - OS: SLES - on_success: - <<: *ccp_destroy - on_failure: - *slack_alert - ensure: - <<: *set_failed - - name: GPDB6-ubuntu plan: - in_parallel: @@ -1071,56 +1033,37 @@ jobs: ensure: <<: *set_failed -- name: master +- name: GPDB7 plan: - in_parallel: - - get: centos7-image + - get: rocky8-gpdb7-image - get: gpbackup passed: [build_gppkgs] - - get: bin_gpdb_master_centos7 + - get: bin_gpdb_7x_rhel8 trigger: true - get: gpdb_src - resource: gpdb_master_src + resource: gpdb_main_src - get: dummy_seclabel - resource: dummy_seclabel_linux_master + resource: dummy_seclabel_linux_main - get: gppkgs trigger: true passed: [build_gppkgs] - - task: run-tests-locally-centos7 - image: centos7-image + - task: run-tests-locally-rocky8 + image: rocky8-gpdb7-image file: gpbackup/ci/tasks/test-on-local-cluster.yml params: REQUIRES_DUMMY_SEC: true + OS: RHEL8 input_mapping: - bin_gpdb: bin_gpdb_master_centos7 - on_failure: - *slack_alert - -- name: GPDB4.3 - plan: - - in_parallel: - - get: centos6-image - - get: gpbackup - passed: [build_gppkgs] - - get: bin_gpdb - resource: bin_gpdb_43_stable - trigger: true - - get: gpdb_src - resource: gpdb5_src - - get: gppkgs - passed: [build_gppkgs] - trigger: true - - task: run-tests-locally-centos6 - image: centos6-image - file: gpbackup/ci/tasks/test-on-local-cluster.yml + bin_gpdb: bin_gpdb_7x_rhel8 on_failure: *slack_alert - name: GPDB5 plan: - in_parallel: - - get: centos6-image - - get: centos7-image + - get: centos6-gpdb5-image + - get: centos7-gpdb5-image - get: gpbackup passed: [build_gppkgs] - get: bin_gpdb_5x_stable_centos6 @@ -1133,25 +1076,30 @@ jobs: passed: [build_gppkgs] - in_parallel: - task: run-tests-locally-centos6 - image: centos6-image + image: centos6-gpdb5-image file: gpbackup/ci/tasks/test-on-local-cluster.yml input_mapping: bin_gpdb: bin_gpdb_5x_stable_centos6 + params: + OS: RHEL6 - task: run-tests-locally-centos7 - image: centos7-image + image: centos7-gpdb5-image file: gpbackup/ci/tasks/test-on-local-cluster.yml input_mapping: bin_gpdb: bin_gpdb_5x_stable_centos7 + params: + OS: RHEL7 on_failure: *slack_alert - name: GPDB6 plan: - in_parallel: - - get: centos6-image - - get: centos7-image + - get: centos7-gpdb6-golang-image + - get: rocky8-gpdb6-image - get: ubuntu-debian-test-image - get: bin_gpdb_6x_stable_ubuntu + - get: bin_gpdb_6x_rhel8 - get: gpbackup passed: [build_gppkgs] - get: bin_gpdb_6x_stable_centos6 @@ -1165,20 +1113,22 @@ jobs: trigger: true passed: [build_gppkgs] - in_parallel: - - task: run-tests-locally-centos6 - image: centos6-image + - task: run-tests-locally-centos7 + image: centos7-gpdb6-golang-image file: gpbackup/ci/tasks/test-on-local-cluster.yml params: REQUIRES_DUMMY_SEC: true + OS: RHEL7 
input_mapping: - bin_gpdb: bin_gpdb_6x_stable_centos6 - - task: run-tests-locally-centos7 - image: centos7-image + bin_gpdb: bin_gpdb_6x_stable_centos7 + - task: run-tests-locally-rocky8 + image: rocky8-gpdb6-image file: gpbackup/ci/tasks/test-on-local-cluster.yml params: REQUIRES_DUMMY_SEC: true + OS: RHEL8 input_mapping: - bin_gpdb: bin_gpdb_6x_stable_centos7 + bin_gpdb: bin_gpdb_6x_rhel8 - task: run-tests-locally-ubuntu-debian image: ubuntu-debian-test-image file: gpbackup/ci/tasks/test-on-local-cluster.yml @@ -1193,14 +1143,15 @@ jobs: - name: GPDB6-7-seg-cluster plan: - in_parallel: - - get: centos6-image - - get: centos7-image + - get: centos7-gpdb6-golang-image + - get: rocky8-gpdb6-image - get: ubuntu-debian-test-image - get: bin_gpdb_6x_stable_ubuntu - get: gpbackup passed: [build_gppkgs] - get: bin_gpdb_6x_stable_centos6 trigger: true + - get: bin_gpdb_6x_rhel8 - get: bin_gpdb_6x_stable_centos7 - get: gpdb_src resource: gpdb6_src @@ -1210,22 +1161,24 @@ jobs: trigger: true passed: [build_gppkgs] - in_parallel: - - task: run-tests-locally-centos6 - image: centos6-image + - task: run-tests-locally-centos7 + image: centos7-gpdb6-golang-image file: gpbackup/ci/tasks/test-on-local-cluster.yml params: REQUIRES_DUMMY_SEC: true LOCAL_CLUSTER_SIZE: 7 + OS: RHEL7 input_mapping: - bin_gpdb: bin_gpdb_6x_stable_centos6 - - task: run-tests-locally-centos7 - image: centos7-image + bin_gpdb: bin_gpdb_6x_stable_centos7 + - task: run-tests-locally-rocky8 + image: rocky8-gpdb6-image file: gpbackup/ci/tasks/test-on-local-cluster.yml params: REQUIRES_DUMMY_SEC: true LOCAL_CLUSTER_SIZE: 7 + OS: RHEL8 input_mapping: - bin_gpdb: bin_gpdb_6x_stable_centos7 + bin_gpdb: bin_gpdb_6x_rhel8 - task: run-tests-locally-ubuntu-debian image: ubuntu-debian-test-image file: gpbackup/ci/tasks/test-on-local-cluster.yml @@ -1241,7 +1194,7 @@ jobs: - name: backward-compatibility plan: - in_parallel: - - get: centos6-image + - get: rocky8-gpdb7-image - get: gpbackup passed: [build_gppkgs] - get: gpdb_binary @@ -1272,56 +1225,35 @@ jobs: file: ccp_src/ci/tasks/gpinitsystem.yml - task: setup-centos-env file: gpbackup/ci/tasks/setup-centos-env.yml - image: centos6-image + image: rocky8-gpdb7-image - task: backup-1.0.0-restore-latest - image: centos6-image + image: rocky8-gpdb7-image file: gpbackup/ci/tasks/all-tests.yml params: GPBACKUP_VERSION: "1.0.0" + OS: RHEL7 - task: backup-1.7.1-restore-latest - image: centos6-image + image: rocky8-gpdb7-image file: gpbackup/ci/tasks/all-tests.yml params: GPBACKUP_VERSION: "1.7.1" + OS: RHEL7 on_success: <<: *ccp_destroy ensure: <<: *set_failed -# Ensure compatibility between a new GPDB5 binary and a fixed version of gpbackup -- name: 5X-head-gpbackup-fixed-test - plan: - - in_parallel: - - get: centos6-image - # NOTE: This is not being used, but is required input for the final_gate - - get: gppkgs - passed: [build_gppkgs] - trigger: true - - get: gpbackup - passed: [build_gppkgs] - - get: bin_gpdb - resource: bin_gpdb_5x_stable_intermediate - trigger: true - - get: gpdb_src - resource: gpdb5_src - - get: gpbackup_1.12.1_dependencies - - task: gpbackup-1.12.1-test - image: centos6-image - file: gpbackup/ci/tasks/integration-tests-fixed-version.yml - params: - GPBACKUP_VERSION: "1.12.1" - -- name: scale-master +- name: scale-GPDB7 plan: - in_parallel: - - get: centos7-image + - get: rocky8-gpdb7-image - get: gpbackup passed: [build_gppkgs] - get: gpdb_binary - resource: bin_gpdb_master_centos7 + resource: bin_gpdb_7x_rhel8 - get: ccp_src - get: gpdb_src - resource: gpdb_master_src + 
resource: gpdb_main_src - get: scale_schema - get: nightly-trigger trigger: true @@ -1336,20 +1268,24 @@ jobs: terraform_source: ccp_src/google-nvme-block-device/ vars: instance_type: n1-standard-8 - PLATFORM: centos7 + PLATFORM: rhel8-gpdb7 - task: gen_cluster params: <<: *ccp_gen_cluster_default_params - PLATFORM: centos7 + PLATFORM: rhel8-gpdb7 file: ccp_src/ci/tasks/gen_cluster.yml - task: gpinitsystem file: ccp_src/ci/tasks/gpinitsystem.yml - task: setup-centos-env - image: centos7-image + image: rocky8-gpdb7-image + params: + default_ami_user: rhel file: gpbackup/ci/tasks/setup-centos-env.yml - task: scale-tests - image: centos7-image + image: rocky8-gpdb7-image file: gpbackup/ci/tasks/scale-tests.yml + params: + OS: RHEL8 on_success: <<: *ccp_destroy_nvme on_failure: @@ -1360,7 +1296,7 @@ jobs: - name: scale-6x plan: - in_parallel: - - get: centos7-image + - get: rocky8-gpdb7-image - get: gpbackup passed: [build_gppkgs] - get: gpdb_binary @@ -1391,11 +1327,13 @@ jobs: - task: gpinitsystem file: ccp_src/ci/tasks/gpinitsystem.yml - task: setup-centos-env - image: centos7-image + image: rocky8-gpdb7-image file: gpbackup/ci/tasks/setup-centos-env.yml - task: scale-tests - image: centos7-image + image: rocky8-gpdb7-image file: gpbackup/ci/tasks/scale-tests.yml + params: + OS: RHEL7 on_success: <<: *ccp_destroy_nvme on_failure: @@ -1406,7 +1344,7 @@ jobs: - name: scale-5x plan: - in_parallel: - - get: centos6-image + - get: rocky8-gpdb7-image - get: gpbackup passed: [build_gppkgs] - get: gpdb_binary @@ -1435,55 +1373,13 @@ jobs: - task: gpinitsystem file: ccp_src/ci/tasks/gpinitsystem.yml - task: setup-centos-env - image: centos6-image + image: rocky8-gpdb7-image file: gpbackup/ci/tasks/setup-centos-env.yml - task: scale-tests - image: centos6-image + image: rocky8-gpdb7-image file: gpbackup/ci/tasks/scale-tests.yml - on_success: - <<: *ccp_destroy_nvme - on_failure: - *slack_alert - ensure: - <<: *set_failed - -- name: scale-43 - plan: - - in_parallel: - - get: centos6-image - - get: gpbackup - passed: [build_gppkgs] - - get: gpdb_binary - resource: bin_gpdb_43_stable - - get: ccp_src - - get: gpdb_src - resource: gpdb5_src - - get: scale_schema - - get: nightly-trigger - trigger: true - - get: gppkgs - passed: [build_gppkgs] - - get: terraform.d - params: - unpack: true - - put: terraform - params: - <<: *ccp_default_params - terraform_source: ccp_src/google-nvme-block-device/ - vars: - instance_type: n1-standard-8 - - task: gen_cluster params: - <<: *ccp_gen_cluster_default_params - file: ccp_src/ci/tasks/gen_cluster.yml - - task: gpinitsystem - file: ccp_src/ci/tasks/gpinitsystem.yml - - task: setup-centos-env - image: centos6-image - file: gpbackup/ci/tasks/setup-centos-env.yml - - task: scale-tests - image: centos6-image - file: gpbackup/ci/tasks/scale-tests.yml + OS: RHEL6 on_success: <<: *ccp_destroy_nvme on_failure: @@ -1499,16 +1395,12 @@ jobs: trigger: true passed: - build_gppkgs - - GPDB4.3 - GPDB5 - GPDB6 - - master + - GPDB7 - s3_plugin_tests - backward-compatibility - - 5X-head-gpbackup-fixed-test - - GPDB5-sles11 - GPDB6-ubuntu - - ddboost_plugin_and_boostfs_tests_43 - ddboost_plugin_and_boostfs_tests_5x - ddboost_plugin_and_boostfs_tests_6x - gpbackup-manager-tests diff --git a/ci/gpbackup-release-generated.yml b/ci/gpbackup-release-generated.yml index 327faee3d..bd0f9975d 100644 --- a/ci/gpbackup-release-generated.yml +++ b/ci/gpbackup-release-generated.yml @@ -12,7 +12,7 @@ ## file (example: templates/gpbackup-tpl.yml) and regenerate the pipeline ## using appropriate 
tool (example: gen_pipeline.py -p gpbackup-release).
## ----------------------------------------------------------------------
-## Generated by gen_pipeline.py at: 2022-11-02 09:43:34.479435
+## Generated by gen_pipeline.py at: 2023-04-11 14:51:34.961951
## Template file: gpbackup-tpl.yml
## Pipeline Name: gpbackup-release
## Nightly Trigger: True
@@ -24,15 +24,13 @@ groups:
  jobs:
  - build_binaries
  - build_gppkgs
-  - GPDB4.3
  - GPDB5
-  - GPDB5-sles11
  - GPDB6-ubuntu
-  - ddboost_plugin_and_boostfs_tests_43
  - ddboost_plugin_and_boostfs_tests_5x
  - gpbackup-manager-tests
  - GPDB6
  - GPDB6-7-seg-cluster
+  - GPDB7
  - s3_plugin_tests
  - backward-compatibility
  - ddboost_plugin_and_boostfs_tests_6x
@@ -40,17 +38,9 @@ groups:
  - green_gate
  - push-to-pivnet

-- name: GPDB4.3
-  jobs:
-  - GPDB4.3
-  - scale-43
-  - ddboost_plugin_and_boostfs_tests_43
-
- name: GPDB5
  jobs:
  - GPDB5
-  - GPDB5-sles11
-  - scale-5x
  - ddboost_plugin_and_boostfs_tests_5x
  - s3_plugin_tests
  - backward-compatibility
@@ -60,19 +50,14 @@ groups:
  - GPDB6
  - GPDB6-7-seg-cluster
  - GPDB6-ubuntu
-  - scale-6x
  - ddboost_plugin_and_boostfs_tests_6x
  - s3_plugin_perf
  - ddboost_plugin_perf
-
-- name: Scale
+- name: GPDB7
  jobs:
-  - scale-43
-  - scale-5x
-  - scale-6x
-  - s3_plugin_perf
-  - ddboost_plugin_perf
+  - GPDB7
+

- name: Packaging and Release
  jobs:
@@ -108,7 +93,7 @@ anchors:
    terraform_source: ccp_src/google/
    env_name_file: terraform/name
    vars:
-      aws_instance-node-instance_type: t2.micro #t2.micro is ignored in destroy, but aws_instance-node-instance_type is required.
+      aws_instance-node-instance_type: t2.micro #t2.micro is ignored in destroy, but aws_instance-node-instance_type is required.
      aws_ebs_volume_type: standard

- &ccp_destroy
@@ -209,28 +194,44 @@ resource_types:
    repository: pivotalcf/pivnet-resource
    tag: latest-final

-
+##############################################
resources:
##### Docker Images #####
-- name: centos7-image
+- name: centos6-gpdb5-image
+  type: registry-image
+  source:
+    repository: gcr.io/data-gpdb-public-images/gpdb5-centos6-build-test
+    tag: latest
+
+- name: centos7-gpdb5-image
  type: registry-image
  source:
    repository: gcr.io/data-gpdb-public-images/gpdb5-centos7-build-test
    tag: latest

-- name: centos6-image
+- name: centos7-gpdb6-build-image
  type: registry-image
  source:
-    repository: gcr.io/data-gpdb-public-images/gpdb5-centos6-build-test
+    repository: gcr.io/data-gpdb-public-images/gpdb6-centos7-build
    tag: latest

-- name: sles11-image
+- name: centos7-gpdb6-golang-image
  type: registry-image
  source:
-    repository: gcr.io/data-gpdb-private-images/gpdb5-sles11-build-test
+    repository: gcr.io/data-gpdb-public-images/gpdb6-centos7-test-golang
+    tag: latest
+
+- name: rocky8-gpdb6-image
+  type: registry-image
+  source:
+    repository: gcr.io/data-gpdb-public-images/gpdb6-rocky8-test
+    tag: latest
+
+- name: rocky8-gpdb7-image
+  type: registry-image
+  source:
+    repository: gcr.io/data-gpdb-public-images/gpdb7-rocky8-test
    tag: latest
-    username: _json_key
-    password: ((data-gpdb-private-images-container-registry-readonly-service-account-key))

- name: ubuntu-debian-image
  type: registry-image
@@ -244,8 +245,9 @@ resources:
    repository: gcr.io/data-gpdb-public-images/gpdb6-ubuntu18.04-test
    tag: latest

-##### Other Resources #####
+##############################################

+##### Source Code #####
# TODO mark these as src with name change
- name: gpbackup
  type: git
@@ -289,7 +291,6 @@ resources:
    branch: 5X_STABLE
    tag_filter: 5.*

-
- name: gpdb6_src
  type: git
  icon: github-circle
  source:
    uri: https://github.com/greenplum-db/gpdb
    branch: 6X_STABLE
tag_filter: 6.* -- name: bin_gpdb_5x_sles11 - type: s3 - icon: amazon +- name: gpdb_main_src + type: git + icon: github-circle source: - bucket: gpdb5-stable-concourse-builds - access_key_id: ((aws-bucket-access-key-id)) - region_name: ((dp/aws-region)) - secret_access_key: ((aws-bucket-secret-access-key)) - versioned_file: bin_gpdb_sles11/gpdb_branch_5X_STABLE/icw_green/bin_gpdb.tar.gz + uri: https://github.com/greenplum-db/gpdb + branch: main + +- name: ccp_src + type: git + icon: github-circle + source: + branch: ((dp/ccp-git-branch)) + private_key: ((gp-concourse-cluster-provisioner-git-key)) + uri: ((dp/ccp-git-remote)) + +############################################## +##### Binaries ##### - name: bin_gpdb_6x_stable_ubuntu type: gcs icon: google source: - bucket: ((dp/prod/gcs-bucket)) + bucket: ((dp/prod/gcs-ci-bucket)) json_key: ((concourse-gcs-resources-service-account-key)) regexp: server/published/gpdb6/server-rc-(.*)-ubuntu18.04_x86_64((dp/prod/rc-build-type-gcs)).tar.gz - -# These binaries are used for backwards-compatibility testing only -- name: bin_gpbackup_1.0.0_and_1.7.1 - type: s3 - icon: amazon - source: - bucket: gpbackup-dependencies - versioned_file: gpbackup_bins_1.0.0_and_1.7.1.tar.gz - region_name: us-west-2 - access_key_id: ((aws-bucket-access-key-id)) - secret_access_key: ((aws-bucket-secret-access-key)) - -# This is specifically for sles11 images because it cannot connect to github due to TLS issues -- name: libyaml-0.1.7 - type: s3 - icon: amazon - source: - bucket: gpbackup-dependencies - versioned_file: gpbackup-dependencies/libyaml-0.1.7.tar.gz - region_name: us-west-2 - access_key_id: ((aws-bucket-access-key-id)) - secret_access_key: ((aws-bucket-secret-access-key)) - -- name: pgcrypto43 - type: s3 - icon: amazon - source: - bucket: pgcrypto43 - access_key_id: ((aws-bucket-access-key-id)) - region_name: ((dp/aws-region)) - secret_access_key: ((aws-bucket-secret-access-key)) - regexp: pgcrypto-ossv1.1_pv(.*)_gpdb4.3orca-rhel5-x86_64.gppkg - - -- name: dummy_seclabel_linux_gpdb6 +- name: bin_gpdb_7x_rhel8 type: gcs icon: google source: - bucket: dummy_seclabel_gpdb_linux - json_key: ((dp/prod/gcp_svc_acct_key)) - regexp: dummy_seclabel_gpdb6-v(.*).so - + bucket: ((dp/prod/gcs-ci-bucket)) + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: server/published/main/server-rc-(.*)-rhel8_x86_64.tar.gz - name: bin_gpdb_5x_stable_centos6 type: s3 icon: amazon source: - bucket: gpdb-stable-concourse-builds + bucket: ((dp/prod/gpdb-stable-bucket-name)) versioned_file: release_candidates/bin_gpdb_centos6/gpdb5/bin_gpdb.tar.gz region_name: us-west-2 access_key_id: ((aws-bucket-access-key-id)) @@ -373,7 +347,7 @@ resources: type: s3 icon: amazon source: - bucket: gpdb-stable-concourse-builds + bucket: ((dp/prod/gpdb-stable-bucket-name)) versioned_file: release_candidates/bin_gpdb_centos7/gpdb5/bin_gpdb.tar.gz region_name: us-west-2 access_key_id: ((aws-bucket-access-key-id)) @@ -383,7 +357,7 @@ resources: type: gcs icon: google source: - bucket: ((dp/prod/gcs-bucket)) + bucket: ((dp/prod/gcs-ci-bucket)) json_key: ((concourse-gcs-resources-service-account-key)) regexp: server/published/gpdb6/server-rc-(.*)-rhel6_x86_64((dp/prod/rc-build-type-gcs)).tar.gz @@ -391,27 +365,47 @@ resources: type: gcs icon: google source: - bucket: ((dp/prod/gcs-bucket)) + bucket: ((dp/prod/gcs-ci-bucket)) json_key: ((concourse-gcs-resources-service-account-key)) regexp: server/published/gpdb6/server-rc-(.*)-rhel7_x86_64((dp/prod/rc-build-type-gcs)).tar.gz -- name: 
bin_gpdb_43_stable +- name: bin_gpdb_6x_rhel8 + type: gcs + source: + bucket: ((dp/prod/gcs-ci-bucket)) + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: server/published/gpdb6/server-rc-(.*)-rhel8_x86_64((dp/prod/rc-build-type-gcs)).tar.gz + +# These binaries are used for backwards-compatibility testing only +- name: bin_gpbackup_1.0.0_and_1.7.1 type: s3 icon: amazon source: - bucket: gpdb-4.3-stable-concourse - versioned_file: bin_gpdb_centos/bin_gpdb.tar.gz + bucket: gpbackup-dependencies + versioned_file: gpbackup_bins_1.0.0_and_1.7.1.tar.gz region_name: us-west-2 - access_key_id: ((gpdb4-bucket-access-key-id)) - secret_access_key: ((gpdb4-bucket-secret-access-key)) + access_key_id: ((aws-bucket-access-key-id)) + secret_access_key: ((aws-bucket-secret-access-key)) -- name: ccp_src - type: git - icon: github-circle +############################################## + +##### Other Resources ##### + +- name: dummy_seclabel_linux_gpdb6 + type: gcs + icon: google source: - branch: ((dp/ccp-git-branch)) - private_key: ((gp-concourse-cluster-provisioner-git-key)) - uri: ((dp/ccp-git-remote)) + bucket: dummy_seclabel_gpdb_linux + json_key: ((dp/dev/gcp_svc_acct_key)) + regexp: dummy_seclabel_gpdb6-v(.*).so + +- name: dummy_seclabel_linux_main + type: gcs + icon: google + source: + bucket: dummy_seclabel_gpdb_linux + json_key: ((dp/dev/gcp_svc_acct_key)) + regexp: dummy_seclabel_gpdb7-v(.*).so - name: terraform <<: *terraform_cluster @@ -426,15 +420,6 @@ resources: secret_access_key: ((aws-bucket-secret-access-key)) versioned_file: plugin-cache-all.tgz -- name: scale_schema - type: s3 - icon: amazon - source: - access_key_id: ((gpdb4-bucket-access-key-id)) - bucket: ((dp/bucket)) - region_name: ((dp/aws-region)) - secret_access_key: ((gpdb4-bucket-secret-access-key)) - versioned_file: scale-schemas/scale_db1.tgz - name: boostfs_installer type: s3 @@ -462,55 +447,173 @@ resources: secret_access_key: ((aws-bucket-secret-access-key)) regexp: pivnet_release_version/v-(.*) -- name: github_release_components - type: s3 - icon: amazon +- name: release_components_rhel6 + type: gcs + icon: google source: - access_key_id: ((aws-bucket-access-key-id)) - bucket: ((dp/prod/gpdb-stable-bucket-name)) - region_name: ((dp/aws-region)) - secret_access_key: ((aws-bucket-secret-access-key)) - regexp: components/gpbackup/intermediates/gpbackup-(.*).tar.gz + bucket: ((dp/dev/gcs-ci-bucket)) + json_key: ((dp/dev/gcp_svc_acct_key)) + regexp: gpbackup/intermediates/rhel6/gpbackup-(.*).tar.gz -- name: github_release_components_rc - type: s3 - icon: amazon +- name: release_components_rhel7 + type: gcs + icon: google source: - access_key_id: ((aws-bucket-access-key-id)) - bucket: ((dp/prod/gpdb-stable-bucket-name)) - region_name: ((dp/aws-region)) - secret_access_key: ((aws-bucket-secret-access-key)) - regexp: components/gpbackup/gpbackup-(.*).tar.gz + bucket: ((dp/dev/gcs-ci-bucket)) + json_key: ((dp/dev/gcp_svc_acct_key)) + regexp: gpbackup/intermediates/rhel7/gpbackup-(.*).tar.gz -- name: gppkgs_rc - type: s3 - icon: amazon +- name: release_components_rhel8 + type: gcs + icon: google source: - access_key_id: ((aws-bucket-access-key-id)) - bucket: ((dp/prod/gpdb-stable-bucket-name)) - region_name: ((dp/aws-region)) - secret_access_key: ((aws-bucket-secret-access-key)) - versioned_file: gppkgs/gpbackup-gppkgs.tar.gz + bucket: ((dp/dev/gcs-ci-bucket)) + json_key: ((dp/dev/gcp_svc_acct_key)) + regexp: gpbackup/intermediates/rhel8/gpbackup-(.*).tar.gz -- name: gpbackup-go-components - type: s3 - icon: 
amazon +- name: release_components_ubuntu + type: gcs + icon: google source: - access_key_id: ((aws-bucket-access-key-id)) - bucket: ((dp/prod/gpdb-stable-bucket-name)) - region_name: ((dp/aws-region)) - secret_access_key: ((aws-bucket-secret-access-key)) - versioned_file: gpbackup-go-components/go_components.tar.gz + bucket: ((dp/dev/gcs-ci-bucket)) + json_key: ((dp/dev/gcp_svc_acct_key)) + regexp: gpbackup/intermediates/ubuntu/gpbackup-(.*).tar.gz + +- name: gppkgs_release_versioned + type: gcs + icon: google + source: + bucket: ((dp/dev/gcs-ci-bucket)) + json_key: ((dp/dev/gcp_svc_acct_key)) + versioned_file: gpbackup/release-candidates/gpbackup-gppkgs.tar.gz + +- name: final_published_file_gppkg_gp7_rhel8 + type: gcs + icon: google + source: + bucket: ((dp/prod/gcs-ci-bucket)) + json_key: ((data-gpdb-ci-service-account-key)) + regexp: gpbackup/published/rhel8/greenplum_backup_restore-(.*)-gp7-rhel8-x86_64.gppkg + +- name: final_published_file_gppkg_gp6_rhel8 + type: gcs + icon: google + source: + bucket: ((dp/prod/gcs-ci-bucket)) + json_key: ((data-gpdb-ci-service-account-key)) + regexp: gpbackup/published/rhel8/greenplum_backup_restore-(.*)-gp6-rhel8-x86_64.gppkg + +- name: final_published_file_gppkg_gp6_rhel7 + type: gcs + icon: google + source: + bucket: ((dp/prod/gcs-ci-bucket)) + json_key: ((data-gpdb-ci-service-account-key)) + regexp: gpbackup/published/rhel7/greenplum_backup_restore-(.*)-gp6-rhel7-x86_64.gppkg + +- name: final_published_file_gppkg_gp6_rhel6 + type: gcs + icon: google + source: + bucket: ((dp/prod/gcs-ci-bucket)) + json_key: ((data-gpdb-ci-service-account-key)) + regexp: gpbackup/published/rhel6/greenplum_backup_restore-(.*)-gp6-rhel6-x86_64.gppkg + +- name: final_published_file_gppkg_gp6_ubuntu + type: gcs + icon: google + source: + bucket: ((dp/prod/gcs-ci-bucket)) + json_key: ((data-gpdb-ci-service-account-key)) + regexp: gpbackup/published/ubuntu/greenplum_backup_restore-(.*)-gp6-ubuntu-amd64.gppkg + +- name: final_published_file_gppkg_gp5_rhel7 + type: gcs + icon: google + source: + bucket: ((dp/prod/gcs-ci-bucket)) + json_key: ((data-gpdb-ci-service-account-key)) + regexp: gpbackup/published/rhel7/greenplum_backup_restore-(.*)-gp5-rhel7-x86_64.gppkg + +- name: final_published_file_gppkg_gp5_rhel6 + type: gcs + icon: google + source: + bucket: ((dp/prod/gcs-ci-bucket)) + json_key: ((data-gpdb-ci-service-account-key)) + regexp: gpbackup/published/rhel6/greenplum_backup_restore-(.*)-gp5-rhel6-x86_64.gppkg + +- name: final_published_file_tar_rhel8 + type: gcs + icon: google + source: + bucket: ((dp/prod/gcs-ci-bucket)) + json_key: ((data-gpdb-ci-service-account-key)) + regexp: gpbackup/published/rhel8/greenplum_backup_restore-(.*)-rhel8.tar.gz + +- name: final_published_file_tar_rhel7 + type: gcs + icon: google + source: + bucket: ((dp/prod/gcs-ci-bucket)) + json_key: ((data-gpdb-ci-service-account-key)) + regexp: gpbackup/published/rhel7/greenplum_backup_restore-(.*)-rhel7.tar.gz + +- name: final_published_file_tar_rhel6 + type: gcs + icon: google + source: + bucket: ((dp/prod/gcs-ci-bucket)) + json_key: ((data-gpdb-ci-service-account-key)) + regexp: gpbackup/published/rhel6/greenplum_backup_restore-(.*)-rhel6.tar.gz + +- name: final_published_file_tar_ubuntu + type: gcs + icon: google + source: + bucket: ((dp/prod/gcs-ci-bucket)) + json_key: ((data-gpdb-ci-service-account-key)) + regexp: gpbackup/published/ubuntu/greenplum_backup_restore-(.*)-ubuntu.tar.gz + +- name: gpbackup-go-components-rhel6 + type: gcs + icon: google + source: + bucket: 
((dp/dev/gcs-ci-bucket)) + json_key: ((dp/dev/gcp_svc_acct_key)) + versioned_file: gpbackup/intermediates/rhel6/go_components.tar.gz + +- name: gpbackup-go-components-rhel7 + type: gcs + icon: google + source: + bucket: ((dp/dev/gcs-ci-bucket)) + json_key: ((dp/dev/gcp_svc_acct_key)) + versioned_file: gpbackup/intermediates/rhel7/go_components.tar.gz + +- name: gpbackup-go-components-rhel8 + type: gcs + icon: google + source: + bucket: ((dp/dev/gcs-ci-bucket)) + json_key: ((dp/dev/gcp_svc_acct_key)) + versioned_file: gpbackup/intermediates/rhel8/go_components.tar.gz + +- name: gpbackup-go-components-ubuntu + type: gcs + icon: google + source: + bucket: ((dp/dev/gcs-ci-bucket)) + json_key: ((dp/dev/gcp_svc_acct_key)) + versioned_file: gpbackup/intermediates/ubuntu/go_components.tar.gz - name: gppkgs - type: s3 - icon: amazon + type: gcs + icon: google source: - access_key_id: ((aws-bucket-access-key-id)) - bucket: ((dp/prod/gpdb-stable-bucket-name)) - region_name: ((dp/aws-region)) - secret_access_key: ((aws-bucket-secret-access-key)) - versioned_file: gppkgs/intermediates_release/gpbackup-gppkgs.tar.gz + bucket: ((dp/dev/gcs-ci-bucket)) + json_key: ((dp/dev/gcp_svc_acct_key)) + versioned_file: gpbackup/intermediates_release/gpbackup-gppkgs.tar.gz - name: gpbackup_github_release type: github-release @@ -520,26 +623,24 @@ resources: access_token: ((dp/prod/gpbackup-git-access-token)) release: true -- name: pivnet-upload - type: pivnet +- name: tanzunet_client + type: github-release source: - api_token: ((dp/prod/pivnet_api_token)) - product_slug: pivotal-gpdb-backup-restore - endpoint: "https://network.tanzu.vmware.com" - bucket: gpbackup-pivotal-network - access_key_id: ((dp/prod/pivnet_bucket_access_key_id)) - secret_access_key: ((dp/prod/pivnet_bucket_secret_access_key)) - region: us-west-2 - copy_metadata: true - sort_by: semver + owner: pivotal + repository: gp-tanzunet-client + access_token: ((dp/prod/gpbackup-git-access-token)) + tag_filter: v4.8.1 - name: gpbackup-release-license type: gcs icon: google source: bucket: gpbackup-release-licenses - json_key: ((dp/prod/gcp_svc_acct_key)) - regexp: open_source_license_VMware_Tanzu_Greenplum_Backup_and_Restore_(.*)_.*.txt + json_key: ((dp/dev/gcp_svc_acct_key)) + regexp: open_source_license_VMware_Greenplum_Backup_and_Restore_(.*)_.*.txt + +############################################## + jobs: - name: build_binaries @@ -549,74 +650,142 @@ jobs: - get: gpbackup_manager_src - get: gpbackup - get: pivnet_release_cache - - task: build-go-binaries - file: gpbackup/ci/tasks/build-go-binaries.yml - - put: gpbackup-go-components - params: - file: go_components/go_components.tar.gz + - get: centos6-gpdb5-image + - get: centos7-gpdb6-build-image + - get: rocky8-gpdb7-image + - get: ubuntu-debian-image + - in_parallel: + - do: # RHEL6 + - task: build-go-binaries-rhel6 + file: gpbackup/ci/tasks/build-go-binaries.yml + image: centos6-gpdb5-image + params: + OS: RHEL6 + - put: gpbackup-go-components-rhel6 + params: + file: go_components/go_components.tar.gz + - do: # RHEL7 + - task: build-go-binaries-rhel7 + file: gpbackup/ci/tasks/build-go-binaries.yml + image: centos7-gpdb6-build-image + params: + OS: RHEL7 + - put: gpbackup-go-components-rhel7 + params: + file: go_components/go_components.tar.gz + - do: # RHEL8 + - task: build-go-binaries-rhel8 + file: gpbackup/ci/tasks/build-go-binaries.yml + image: rocky8-gpdb7-image + params: + OS: RHEL8 + - put: gpbackup-go-components-rhel8 + params: + file: go_components/go_components.tar.gz + - do: # UBUNTU + 
- task: build-go-binaries-ubuntu + file: gpbackup/ci/tasks/build-go-binaries.yml + image: ubuntu-debian-image + params: + OS: UBUNTU + - put: gpbackup-go-components-ubuntu + params: + file: go_components/go_components.tar.gz - name: build_gppkgs plan: - in_parallel: - - get: centos6-image - - get: sles11-image - - get: bin_gpdb_5x_sles11 - - get: libyaml-0.1.7 + - get: rocky8-gpdb6-image + - get: gpdb_src + resource: gpdb6_src - get: ubuntu-debian-image - get: ubuntu-debian-test-image - get: bin_gpdb_6x_stable_ubuntu - - get: gpdb_src - resource: gpdb6_src - - get: gpbackup-go-components + - get: gpbackup-go-components-ubuntu + - get: gpbackup-go-components-rhel6 + - get: gpbackup-go-components-rhel7 + - get: gpbackup-go-components-rhel8 trigger: true passed: [build_binaries] - - get: bin_gpdb_6x_stable_centos6 + - get: bin_gpdb_6x_rhel8 - get: gpbackup passed: [build_binaries] - get: gpbackup_ddboost_plugin - get: pivnet_release_cache - get: gpbackup-release-license - task: gpbackup-tools-versions - image: centos6-image + image: rocky8-gpdb6-image file: gpbackup/ci/tasks/gpbackup-tools-versions.yml - in_parallel: - - do: # RHEL - - task: build-ddboost-RHEL - image: centos6-image + - do: # RHEL6 + - task: build-ddboost-RHEL6 + image: rocky8-gpdb6-image file: gpbackup/ci/tasks/build-ddboost.yml input_mapping: - bin_gpdb: bin_gpdb_6x_stable_centos6 - - task: tar-binaries-RHEL - image: centos6-image + bin_gpdb: bin_gpdb_6x_rhel8 + - task: tar-binaries-RHEL6 + image: rocky8-gpdb6-image file: gpbackup/ci/tasks/build-os-tars.yml - - task: build_gppkgs-RHEL - image: centos6-image + input_mapping: + gpbackup-go-components: gpbackup-go-components-rhel6 + output_mapping: + gpbackup_tar: gpbackup_tar_rhel6 + - task: build_gppkgs-RHEL6 + image: rocky8-gpdb6-image + file: gpbackup/ci/tasks/build-gppkg.yml + input_mapping: + bin_gpdb: bin_gpdb_6x_rhel8 + gpbackup_tar: gpbackup_tar_rhel6 + output_mapping: + gppkgs: rhel6-gppkg + params: + OS: RHEL6 + - do: # RHEL7 + - task: build-ddboost-RHEL7 + image: rocky8-gpdb6-image + file: gpbackup/ci/tasks/build-ddboost.yml + input_mapping: + bin_gpdb: bin_gpdb_6x_rhel8 + - task: tar-binaries-RHEL7 + image: rocky8-gpdb6-image + file: gpbackup/ci/tasks/build-os-tars.yml + input_mapping: + gpbackup-go-components: gpbackup-go-components-rhel7 + output_mapping: + gpbackup_tar: gpbackup_tar_rhel7 + - task: build_gppkgs-RHEL7 + image: rocky8-gpdb6-image file: gpbackup/ci/tasks/build-gppkg.yml input_mapping: - bin_gpdb: bin_gpdb_6x_stable_centos6 + bin_gpdb: bin_gpdb_6x_rhel8 + gpbackup_tar: gpbackup_tar_rhel7 output_mapping: - gppkgs: rhel-gppkg + gppkgs: rhel7-gppkg params: - OS: RHEL - - do: # SLES - - task: build-ddboost-SLES - image: sles11-image + OS: RHEL7 + - do: # RHEL8 + - task: build-ddboost-RHEL8 + image: rocky8-gpdb6-image file: gpbackup/ci/tasks/build-ddboost.yml input_mapping: - bin_gpdb: bin_gpdb_5x_sles11 - - task: tar-binaries-SLES - image: sles11-image + bin_gpdb: bin_gpdb_6x_rhel8 + - task: tar-binaries-RHEL8 + image: rocky8-gpdb6-image file: gpbackup/ci/tasks/build-os-tars.yml - - task: build_gppkgs-SLES - # We need to use centos image for doing rpm-build for sles - image: centos6-image + input_mapping: + gpbackup-go-components: gpbackup-go-components-rhel8 + output_mapping: + gpbackup_tar: gpbackup_tar_rhel8 + - task: build_gppkgs-RHEL8 + image: rocky8-gpdb6-image file: gpbackup/ci/tasks/build-gppkg.yml input_mapping: - bin_gpdb: bin_gpdb_5x_sles11 + bin_gpdb: bin_gpdb_6x_rhel8 + gpbackup_tar: gpbackup_tar_rhel8 output_mapping: - gppkgs: sles-gppkg 
+ gppkgs: rhel8-gppkg params: - OS: SLES + OS: RHEL8 - do: # ubuntu - task: build-ddboost-ubuntu image: ubuntu-debian-image @@ -626,37 +795,52 @@ jobs: - task: tar-binaries-ubuntu image: ubuntu-debian-image file: gpbackup/ci/tasks/build-os-tars.yml + input_mapping: + gpbackup-go-components: gpbackup-go-components-ubuntu + output_mapping: + gpbackup_tar: gpbackup_tar_ubuntu - task: build_gppkgs-ubuntu image: ubuntu-debian-test-image file: gpbackup/ci/tasks/build-gppkg.yml input_mapping: bin_gpdb: bin_gpdb_6x_stable_ubuntu + gpbackup_tar: gpbackup_tar_ubuntu output_mapping: gppkgs: ubuntu-gppkg params: OS: ubuntu - task: tar-gppkgs - image: centos6-image + image: rocky8-gpdb6-image file: gpbackup/ci/tasks/tar-gppkgs.yml - put: gppkgs params: file: gppkgs/gpbackup-gppkgs.tar.gz - - put: github_release_components - params: - file: gpbackup_tar/gpbackup-*.tar.gz + - in_parallel: + - put: release_components_rhel6 + params: + file: gpbackup_tar_rhel6/gpbackup-*.tar.gz + - put: release_components_rhel7 + params: + file: gpbackup_tar_rhel7/gpbackup-*.tar.gz + - put: release_components_rhel8 + params: + file: gpbackup_tar_rhel8/gpbackup-*.tar.gz + - put: release_components_ubuntu + params: + file: gpbackup_tar_ubuntu/gpbackup-*.tar.gz - name: s3_plugin_tests plan: - in_parallel: - - get: centos7-image + - get: rocky8-gpdb6-image - get: gpbackup_s3_plugin passed: [build_binaries] - get: gpbackup passed: [build_gppkgs] - get: gpdb_src - resource: gpdb5_src + resource: gpdb6_src - get: gpdb_binary - resource: bin_gpdb_5x_stable_centos6 + resource: bin_gpdb_6x_rhel8 - get: gppkgs passed: [build_gppkgs] trigger: true @@ -670,19 +854,21 @@ jobs: <<: *ccp_default_params vars: <<: *ccp_default_vars - PLATFORM: centos7 + PLATFORM: rocky8 - task: gen_cluster file: ccp_src/ci/tasks/gen_cluster.yml params: <<: *ccp_gen_cluster_default_params - PLATFORM: centos7 + PLATFORM: rocky8 - task: gpinitsystem file: ccp_src/ci/tasks/gpinitsystem.yml - task: setup-centos-env - image: centos7-image + image: rocky8-gpdb6-image + params: + default_ami_user: rocky file: gpbackup/ci/tasks/setup-centos-env.yml - task: run_tests - image: centos7-image + image: rocky8-gpdb6-image file: gpbackup/ci/tasks/s3-plugin-tests.yml params: REGION: us-west-2 @@ -699,7 +885,7 @@ jobs: - name: s3_plugin_perf plan: - in_parallel: - - get: centos7-image + - get: rocky8-gpdb7-image - get: gpbackup_s3_plugin passed: [build_binaries] - get: gpbackup @@ -721,7 +907,7 @@ jobs: <<: *ccp_default_params terraform_source: ccp_src/google-nvme-block-device/ vars: - PLATFORM: centos6 + PLATFORM: centos7 number_of_nodes: 4 segments_per_host: 4 instance_type: n1-standard-8 @@ -730,13 +916,14 @@ jobs: file: ccp_src/ci/tasks/gen_cluster.yml params: <<: *ccp_gen_cluster_default_params + PLATFORM: centos7 - task: gpinitsystem file: ccp_src/ci/tasks/gpinitsystem.yml - task: setup-centos-env - image: centos7-image + image: rocky8-gpdb7-image file: gpbackup/ci/tasks/setup-centos-env.yml - task: run_perf - image: centos7-image + image: rocky8-gpdb7-image file: gpbackup/ci/tasks/s3-plugin-perf.yml params: REGION: us-west-2 @@ -754,7 +941,7 @@ jobs: - name: gpbackup-manager-tests plan: - in_parallel: - - get: centos7-image + - get: centos7-gpdb6-golang-image - get: gpbackup_manager_src passed: [build_binaries] - get: gpbackup @@ -767,62 +954,9 @@ jobs: trigger: true passed: [build_gppkgs] - task: run_tests - image: centos7-image + image: centos7-gpdb6-golang-image file: gpbackup/ci/tasks/test-gpbackup-manager.yml -- name: ddboost_plugin_and_boostfs_tests_43 - plan: - 
- in_parallel: - - get: gpdb_src - resource: gpdb5_src - - get: gpdb_binary - resource: bin_gpdb_43_stable - - get: pgcrypto43 - - get: centos6-image - - get: gpbackup_ddboost_plugin - passed: [build_gppkgs] - - get: gpbackup - passed: [build_gppkgs] - - get: boostfs_installer - - get: ccp_src - - get: gppkgs - passed: [build_gppkgs] - trigger: true - - get: terraform.d - params: - unpack: true - - put: terraform - params: - <<: *ccp_default_params - terraform_source: ccp_src/google-nvme-block-device/ - vars: - instance_type: n1-standard-8 - PLATFORM: centos6 - - task: gen_cluster - params: - <<: *ccp_gen_cluster_default_params - PLATFORM: centos6 - file: ccp_src/ci/tasks/gen_cluster.yml - - task: gpinitsystem - file: ccp_src/ci/tasks/gpinitsystem.yml - - task: setup-centos-env - file: gpbackup/ci/tasks/setup-centos-env.yml - image: centos6-image - - task: install-boostfs - file: gpbackup/ci/tasks/boostfs-install.yml - image: centos6-image - params: - <<: *ddboost_params - - task: run_tests - image: centos6-image - file: gpbackup/ci/tasks/ddboost-plugin-tests.yml - params: - <<: *ddboost_params - on_success: - <<: *ccp_destroy_nvme - ensure: - <<: *set_failed - - name: ddboost_plugin_and_boostfs_tests_5x plan: - in_parallel: @@ -830,7 +964,7 @@ jobs: resource: gpdb5_src - get: gpdb_binary resource: bin_gpdb_5x_stable_centos6 - - get: centos6-image + - get: rocky8-gpdb7-image - get: gpbackup_ddboost_plugin passed: [build_gppkgs] - get: gpbackup @@ -849,24 +983,24 @@ jobs: terraform_source: ccp_src/google-nvme-block-device/ vars: instance_type: n1-standard-8 - PLATFORM: centos6 + PLATFORM: centos7 - task: gen_cluster params: <<: *ccp_gen_cluster_default_params - PLATFORM: centos6 + PLATFORM: centos7 file: ccp_src/ci/tasks/gen_cluster.yml - task: gpinitsystem file: ccp_src/ci/tasks/gpinitsystem.yml - task: setup-centos-env file: gpbackup/ci/tasks/setup-centos-env.yml - image: centos6-image + image: rocky8-gpdb7-image - task: install-boostfs file: gpbackup/ci/tasks/boostfs-install.yml - image: centos6-image + image: rocky8-gpdb7-image params: <<: *ddboost_params - task: run_tests - image: centos6-image + image: rocky8-gpdb7-image file: gpbackup/ci/tasks/ddboost-plugin-tests.yml params: <<: *ddboost_params @@ -881,10 +1015,10 @@ jobs: - get: gpdb_src resource: gpdb6_src - get: gpdb_binary - resource: bin_gpdb_6x_stable_centos6 + resource: bin_gpdb_6x_rhel8 - get: dummy_seclabel resource: dummy_seclabel_linux_gpdb6 - - get: centos6-image + - get: rocky8-gpdb7-image - get: gpbackup_ddboost_plugin passed: [build_gppkgs] - get: gpbackup @@ -903,24 +1037,27 @@ jobs: terraform_source: ccp_src/google-nvme-block-device/ vars: instance_type: n1-standard-8 - PLATFORM: centos6 + PLATFORM: rhel8 - task: gen_cluster params: <<: *ccp_gen_cluster_default_params - PLATFORM: centos6 + PLATFORM: rhel8 file: ccp_src/ci/tasks/gen_cluster.yml - task: gpinitsystem file: ccp_src/ci/tasks/gpinitsystem.yml - task: setup-centos-env file: gpbackup/ci/tasks/setup-centos-env.yml - image: centos6-image + image: rocky8-gpdb7-image + params: + default_ami_user: rhel - task: install-boostfs file: gpbackup/ci/tasks/boostfs-install.yml - image: centos6-image + image: rocky8-gpdb7-image params: + default_ami_user: rhel <<: *ddboost_params - task: run_tests - image: centos6-image + image: rocky8-gpdb7-image file: gpbackup/ci/tasks/ddboost-plugin-tests.yml params: <<: *ddboost_params @@ -932,7 +1069,7 @@ jobs: - name: ddboost_plugin_perf plan: - in_parallel: - - get: centos6-image + - get: rocky8-gpdb7-image - get: 
gpbackup_ddboost_plugin passed: [build_gppkgs] - get: gpbackup @@ -942,7 +1079,7 @@ jobs: - get: gpdb_src resource: gpdb6_src - get: gpdb_binary - resource: bin_gpdb_6x_stable_centos6 + resource: bin_gpdb_6x_stable_centos7 - get: gppkgs passed: [build_gppkgs] trigger: true @@ -957,19 +1094,19 @@ jobs: instance_type: n1-standard-8 number_of_nodes: 4 segments_per_host: 4 - PLATFORM: centos6 + PLATFORM: centos7 - task: gen_cluster params: <<: *ccp_gen_cluster_default_params - PLATFORM: centos6 + PLATFORM: centos7 file: ccp_src/ci/tasks/gen_cluster.yml - task: gpinitsystem file: ccp_src/ci/tasks/gpinitsystem.yml - task: setup-centos-env - image: centos6-image + image: rocky8-gpdb7-image file: gpbackup/ci/tasks/setup-centos-env.yml - task: run_perf - image: centos6-image + image: rocky8-gpdb7-image file: gpbackup/ci/tasks/ddboost-plugin-perf.yml params: <<: *ddboost_params @@ -983,49 +1120,6 @@ jobs: ensure: <<: *set_failed -- name: GPDB5-sles11 - plan: - - in_parallel: - - get: sles11-image - - get: gpbackup - passed: [build_gppkgs] - - get: gpdb_binary - resource: bin_gpdb_5x_sles11 - - get: ccp_src - - get: gpdb_src - resource: gpdb5_src - - get: gppkgs - passed: [build_gppkgs] - trigger: true - - get: terraform.d - params: - unpack: true - - put: terraform - params: - <<: *ccp_default_params - vars: - <<: *ccp_default_vars - PLATFORM: sles12 - default_image_user: root - - task: gen_cluster - params: - <<: *ccp_gen_cluster_default_params - PLATFORM: sles12 - file: ccp_src/ci/tasks/gen_cluster.yml - - task: gpinitsystem - file: ccp_src/ci/tasks/gpinitsystem.yml - - task: integration-tests - image: sles11-image - file: gpbackup/ci/tasks/gpbackup-tests.yml - params: - OS: SLES - on_success: - <<: *ccp_destroy - on_failure: - *slack_alert - ensure: - <<: *set_failed - - name: GPDB6-ubuntu plan: - in_parallel: @@ -1072,31 +1166,37 @@ jobs: ensure: <<: *set_failed - -- name: GPDB4.3 +- name: GPDB7 plan: - in_parallel: - - get: centos6-image + - get: rocky8-gpdb7-image - get: gpbackup passed: [build_gppkgs] - - get: bin_gpdb - resource: bin_gpdb_43_stable + - get: bin_gpdb_7x_rhel8 + trigger: true - get: gpdb_src - resource: gpdb5_src + resource: gpdb_main_src + - get: dummy_seclabel + resource: dummy_seclabel_linux_main - get: gppkgs - passed: [build_gppkgs] trigger: true - - task: run-tests-locally-centos6 - image: centos6-image + passed: [build_gppkgs] + - task: run-tests-locally-rocky8 + image: rocky8-gpdb7-image file: gpbackup/ci/tasks/test-on-local-cluster.yml + params: + REQUIRES_DUMMY_SEC: true + OS: RHEL8 + input_mapping: + bin_gpdb: bin_gpdb_7x_rhel8 on_failure: *slack_alert - name: GPDB5 plan: - in_parallel: - - get: centos6-image - - get: centos7-image + - get: centos6-gpdb5-image + - get: centos7-gpdb5-image - get: gpbackup passed: [build_gppkgs] - get: bin_gpdb_5x_stable_centos6 @@ -1108,25 +1208,30 @@ jobs: passed: [build_gppkgs] - in_parallel: - task: run-tests-locally-centos6 - image: centos6-image + image: centos6-gpdb5-image file: gpbackup/ci/tasks/test-on-local-cluster.yml input_mapping: bin_gpdb: bin_gpdb_5x_stable_centos6 + params: + OS: RHEL6 - task: run-tests-locally-centos7 - image: centos7-image + image: centos7-gpdb5-image file: gpbackup/ci/tasks/test-on-local-cluster.yml input_mapping: bin_gpdb: bin_gpdb_5x_stable_centos7 + params: + OS: RHEL7 on_failure: *slack_alert - name: GPDB6 plan: - in_parallel: - - get: centos6-image - - get: centos7-image + - get: centos7-gpdb6-golang-image + - get: rocky8-gpdb6-image - get: ubuntu-debian-test-image - get: 
bin_gpdb_6x_stable_ubuntu + - get: bin_gpdb_6x_rhel8 - get: gpbackup passed: [build_gppkgs] - get: bin_gpdb_6x_stable_centos6 @@ -1139,20 +1244,22 @@ jobs: trigger: true passed: [build_gppkgs] - in_parallel: - - task: run-tests-locally-centos6 - image: centos6-image + - task: run-tests-locally-centos7 + image: centos7-gpdb6-golang-image file: gpbackup/ci/tasks/test-on-local-cluster.yml params: REQUIRES_DUMMY_SEC: true + OS: RHEL7 input_mapping: - bin_gpdb: bin_gpdb_6x_stable_centos6 - - task: run-tests-locally-centos7 - image: centos7-image + bin_gpdb: bin_gpdb_6x_stable_centos7 + - task: run-tests-locally-rocky8 + image: rocky8-gpdb6-image file: gpbackup/ci/tasks/test-on-local-cluster.yml params: REQUIRES_DUMMY_SEC: true + OS: RHEL8 input_mapping: - bin_gpdb: bin_gpdb_6x_stable_centos7 + bin_gpdb: bin_gpdb_6x_rhel8 - task: run-tests-locally-ubuntu-debian image: ubuntu-debian-test-image file: gpbackup/ci/tasks/test-on-local-cluster.yml @@ -1167,13 +1274,14 @@ jobs: - name: GPDB6-7-seg-cluster plan: - in_parallel: - - get: centos6-image - - get: centos7-image + - get: centos7-gpdb6-golang-image + - get: rocky8-gpdb6-image - get: ubuntu-debian-test-image - get: bin_gpdb_6x_stable_ubuntu - get: gpbackup passed: [build_gppkgs] - get: bin_gpdb_6x_stable_centos6 + - get: bin_gpdb_6x_rhel8 - get: bin_gpdb_6x_stable_centos7 - get: gpdb_src resource: gpdb6_src @@ -1183,22 +1291,24 @@ jobs: trigger: true passed: [build_gppkgs] - in_parallel: - - task: run-tests-locally-centos6 - image: centos6-image + - task: run-tests-locally-centos7 + image: centos7-gpdb6-golang-image file: gpbackup/ci/tasks/test-on-local-cluster.yml params: REQUIRES_DUMMY_SEC: true LOCAL_CLUSTER_SIZE: 7 + OS: RHEL7 input_mapping: - bin_gpdb: bin_gpdb_6x_stable_centos6 - - task: run-tests-locally-centos7 - image: centos7-image + bin_gpdb: bin_gpdb_6x_stable_centos7 + - task: run-tests-locally-rocky8 + image: rocky8-gpdb6-image file: gpbackup/ci/tasks/test-on-local-cluster.yml params: REQUIRES_DUMMY_SEC: true LOCAL_CLUSTER_SIZE: 7 + OS: RHEL8 input_mapping: - bin_gpdb: bin_gpdb_6x_stable_centos7 + bin_gpdb: bin_gpdb_6x_rhel8 - task: run-tests-locally-ubuntu-debian image: ubuntu-debian-test-image file: gpbackup/ci/tasks/test-on-local-cluster.yml @@ -1214,7 +1324,7 @@ jobs: - name: backward-compatibility plan: - in_parallel: - - get: centos6-image + - get: rocky8-gpdb7-image - get: gpbackup passed: [build_gppkgs] - get: gpdb_binary @@ -1244,234 +1354,133 @@ jobs: file: ccp_src/ci/tasks/gpinitsystem.yml - task: setup-centos-env file: gpbackup/ci/tasks/setup-centos-env.yml - image: centos6-image + image: rocky8-gpdb7-image - task: backup-1.0.0-restore-latest - image: centos6-image + image: rocky8-gpdb7-image file: gpbackup/ci/tasks/all-tests.yml params: GPBACKUP_VERSION: "1.0.0" + OS: RHEL7 - task: backup-1.7.1-restore-latest - image: centos6-image + image: rocky8-gpdb7-image file: gpbackup/ci/tasks/all-tests.yml params: GPBACKUP_VERSION: "1.7.1" + OS: RHEL7 on_success: <<: *ccp_destroy ensure: <<: *set_failed - -- name: scale-6x - plan: - - in_parallel: - - get: centos7-image - - get: gpbackup - passed: [build_gppkgs] - - get: gpdb_binary - resource: bin_gpdb_6x_stable_centos7 - - get: ccp_src - - get: gpdb_src - resource: gpdb6_src - - get: scale_schema - - get: gppkgs - passed: [build_gppkgs] - - get: terraform.d - params: - unpack: true - - put: terraform - params: - <<: *ccp_default_params - terraform_source: ccp_src/google-nvme-block-device/ - vars: - instance_type: n1-standard-8 - PLATFORM: centos7 - - task: gen_cluster - 
params: - <<: *ccp_gen_cluster_default_params - PLATFORM: centos7 - file: ccp_src/ci/tasks/gen_cluster.yml - - task: gpinitsystem - file: ccp_src/ci/tasks/gpinitsystem.yml - - task: setup-centos-env - image: centos7-image - file: gpbackup/ci/tasks/setup-centos-env.yml - - task: scale-tests - image: centos7-image - file: gpbackup/ci/tasks/scale-tests.yml - on_success: - <<: *ccp_destroy_nvme - on_failure: - *slack_alert - ensure: - <<: *set_failed - -- name: scale-5x - plan: - - in_parallel: - - get: centos6-image - - get: gpbackup - passed: [build_gppkgs] - - get: gpdb_binary - resource: bin_gpdb_5x_stable_centos6 - - get: ccp_src - - get: gpdb_src - resource: gpdb5_src - - get: scale_schema - - get: gppkgs - passed: [build_gppkgs] - - get: terraform.d - params: - unpack: true - - put: terraform - params: - <<: *ccp_default_params - terraform_source: ccp_src/google-nvme-block-device/ - vars: - instance_type: n1-standard-8 - - task: gen_cluster - params: - <<: *ccp_gen_cluster_default_params - file: ccp_src/ci/tasks/gen_cluster.yml - - task: gpinitsystem - file: ccp_src/ci/tasks/gpinitsystem.yml - - task: setup-centos-env - image: centos6-image - file: gpbackup/ci/tasks/setup-centos-env.yml - - task: scale-tests - image: centos6-image - file: gpbackup/ci/tasks/scale-tests.yml - on_success: - <<: *ccp_destroy_nvme - on_failure: - *slack_alert - ensure: - <<: *set_failed - -- name: scale-43 - plan: - - in_parallel: - - get: centos6-image - - get: gpbackup - passed: [build_gppkgs] - - get: gpdb_binary - resource: bin_gpdb_43_stable - - get: ccp_src - - get: gpdb_src - resource: gpdb5_src - - get: scale_schema - - get: gppkgs - passed: [build_gppkgs] - - get: terraform.d - params: - unpack: true - - put: terraform - params: - <<: *ccp_default_params - terraform_source: ccp_src/google-nvme-block-device/ - vars: - instance_type: n1-standard-8 - - task: gen_cluster - params: - <<: *ccp_gen_cluster_default_params - file: ccp_src/ci/tasks/gen_cluster.yml - - task: gpinitsystem - file: ccp_src/ci/tasks/gpinitsystem.yml - - task: setup-centos-env - image: centos6-image - file: gpbackup/ci/tasks/setup-centos-env.yml - - task: scale-tests - image: centos6-image - file: gpbackup/ci/tasks/scale-tests.yml - on_success: - <<: *ccp_destroy_nvme - on_failure: - *slack_alert - ensure: - <<: *set_failed - - name: green_gate plan: - in_parallel: - - get: github_release_components - passed: [build_gppkgs] - get: gppkgs trigger: true passed: - build_gppkgs - - GPDB4.3 - GPDB5 - - GPDB5-sles11 - GPDB6-ubuntu - GPDB6 - s3_plugin_tests - backward-compatibility - - ddboost_plugin_and_boostfs_tests_43 - ddboost_plugin_and_boostfs_tests_5x - ddboost_plugin_and_boostfs_tests_6x - gpbackup-manager-tests - get: gpbackup passed: [build_gppkgs] - - in_parallel: - - put: gppkgs_rc + - put: gppkgs_release_versioned params: file: gppkgs/gpbackup-gppkgs.tar.gz - - put: github_release_components_rc - params: - file: github_release_components/gpbackup-*.tar.gz - name: push-to-github plan: - - get: centos6-image - - get: github_release_components_rc + - get: rocky8-gpdb7-image + - get: release_components_rhel6 + passed: [build_gppkgs] + - get: release_components_rhel7 + passed: [build_gppkgs] + - get: release_components_rhel8 + passed: [build_gppkgs] + - get: release_components_ubuntu + passed: [build_gppkgs] + - get: gpbackup passed: [green_gate] - task: prepare-for-release - image: centos6-image - config: - platform: linux - inputs: - - name: github_release_components_rc - outputs: - - name: github_release_components_untarred - 
run: - path: "sh" - args: - - -exc - - | - set -x - tar xvzf github_release_components_rc/*.gz -C github_release_components_untarred - tar xvzf github_release_components_untarred/bin_gpbackup.tar.gz -C github_release_components_untarred + image: rocky8-gpdb7-image + file: gpbackup/ci/tasks/prepare_release.yml - put: gpbackup_github_release params: - name: github_release_components_untarred/gpbackup_version - tag: github_release_components_untarred/gpbackup_version + name: components_untarred_rhel8/gpbackup_version + tag: components_untarred_rhel8/gpbackup_version globs: - - github_release_components_untarred/bin/gpbackup - - github_release_components_untarred/bin/gprestore - - github_release_components_untarred/bin/gpbackup_helper + - components_untarred_rhel6/output/gpbackup_binaries_rhel6.tar.gz + - components_untarred_rhel7/output/gpbackup_binaries_rhel7.tar.gz + - components_untarred_rhel8/output/gpbackup_binaries_rhel8.tar.gz + - components_untarred_ubuntu/output/gpbackup_binaries_ubuntu.tar.gz + - name: push-to-pivnet plan: - in_parallel: - get: gppkgs - resource: gppkgs_rc - passed: [green_gate] - - get: github_release_components_rc + resource: gppkgs_release_versioned passed: [green_gate] + - get: release_components_rhel6 + passed: [build_gppkgs] + - get: release_components_rhel7 + passed: [build_gppkgs] + - get: release_components_rhel8 + passed: [build_gppkgs] + - get: release_components_ubuntu + passed: [build_gppkgs] - get: gpbackup passed: [green_gate] - get: gpbackup-release-license - get: pivnet_release_cache - - get: centos6-image + - get: rocky8-gpdb7-image + - get: tanzunet_client - task: update metadata.yml - image: centos6-image + image: rocky8-gpdb7-image file: gpbackup/ci/tasks/update-metadata.yml - - put: pivnet-upload params: - metadata_file: workspace/metadata.yml - file_glob: workspace/files-to-upload/* - s3_filepath_prefix: ((dp/prod/pivnet_s3_filepath_prefix)) + TANZUNET_REFRESH_TOKEN: ((releng/tanzunet-refresh-token)) - put: pivnet_release_cache params: file: workspace/v-* + - in_parallel: + - put: final_published_file_gppkg_gp7_rhel8 + params: + file: workspace/files-to-upload/greenplum_backup_restore-*-gp7-rhel8-x86_64.gppkg + - put: final_published_file_gppkg_gp6_rhel8 + params: + file: workspace/files-to-upload/greenplum_backup_restore-*-gp6-rhel8-x86_64.gppkg + - put: final_published_file_gppkg_gp6_rhel7 + params: + file: workspace/files-to-upload/greenplum_backup_restore-*-gp6-rhel7-x86_64.gppkg + - put: final_published_file_gppkg_gp6_rhel6 + params: + file: workspace/files-to-upload/greenplum_backup_restore-*-gp6-rhel6-x86_64.gppkg + - put: final_published_file_gppkg_gp6_ubuntu + params: + file: workspace/files-to-upload/greenplum_backup_restore-*-gp6-ubuntu-amd64.gppkg + - put: final_published_file_gppkg_gp5_rhel7 + params: + file: workspace/files-to-upload/greenplum_backup_restore-*-gp5-rhel7-x86_64.gppkg + - put: final_published_file_gppkg_gp5_rhel6 + params: + file: workspace/files-to-upload/greenplum_backup_restore-*-gp5-rhel6-x86_64.gppkg + - put: final_published_file_tar_rhel8 + params: + file: workspace/files-to-upload/greenplum_backup_restore-*-rhel8.tar.gz + - put: final_published_file_tar_rhel7 + params: + file: workspace/files-to-upload/greenplum_backup_restore-*-rhel7.tar.gz + - put: final_published_file_tar_rhel6 + params: + file: workspace/files-to-upload/greenplum_backup_restore-*-rhel6.tar.gz + - put: final_published_file_tar_ubuntu + params: + file: workspace/files-to-upload/greenplum_backup_restore-*-ubuntu.tar.gz diff --git 
a/ci/pivnet_release/metadata.yml b/ci/pivnet_release/metadata.yml index 819d4116f..57ad1ebf9 100644 --- a/ci/pivnet_release/metadata.yml +++ b/ci/pivnet_release/metadata.yml @@ -3,7 +3,7 @@ release: release_type: eula_slug: vmware_general_terms description: | - The VMware Tanzu™ Greenplum® Backup and Restore release contains the following versions of binaries for use with the VMware Tanzu™ Greenplum® Server: + The VMware Greenplum® Backup and Restore release contains the following versions of binaries for use with the VMware Greenplum® Server: - gpbackup - gpbackup_helper - gprestore @@ -17,58 +17,89 @@ release: license_exception: NLR end_of_support_date: -product_files: -########## SLES Gppkgs ########## - - file: workspace/files-to-upload/pivotal_greenplum_backup_restore--gp6-sles-x86_64.gppkg - description: - upload_as: Backup and Restore for GP 6 on SLES - file_type: Software - file_version: - - file: workspace/files-to-upload/pivotal_greenplum_backup_restore--gp5-sles-x86_64.gppkg - description: - upload_as: Backup and Restore for GP 5 on SLES - file_type: Software - file_version: - - file: workspace/files-to-upload/pivotal_greenplum_backup_restore--gp4.3orca-sles-x86_64.gppkg - description: - upload_as: Backup and Restore for GP 4 on SLES - file_type: Software - file_version: - -########## RHEL Gppkgs ########## - - file: workspace/files-to-upload/pivotal_greenplum_backup_restore--gp6-rhel-x86_64.gppkg - description: - upload_as: Backup and Restore for GP 6 on RHEL - file_type: Software - file_version: - - file: workspace/files-to-upload/pivotal_greenplum_backup_restore--gp5-rhel-x86_64.gppkg - description: - upload_as: Backup and Restore for GP 5 on RHEL - file_type: Software - file_version: - - file: workspace/files-to-upload/pivotal_greenplum_backup_restore--gp4.3orca-rhel-x86_64.gppkg - description: - upload_as: Backup and Restore for GP 4 on RHEL - file_type: Software - file_version: +file_groups: +- name: GPDB7 + product_files: + - file: file://greenplum_backup_restore--gp7-rhel8-x86_64.gppkg + description: + upload_as: Backup and Restore for GP 7 on RHEL8 + file_type: Software + file_version: + - file: file://greenplum_backup_restore--rhel8.tar.gz + description: + upload_as: Backup and Restore for GP 7 on RHEL8 compressed tarball + file_type: Software + file_version: -########## Ubuntu Debian Gppkgs ########## - - file: workspace/files-to-upload/pivotal_greenplum_backup_restore--gp6-ubuntu-amd64.gppkg - description: - upload_as: Backup and Restore for GP 6 on Ubuntu 18.04 - file_type: Software - file_version: +- name: GPDB6 + product_files: + - file: file://greenplum_backup_restore--gp6-rhel8-x86_64.gppkg + description: + upload_as: Backup and Restore for GP 6 on RHEL8 + file_type: Software + file_version: + - file: file://greenplum_backup_restore--gp6-rhel7-x86_64.gppkg + description: + upload_as: Backup and Restore for GP 6 on RHEL7 + file_type: Software + file_version: + - file: file://greenplum_backup_restore--gp6-rhel6-x86_64.gppkg + description: + upload_as: Backup and Restore for GP 6 on RHEL6 + file_type: Software + file_version: + - file: file://greenplum_backup_restore--gp6-ubuntu-amd64.gppkg + description: + upload_as: Backup and Restore for GP 6 on Ubuntu 18.04 + file_type: Software + file_version: + - file: file://greenplum_backup_restore--rhel8.tar.gz + description: + upload_as: Backup and Restore for GP 6 on RHEL8 compressed tarball + file_type: Software + file_version: + - file: file://greenplum_backup_restore--rhel7.tar.gz + description: + upload_as: Backup 
and Restore for GP 6 on RHEL7 compressed tarball + file_type: Software + file_version: + - file: file://greenplum_backup_restore--rhel6.tar.gz + description: + upload_as: Backup and Restore for GP 6 on RHEL6 compressed tarball + file_type: Software + file_version: + - file: file://greenplum_backup_restore--ubuntu.tar.gz + description: + upload_as: Backup and Restore for GP 6 on Ubuntu 18.04 compressed tarball + file_type: Software + file_version: -########## Binary Tarball ########## - - file: workspace/files-to-upload/pivotal_greenplum_backup_restore-.tar.gz - description: - upload_as: Backup and Restore for GP 4 / 5 / 6 compressed tarball - file_type: Software - file_version: +- name: GPDB5 + product_files: + - file: file://greenplum_backup_restore--gp5-rhel7-x86_64.gppkg + description: + upload_as: Backup and Restore for GP 5 on RHEL7 + file_type: Software + file_version: + - file: file://greenplum_backup_restore--gp5-rhel6-x86_64.gppkg + description: + upload_as: Backup and Restore for GP 5 on RHEL6 + file_type: Software + file_version: + - file: file://greenplum_backup_restore--rhel7.tar.gz + description: + upload_as: Backup and Restore for GP 5 on RHEL7 compressed tarball + file_type: Software + file_version: + - file: file://greenplum_backup_restore--rhel6.tar.gz + description: + upload_as: Backup and Restore for GP 5 on RHEL6 compressed tarball + file_type: Software + file_version: -########## License ########## - - file: workspace/files-to-upload/ - upload_as: Open Source Licenses for VMware Tanzu™ Greenplum® Backup and Restore - description: Open Source Licenses for VMware Tanzu™ Greenplum® Backup and Restore - file_type: Open Source License - file_version: +product_files: +- file: file:// + upload_as: Open Source Licenses for VMware Greenplum® Backup and Restore + description: Open Source Licenses for VMware Greenplum® Backup and Restore + file_type: Open Source License + file_version: diff --git a/ci/regression/README.md b/ci/regression/README.md new file mode 100644 index 000000000..19fe34976 --- /dev/null +++ b/ci/regression/README.md @@ -0,0 +1,13 @@ +# Regression Testing +One of the core tests in gpdb is the `make installcheck-world` invocation in the main `gpdb` repo. This invocation runs a series of tests, including regression tests, that ensure the functionality of gpdb. +These tests leave behind many artifacts in a handful of databases, the most important of which is `regression`. This `regression` database represents a large cross-section of database objects that gpdb +supports. Testing support of this collection of objects is a standard method used by the gpdb utilities. For `gpbackup` we want to ensure that we can correctly back up and restore a database containing +all of these objects. The `regression` pipeline automates that process. + +## Data Source +We use a dump of the `regression` database generated by the main gpdb pipelines, stored in a GCS bucket. This dump is generated by `pg_dumpall` and loaded into our test database using `psql`. +We use standalone psql commands to clean out a small number of objects with known bugs, each linked to the relevant gpdb GitHub issue. + +## Tests +We use a standard gpbackup invocation to back up the database, followed by an immediate gprestore invocation of that backup, to ensure that both utilities correctly complete their functions. +We use a [simple golang utility](https://github.com/AJR-VMware/diffdb) to check the table counts, and the row counts of each table, across the backed-up and restored databases to ensure that they match. 
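Concretely, the round-trip the Tests section describes can be pictured as a short shell sequence. This is a minimal sketch under stated assumptions, not the pipeline's actual task script (that lives in `gpbackup/ci/tasks/icw-roundtrip.yml`): the `gpbackup` and `gprestore` flags are real, but the timestamp parsing and the `diffdb` arguments are illustrative guesses.

```bash
#!/bin/bash
# Hypothetical regression round-trip, assuming a running cluster and an
# ICW dump named dump.sql.xz in the working directory.
set -ex

# Load the pg_dumpall-generated dump into the cluster via psql.
xzcat dump.sql.xz | psql postgres

# Back up the regression database, capturing the backup's timestamp key
# from gpbackup's "Backup Timestamp = <YYYYMMDDHHMMSS>" log line.
timestamp=$(gpbackup --dbname regression | sed -n 's/.*Backup Timestamp = \([0-9]\{14\}\).*/\1/p')

# Restore into a fresh database so source and restore can be compared.
gprestore --timestamp "${timestamp}" --redirect-db regression_restored --create-db

# diffdb's real CLI may differ; illustratively, compare table counts and
# per-table row counts across the two databases.
diffdb --db1 regression --db2 regression_restored
```

In the pipeline itself the dump arrives via the `icw_dump` GCS resource and the comparison tool is checked out from `diffdb_src`, as wired up in `regression_pipeline.yml` below.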
diff --git a/ci/regression/regression_pipeline.yml b/ci/regression/regression_pipeline.yml new file mode 100644 index 000000000..f091c2994 --- /dev/null +++ b/ci/regression/regression_pipeline.yml @@ -0,0 +1,484 @@ +# USAGE: fly -t dp set-pipeline -p regression -c ~/workspace/gpbackup/ci/regression/regression_pipeline.yml -v gpbackup-git-branch=BRANCH_NAME +--- +groups: +- name: Regression + jobs: + - build_binaries + - build_gppkgs + - regdb-GPDB6 + - regdb-GPDB7 + +resource_types: +- name: terraform + type: registry-image + source: + repository: ljfranklin/terraform-resource + tag: 0.11.14 + +- name: slack-notification + type: registry-image + source: + repository: cfcommunity/slack-notification-resource + tag: latest + +- name: gcs + type: registry-image + source: + repository: frodenas/gcs-resource + +- name: pivnet + type: registry-image + source: + repository: pivotalcf/pivnet-resource + tag: latest-final + +## ====================================================================== +## _ __ ___ ___ ___ _ _ _ __ ___ ___ ___ +## | '__/ _ \/ __|/ _ \| | | | '__/ __/ _ \/ __| +## | | | __/\__ \ (_) | |_| | | | (_| __/\__ \ +## |_| \___||___/\___/ \__,_|_| \___\___||___/ +## ====================================================================== + +resources: +##### Docker Images ##### +- name: rocky8-gpdb6-image + type: registry-image + source: + repository: gcr.io/data-gpdb-public-images/gpdb6-rocky8-test + tag: latest + +- name: rocky8-gpdb7-image + type: registry-image + source: + repository: gcr.io/data-gpdb-public-images/gpdb7-rocky8-test + tag: latest + +##### Source Code #### +- name: gpbackup + type: git + icon: github-circle + source: + uri: https://github.com/greenplum-db/gpbackup + branch: ((gpbackup-git-branch)) + +- name: gpbackup_s3_plugin + type: git + icon: github-circle + source: + branch: ((dp/prod/gpbackup-s3-plugin-git-branch)) + uri: https://github.com/greenplum-db/gpbackup-s3-plugin + +- name: gpbackup_ddboost_plugin + type: git + icon: github-circle + source: + branch: ((dp/gpbackup-ddboost-plugin-branch)) + private_key: ((dp/gpbackup-ddboost-plugin-remote-key)) + uri: ((dp/gpbackup-ddboost-plugin-git-remote)) + +- name: gpbackup_manager_src + type: git + icon: github-circle + source: + branch: main + private_key: ((dp/prod/gp-backup-manager-remote-deploy-key)) + uri: ((dp/gp-backup-manager-git-remote)) + +- name: diffdb_src + type: git + icon: github-circle + source: + uri: https://github.com/AJR-VMware/diffdb + branch: main + +- name: gpdb6_src + type: git + icon: github-circle + source: + uri: https://github.com/greenplum-db/gpdb + branch: 6X_STABLE + +- name: gpdb_main_src + type: git + icon: github-circle + source: + uri: https://github.com/greenplum-db/gpdb + branch: main + +#### Binaries #### +- name: bin_gpdb_6x_rhel8 + type: gcs + source: + bucket: ((dp/prod/gcs-ci-bucket)) + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: server/published/gpdb6/server-rc-(.*)-rhel8_x86_64((dp/prod/rc-build-type-gcs)).tar.gz + +- name: bin_gpdb_7x_rhel8 + type: gcs + icon: google + source: + bucket: ((dp/prod/gcs-ci-bucket)) + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: server/published/main/server-rc-(.*)-rhel8_x86_64.tar.gz + +#### Misc Other #### +- name: weekly-trigger + type: time + source: + location: America/Los_Angeles + interval: 168h # 7 days + +- name: gpbackup-go-components-rhel8 + type: gcs + icon: google + source: + bucket: ((dp/dev/gcs-ci-bucket)) + json_key: ((dp/dev/gcp_svc_acct_key)) + versioned_file: 
gpbackup/intermediates/rhel8/go_components.tar.gz + +- name: pivnet_release_cache + type: s3 + icon: amazon + source: + access_key_id: ((aws-bucket-access-key-id)) + bucket: ((dp/prod/pivnet_bucket_name)) + region_name: ((dp/aws-region)) + secret_access_key: ((aws-bucket-secret-access-key)) + regexp: pivnet_release_version/v-(.*) + +- name: gpbackup-release-license + type: gcs + icon: google + source: + bucket: gpbackup-release-licenses + json_key: ((dp/dev/gcp_svc_acct_key)) + regexp: open_source_license_VMware_Greenplum_Backup_and_Restore_(.*)_.*.txt + +- name: icw_dump_GPDB6 + type: gcs + source: + bucket: pivotal-gpdb-concourse-resources-intermediates-prod + json_key: ((concourse-gcs-resources-service-account-key)) + versioned_file: 6X_STABLE_without_asserts/icw_planner_centos6_dump/dump.sql.xz + +- name: icw_dump_GPDB7 + type: gcs + source: + bucket: pivotal-gpdb-concourse-resources-intermediates-prod + json_key: ((concourse-gcs-resources-service-account-key)) + versioned_file: gpdb_main/icw_planner_rhel8_dump/dump.sql.xz + +- name: terraform + type: terraform + source: + env: + AWS_ACCESS_KEY_ID: ((tf-machine-access-key-id)) + AWS_SECRET_ACCESS_KEY: ((tf-machine-secret-access-key)) + GOOGLE_CREDENTIALS: ((dp/dev/google-service-account-key)) + vars: + project_id: ((dp/dev/google-project-id)) + storage: + access_key_id: ((tf-machine-access-key-id)) + secret_access_key: ((tf-machine-secret-access-key)) + region_name: ((dp/aws-region)) + bucket: ((dp/tf-bucket-name)) + bucket_path: clusters-google/ + +- name: slack-alert + type: slack-notification + source: + url: ((dp/webhook_url)) + disable: false + +- name: gppkgs + type: gcs + icon: google + source: + bucket: ((dp/dev/gcs-ci-bucket)) + json_key: ((dp/dev/gcp_svc_acct_key)) + versioned_file: gpbackup/intermediates/gpbackup-gppkgs.tar.gz + +- name: ccp_src + type: git + icon: github-circle + source: + branch: ((dp/ccp-git-branch)) + private_key: ((gp-concourse-cluster-provisioner-git-key)) + uri: ((dp/ccp-git-remote)) + +- name: terraform.d + type: s3 + icon: amazon + source: + access_key_id: ((aws-bucket-access-key-id)) + bucket: ccp-terraform-provider-plugins + region_name: ((dp/aws-region)) + secret_access_key: ((aws-bucket-secret-access-key)) + versioned_file: plugin-cache-all.tgz + +## ====================================================================== +## _ +## __ _ _ __ ___| |__ ___ _ __ ___ +## / _` | '_ \ / __| '_ \ / _ \| '__/ __| +## | (_| | | | | (__| | | | (_) | | \__ \ +## \__,_|_| |_|\___|_| |_|\___/|_| |___/ +## ====================================================================== + +anchors: +- &ccp_default_params + action: create + delete_on_failure: true + generate_random_name: true + plugin_dir: ../../terraform.d/plugin-cache/linux_amd64 + terraform_source: ccp_src/google/ + +- &ccp_gen_cluster_default_params + AWS_ACCESS_KEY_ID: ((tf-machine-access-key-id)) + AWS_SECRET_ACCESS_KEY: ((tf-machine-secret-access-key)) + AWS_DEFAULT_REGION: ((dp/aws-region)) + BUCKET_PATH: clusters-google/ + BUCKET_NAME: ((dp/tf-bucket-name)) + CLOUD_PROVIDER: google + +- &destroy_params + action: destroy + plugin_dir: ../../terraform.d/plugin-cache/linux_amd64 + terraform_source: ccp_src/google/ + env_name_file: terraform/name + vars: + aws_instance-node-instance_type: t2.micro #t2.micro is ignored in destroy, but aws_instance-node-instance_type is required. 
+ aws_ebs_volume_type: standard + +- &ccp_destroy + put: terraform + params: + <<: *destroy_params + get_params: + action: destroy + +- &ccp_destroy_nvme + put: terraform + params: + <<: *destroy_params + terraform_source: ccp_src/google-nvme-block-device/ + get_params: + action: destroy + + +- &terraform_cluster + type: terraform + source: + env: + AWS_ACCESS_KEY_ID: ((tf-machine-access-key-id)) + AWS_SECRET_ACCESS_KEY: ((tf-machine-secret-access-key)) + GOOGLE_CREDENTIALS: ((dp/dev/google-service-account-key)) + vars: + project_id: ((dp/dev/google-service-account)) + storage: + access_key_id: ((tf-machine-access-key-id)) + secret_access_key: ((tf-machine-secret-access-key)) + region_name: ((dp/aws-region)) + # This is not parameterized, on purpose. All tfstates will go to this spot, + # and different teams will place their clusters' tfstate files under different paths. + bucket: gpdb5-pipeline-dynamic-terraform + bucket_path: clusters-google/ + + +- &slack_alert + put: slack-alert + params: + text: | + [regression/$BUILD_JOB_NAME] failed: + https://dp.ci.gpdb.pivotal.io/teams/main/pipelines/regression/jobs/$BUILD_JOB_NAME/builds/$BUILD_NAME + +## ====================================================================== +## _ _ +## (_) ___ | |__ ___ +## | |/ _ \| '_ \/ __| +## | | (_) | |_) \__ \ +## _/ |\___/|_.__/|___/ +## |__/ +## ====================================================================== + +jobs: +- name: build_binaries + plan: + - in_parallel: + - get: gpbackup_s3_plugin + trigger: true + - get: gpbackup_manager_src + trigger: true + - get: gpbackup + trigger: true + - get: pivnet_release_cache + - get: rocky8-gpdb7-image + - in_parallel: + - do: # RHEL8 + - task: build-go-binaries-rhel8 + file: gpbackup/ci/tasks/build-go-binaries.yml + image: rocky8-gpdb7-image + params: + OS: RHEL8 + - put: gpbackup-go-components-rhel8 + params: + file: go_components/go_components.tar.gz + +- name: build_gppkgs + plan: + - in_parallel: + - get: rocky8-gpdb6-image + - get: gpdb_src + resource: gpdb6_src + - get: gpbackup-go-components-rhel8 + trigger: true + passed: [build_binaries] + - get: bin_gpdb_6x_rhel8 + - get: gpbackup + passed: [build_binaries] + - get: gpbackup_ddboost_plugin + - get: pivnet_release_cache + - get: gpbackup-release-license + - task: gpbackup-tools-versions + image: rocky8-gpdb6-image + file: gpbackup/ci/tasks/gpbackup-tools-versions.yml + - in_parallel: + - do: # RHEL8 + - task: build-ddboost-RHEL8 + image: rocky8-gpdb6-image + file: gpbackup/ci/tasks/build-ddboost.yml + input_mapping: + bin_gpdb: bin_gpdb_6x_rhel8 + - task: tar-binaries-RHEL8 + image: rocky8-gpdb6-image + file: gpbackup/ci/tasks/build-os-tars.yml + input_mapping: + gpbackup-go-components: gpbackup-go-components-rhel8 + output_mapping: + gpbackup_tar: gpbackup_tar_rhel8 + - task: build_gppkgs-RHEL8 + image: rocky8-gpdb6-image + file: gpbackup/ci/tasks/build-gppkg.yml + input_mapping: + bin_gpdb: bin_gpdb_6x_rhel8 + gpbackup_tar: gpbackup_tar_rhel8 + output_mapping: + gppkgs: rhel8-gppkg + params: + OS: RHEL8 + - task: tar-gppkgs + image: rocky8-gpdb6-image + config: + platform: linux + inputs: + - name: rhel8-gppkg + outputs: + - name: gppkgs + run: + path: "sh" + args: + - -exc + - | + set -ex + mv rhel8-gppkg/* gppkgs/ + pushd gppkgs + tar cvzf gpbackup-gppkgs.tar.gz * + popd + - put: gppkgs + params: + file: gppkgs/gpbackup-gppkgs.tar.gz + +- name: regdb-GPDB6 + plan: + - in_parallel: + - get: weekly-trigger + trigger: true + - get: rocky8-gpdb6-image + - get: gpbackup + trigger: true + passed: 
[build_gppkgs] + - get: diffdb_src + - get: gpdb_binary + resource: bin_gpdb_6x_rhel8 + - get: ccp_src + - get: gpdb_src + resource: gpdb6_src + - get: gppkgs + - get: icw_dump + resource: icw_dump_GPDB6 + - get: terraform.d + params: + unpack: true + - put: terraform + params: + <<: *ccp_default_params + terraform_source: ccp_src/google-nvme-block-device/ + vars: + instance_type: n1-standard-8 + PLATFORM: rhel8 + - task: gen_cluster + params: + <<: *ccp_gen_cluster_default_params + PLATFORM: rhel8 + file: ccp_src/ci/tasks/gen_cluster.yml + - task: gpinitsystem + file: ccp_src/ci/tasks/gpinitsystem.yml + - task: setup-centos-env + image: rocky8-gpdb6-image + file: gpbackup/ci/tasks/setup-centos-env.yml + params: + default_ami_user: rhel + - task: icw-roundtrip + image: rocky8-gpdb6-image + file: gpbackup/ci/tasks/icw-roundtrip.yml + on_success: + <<: *ccp_destroy_nvme + on_failure: + *slack_alert + +- name: regdb-GPDB7 + plan: + - in_parallel: + - get: weekly-trigger + trigger: true + - get: rocky8-gpdb7-image + - get: gpbackup + trigger: true + passed: [build_gppkgs] + - get: gpdb_binary + resource: bin_gpdb_7x_rhel8 + - get: ccp_src + - get: gpdb_src + resource: gpdb_main_src + - get: gppkgs + - get: diffdb_src + - get: icw_dump + resource: icw_dump_GPDB7 + - get: terraform.d + params: + unpack: true + - put: terraform + params: + <<: *ccp_default_params + terraform_source: ccp_src/google-nvme-block-device/ + vars: + instance_type: n1-standard-8 + PLATFORM: rhel8-gpdb7 + - task: gen_cluster + params: + <<: *ccp_gen_cluster_default_params + PLATFORM: rhel8-gpdb7 + file: ccp_src/ci/tasks/gen_cluster.yml + - task: gpinitsystem + file: ccp_src/ci/tasks/gpinitsystem.yml + - task: setup-centos-env + image: rocky8-gpdb7-image + file: gpbackup/ci/tasks/setup-centos-env.yml + params: + default_ami_user: rhel + - task: icw-roundtrip + image: rocky8-gpdb7-image + file: gpbackup/ci/tasks/icw-roundtrip.yml + on_success: + <<: *ccp_destroy_nvme + on_failure: + *slack_alert diff --git a/ci/scale/gpload_yaml/customer.yml b/ci/scale/gpload_yaml/customer.yml index f5b737c30..ad5dae9d9 100644 --- a/ci/scale/gpload_yaml/customer.yml +++ b/ci/scale/gpload_yaml/customer.yml @@ -1,7 +1,7 @@ VERSION: 1.0.0.1 DATABASE: scaletestdb USER: gpadmin -HOST: mdw +HOST: cdw GPLOAD: INPUT: - SOURCE: diff --git a/ci/scale/gpload_yaml/lineitem.yml b/ci/scale/gpload_yaml/lineitem.yml index 916a6ce1b..e19cd6947 100644 --- a/ci/scale/gpload_yaml/lineitem.yml +++ b/ci/scale/gpload_yaml/lineitem.yml @@ -1,7 +1,7 @@ VERSION: 1.0.0.1 DATABASE: scaletestdb USER: gpadmin -HOST: mdw +HOST: cdw GPLOAD: INPUT: - SOURCE: diff --git a/ci/scale/gpload_yaml/nation.yml b/ci/scale/gpload_yaml/nation.yml index 7fde8df36..64a79456b 100644 --- a/ci/scale/gpload_yaml/nation.yml +++ b/ci/scale/gpload_yaml/nation.yml @@ -1,7 +1,7 @@ VERSION: 1.0.0.1 DATABASE: scaletestdb USER: gpadmin -HOST: mdw +HOST: cdw GPLOAD: INPUT: - SOURCE: diff --git a/ci/scale/gpload_yaml/orders.yml b/ci/scale/gpload_yaml/orders.yml index 51fd2d1fb..3d4860b69 100644 --- a/ci/scale/gpload_yaml/orders.yml +++ b/ci/scale/gpload_yaml/orders.yml @@ -1,7 +1,7 @@ VERSION: 1.0.0.1 DATABASE: scaletestdb USER: gpadmin -HOST: mdw +HOST: cdw GPLOAD: INPUT: - SOURCE: diff --git a/ci/scale/gpload_yaml/orders_2.yml b/ci/scale/gpload_yaml/orders_2.yml index ef9e0fc80..5b07ff220 100644 --- a/ci/scale/gpload_yaml/orders_2.yml +++ b/ci/scale/gpload_yaml/orders_2.yml @@ -1,7 +1,7 @@ VERSION: 1.0.0.1 DATABASE: scaletestdb USER: gpadmin -HOST: mdw +HOST: cdw GPLOAD: INPUT: - SOURCE: diff 
--git a/ci/scale/gpload_yaml/orders_3.yml b/ci/scale/gpload_yaml/orders_3.yml index 90531d809..01260cffa 100644 --- a/ci/scale/gpload_yaml/orders_3.yml +++ b/ci/scale/gpload_yaml/orders_3.yml @@ -1,7 +1,7 @@ VERSION: 1.0.0.1 DATABASE: scaletestdb USER: gpadmin -HOST: mdw +HOST: cdw GPLOAD: INPUT: - SOURCE: diff --git a/ci/scale/gpload_yaml/part.yml b/ci/scale/gpload_yaml/part.yml index 06354cf3c..38fdbc7aa 100644 --- a/ci/scale/gpload_yaml/part.yml +++ b/ci/scale/gpload_yaml/part.yml @@ -1,7 +1,7 @@ VERSION: 1.0.0.1 DATABASE: scaletestdb USER: gpadmin -HOST: mdw +HOST: cdw GPLOAD: INPUT: - SOURCE: diff --git a/ci/scale/gpload_yaml/partsupp.yml b/ci/scale/gpload_yaml/partsupp.yml index ce84aecbc..645ae2e3e 100644 --- a/ci/scale/gpload_yaml/partsupp.yml +++ b/ci/scale/gpload_yaml/partsupp.yml @@ -1,7 +1,7 @@ VERSION: 1.0.0.1 DATABASE: scaletestdb USER: gpadmin -HOST: mdw +HOST: cdw GPLOAD: INPUT: - SOURCE: diff --git a/ci/scale/gpload_yaml/region.yml b/ci/scale/gpload_yaml/region.yml index 2f5ab5165..9501ac413 100644 --- a/ci/scale/gpload_yaml/region.yml +++ b/ci/scale/gpload_yaml/region.yml @@ -1,7 +1,7 @@ VERSION: 1.0.0.1 DATABASE: scaletestdb USER: gpadmin -HOST: mdw +HOST: cdw GPLOAD: INPUT: - SOURCE: diff --git a/ci/scale/gpload_yaml/supplier.yml b/ci/scale/gpload_yaml/supplier.yml index bbeb0ab69..f64c8b796 100644 --- a/ci/scale/gpload_yaml/supplier.yml +++ b/ci/scale/gpload_yaml/supplier.yml @@ -1,7 +1,7 @@ VERSION: 1.0.0.1 DATABASE: scaletestdb USER: gpadmin -HOST: mdw +HOST: cdw GPLOAD: INPUT: - SOURCE: diff --git a/ci/scale/scale_pipeline.yml b/ci/scale/scale_pipeline.yml index 2e8a35880..dbb6255d7 100644 --- a/ci/scale/scale_pipeline.yml +++ b/ci/scale/scale_pipeline.yml @@ -160,7 +160,7 @@ resources: source: bucket: gpbackup-release-licenses json_key: ((dp/dev/gcp_svc_acct_key)) - regexp: open_source_license_VMware_Tanzu_Greenplum_Backup_and_Restore_(.*)_.*.txt + regexp: open_source_license_VMware_Greenplum_Backup_and_Restore_(.*)_.*.txt - name: pivnet_release_cache type: s3 diff --git a/ci/scripts/all-tests.bash b/ci/scripts/all-tests.bash index 6baf87e15..2d1aa8d0c 100755 --- a/ci/scripts/all-tests.bash +++ b/ci/scripts/all-tests.bash @@ -4,19 +4,19 @@ set -ex # setup cluster and install gpbackup tools using gppkg ccp_src/scripts/setup_ssh_to_cluster.sh -out=$(ssh -t mdw 'source env.sh && psql postgres -c "select version();"') +out=$(ssh -t cdw 'source env.sh && psql postgres -c "select version();"') TEST_GPDB_VERSION=$(echo ${out} | sed -n 's/.*Greenplum Database \([0-9].[0-9]\+.[0-9]\+\).*/\1/p') GPDB_VERSION=$(echo ${TEST_GPDB_VERSION} | head -c 1) mkdir -p /tmp/untarred tar -xzf gppkgs/gpbackup-gppkgs.tar.gz -C /tmp/untarred -scp /tmp/untarred/gpbackup_tools*gp${GPDB_VERSION}*RHEL*.gppkg mdw:/home/gpadmin -ssh -t mdw "source env.sh; gppkg -q gpbackup*gp*.gppkg | grep 'is installed' || gppkg -i gpbackup_tools*.gppkg" +scp /tmp/untarred/gpbackup_tools*gp${GPDB_VERSION}*${OS}*.gppkg cdw:/home/gpadmin +ssh -t cdw "source env.sh; gppkg -q gpbackup*gp*.gppkg | grep 'is installed' || gppkg -i gpbackup_tools*.gppkg" # place correct tarballs in gpbackup dir for consumption if [[ -f "bin_gpbackup_1.0.0_and_1.7.1/gpbackup_bins_1.0.0_and_1.7.1.tar.gz" ]] && \ [[ "${GPBACKUP_VERSION}" != "" ]] ; then tar -xzf bin_gpbackup_1.0.0_and_1.7.1/gpbackup_bins_1.0.0_and_1.7.1.tar.gz -C /tmp/ - scp -r /tmp/${GPBACKUP_VERSION} mdw:/tmp + scp -r /tmp/${GPBACKUP_VERSION} cdw:/tmp fi cat <