diff --git a/.ci/gcb-pr-downstream-generation-and-test.yml b/.ci/gcb-pr-downstream-generation-and-test.yml index a1dc4555ad6b..f888a22b65da 100644 --- a/.ci/gcb-pr-downstream-generation-and-test.yml +++ b/.ci/gcb-pr-downstream-generation-and-test.yml @@ -265,6 +265,7 @@ steps: - $BUILD_ID - $PROJECT_ID - "23" # Build step + - "true" - name: 'gcr.io/graphite-docker-images/go-plus' entrypoint: '/workspace/.ci/scripts/go-plus/magician/exec.sh' diff --git a/.ci/magician/cmd/check_cassettes.go b/.ci/magician/cmd/check_cassettes.go index bbf94d800933..b27767874e61 100644 --- a/.ci/magician/cmd/check_cassettes.go +++ b/.ci/magician/cmd/check_cassettes.go @@ -77,7 +77,7 @@ var checkCassettesCmd = &cobra.Command{ ctlr := source.NewController(env["GOPATH"], "modular-magician", githubToken, rnr) - vt, err := vcr.NewTester(env, "ci-vcr-cassettes", "vcr-check-cassettes", rnr) + vt, err := vcr.NewTester(env, "ci-vcr-cassettes", "vcr-check-cassettes", rnr, false) if err != nil { return fmt.Errorf("error creating VCR tester: %w", err) } diff --git a/.ci/magician/cmd/collect_nightly_test_status.go b/.ci/magician/cmd/collect_nightly_test_status.go index b940f9958444..98ee94bd3410 100644 --- a/.ci/magician/cmd/collect_nightly_test_status.go +++ b/.ci/magician/cmd/collect_nightly_test_status.go @@ -43,6 +43,7 @@ type TestInfo struct { Status string `json:"status"` Service string `json:"service"` Resource string `json:"resource"` + CommitSha string `json:"commit_sha"` ErrorMessage string `json:"error_message"` LogLink string `json:"log_link"` ProviderVersion string `json:"provider_version"` @@ -189,6 +190,7 @@ func createTestReport(pVersion provider.Version, tc TeamcityClient, gcs Cloudsto Status: testResult.Status, Service: serviceName, Resource: convertTestNameToResource(testResult.Name), + CommitSha: build.Number, ErrorMessage: errorMessage, LogLink: logLink, ProviderVersion: strings.ToUpper(pVersion.String()), diff --git a/.ci/magician/cmd/generate_comment.go b/.ci/magician/cmd/generate_comment.go index 976435500c75..d11cc81962ae 100644 --- a/.ci/magician/cmd/generate_comment.go +++ b/.ci/magician/cmd/generate_comment.go @@ -89,7 +89,7 @@ type diffCommentData struct { MissingServiceLabels []string MissingTests map[string]*MissingTestInfo MissingDocs *MissingDocsSummary - AddedResources []string + MultipleResources []string Errors []Errors } @@ -377,7 +377,8 @@ func execGenerateComment(prNumber int, ghTokenMagicModules, buildId, buildStep, // Check if multiple resources were added. 
multipleResourcesState := "success" - if len(uniqueAddedResources) > 1 { + data.MultipleResources = multipleResources(maps.Keys(uniqueAddedResources)) + if len(data.MultipleResources) > 1 { multipleResourcesState = "failure" for _, label := range pullRequest.Labels { if label.Name == allowMultipleResourcesLabel { @@ -391,8 +392,6 @@ func execGenerateComment(prNumber int, ghTokenMagicModules, buildId, buildStep, fmt.Printf("Error posting terraform-provider-multiple-resources build status for pr %d commit %s: %v\n", prNumber, commitSha, err) errors["Other"] = append(errors["Other"], "Failed to update missing-service-labels status check with state: "+multipleResourcesState) } - data.AddedResources = maps.Keys(uniqueAddedResources) - slices.Sort(data.AddedResources) // Compute affected resources based on changed files changedFilesAffectedResources := map[string]struct{}{} @@ -642,6 +641,38 @@ func formatDiffComment(data diffCommentData) (string, error) { return sb.String(), nil } +// addedMultipleResources returns a sorted slice of resource names that are considered "separate" resources. +// In particular, IAM resources are merged with the parent resource as part of this check. +func multipleResources(resources []string) []string { + if len(resources) == 0 { + return nil + } + iam := map[string]struct{}{} + final := map[string]struct{}{} + + for _, r := range resources { + if k, found := strings.CutSuffix(r, "_iam_member"); found { + iam[k] = struct{}{} + } else if k, found := strings.CutSuffix(r, "_iam_binding"); found { + iam[k] = struct{}{} + } else if k, found := strings.CutSuffix(r, "_iam_policy"); found { + iam[k] = struct{}{} + } else { + final[k] = struct{}{} + } + } + + for r, _ := range iam { + if _, ok := final[r]; !ok { + final[r+"_iam_*"] = struct{}{} + } + } + + ret := maps.Keys(final) + slices.Sort(ret) + return ret +} + var resourceFileRegexp = regexp.MustCompile(`^.*/services/[^/]+/(?:data_source_|resource_|iam_)(.*?)(?:_test|_sweeper|_iam_test|_generated_test|_internal_test)?.go`) var resourceDocsRegexp = regexp.MustCompile(`^.*website/docs/(?:r|d)/(.*).html.markdown`) diff --git a/.ci/magician/cmd/generate_comment_test.go b/.ci/magician/cmd/generate_comment_test.go index bd51b87c60ff..8fb5ebd664c9 100644 --- a/.ci/magician/cmd/generate_comment_test.go +++ b/.ci/magician/cmd/generate_comment_test.go @@ -246,7 +246,7 @@ func TestFormatDiffComment(t *testing.T) { }, "multiple resources are displayed": { data: diffCommentData{ - AddedResources: []string{"google_redis_instance", "google_alloydb_cluster"}, + MultipleResources: []string{"google_redis_instance", "google_alloydb_cluster"}, }, expectedStrings: []string{ "## Diff report", @@ -352,6 +352,72 @@ func TestFormatDiffComment(t *testing.T) { } } +func TestMultipleResources(t *testing.T) { + cases := []struct { + name string + resources []string + want []string + }{ + { + name: "no resources", + }, + { + name: "single non-iam", + resources: []string{"google_redis_instance"}, + want: []string{"google_redis_instance"}, + }, + { + name: "multiple non-iam", + resources: []string{"google_redis_instance", "google_alloydb_cluster"}, + want: []string{"google_alloydb_cluster", "google_redis_instance"}, + }, + { + name: "single iam only", + resources: []string{"google_redis_instance_iam_member", "google_redis_instance_iam_policy", "google_redis_instance_iam_binding"}, + want: []string{"google_redis_instance_iam_*"}, + }, + { + name: "single iam with parent", + resources: []string{"google_redis_instance_iam_member", 
"google_redis_instance_iam_policy", "google_redis_instance_iam_binding", "google_redis_instance"}, + want: []string{"google_redis_instance"}, + }, + { + name: "multiple iam", + resources: []string{ + "google_redis_instance_iam_member", + "google_redis_instance_iam_policy", + "google_redis_instance_iam_binding", + "google_alloydb_cluster_iam_member", + "google_alloydb_cluster_iam_policy", + "google_alloydb_cluster_iam_binding", + }, + want: []string{"google_alloydb_cluster_iam_*", "google_redis_instance_iam_*"}, + }, + { + name: "multiple iam with parent", + resources: []string{ + "google_redis_instance_iam_member", + "google_redis_instance_iam_policy", + "google_redis_instance_iam_binding", + "google_alloydb_cluster_iam_member", + "google_alloydb_cluster_iam_policy", + "google_alloydb_cluster_iam_binding", + "google_redis_instance", + }, + want: []string{"google_alloydb_cluster_iam_*", "google_redis_instance"}, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + got := multipleResources(tc.resources) + assert.Equal(t, tc.want, got) + }) + } +} + func TestFileToResource(t *testing.T) { cases := map[string]struct { path string diff --git a/.ci/magician/cmd/templates/DIFF_COMMENT.md.tmpl b/.ci/magician/cmd/templates/DIFF_COMMENT.md.tmpl index 5c2942060dc0..740728959a43 100644 --- a/.ci/magician/cmd/templates/DIFF_COMMENT.md.tmpl +++ b/.ci/magician/cmd/templates/DIFF_COMMENT.md.tmpl @@ -51,10 +51,10 @@ If you believe this detection to be incorrect please raise the concern with your An `override-missing-service-label` label can be added to allow merging. {{end}} -{{- if gt (len .AddedResources) 1 }} +{{- if gt (len .MultipleResources) 1 }} ## Multiple resources added -This PR adds multiple new resources: {{range $i, $resource := .AddedResources}}{{ if gt $i 0}}, {{end}}`{{$resource}}`{{end}}. This makes review significantly more difficult. Please split it into multiple PRs, one per resource. +This PR adds multiple new resources: {{range $i, $resource := .MultipleResources}}{{ if gt $i 0}}, {{end}}`{{$resource}}`{{end}}. This makes review significantly more difficult. Please split it into multiple PRs, one per resource. An `override-multiple-resources` label can be added to allow merging. {{end}} diff --git a/.ci/magician/cmd/test_eap_vcr.go b/.ci/magician/cmd/test_eap_vcr.go index 7f288d0d9f8a..28433895b0a5 100644 --- a/.ci/magician/cmd/test_eap_vcr.go +++ b/.ci/magician/cmd/test_eap_vcr.go @@ -120,7 +120,7 @@ The following environment variables are required: if err != nil { return err } - vt, err := vcr.NewTester(env, "ci-vcr-cassettes", "ci-vcr-logs", rnr) + vt, err := vcr.NewTester(env, "ci-vcr-cassettes", "ci-vcr-logs", rnr, false) if err != nil { return err } diff --git a/.ci/magician/cmd/test_terraform_vcr.go b/.ci/magician/cmd/test_terraform_vcr.go index 95091556dfae..f841308b5ea2 100644 --- a/.ci/magician/cmd/test_terraform_vcr.go +++ b/.ci/magician/cmd/test_terraform_vcr.go @@ -93,7 +93,8 @@ It expects the following arguments: 3. Build ID 4. Project ID where Cloud Builds are located 5. Build step number - + 6. 
Enable async upload cassettes + The following environment variables are required: ` + listTTVRequiredEnvironmentVariables(), RunE: func(cmd *cobra.Command, args []string) error { @@ -134,13 +135,17 @@ The following environment variables are required: } ctlr := source.NewController(env["GOPATH"], "modular-magician", env["GITHUB_TOKEN_DOWNSTREAMS"], rnr) - vt, err := vcr.NewTester(env, "ci-vcr-cassettes", "ci-vcr-logs", rnr) - if err != nil { - return fmt.Errorf("error creating VCR tester: %w", err) + if len(args) < 5 { + return fmt.Errorf("wrong number of arguments %d, expected >=5", len(args)) + } + enableAsyncUploadCassettes := false + if len(args) > 5 { + enableAsyncUploadCassettes = strings.ToLower(args[5]) == "true" } - if len(args) != 5 { - return fmt.Errorf("wrong number of arguments %d, expected 5", len(args)) + vt, err := vcr.NewTester(env, "ci-vcr-cassettes", "ci-vcr-logs", rnr, enableAsyncUploadCassettes) + if err != nil { + return fmt.Errorf("error creating VCR tester: %w", err) } return execTestTerraformVCR(args[0], args[1], args[2], args[3], args[4], baseBranch, gh, rnr, ctlr, vt) @@ -260,10 +265,11 @@ func execTestTerraformVCR(prNumber, mmCommitSha, buildID, projectID, buildStep, } if len(replayingResult.FailedTests) > 0 { recordingResult, recordingErr := vt.RunParallel(vcr.RunOptions{ - Mode: vcr.Recording, - Version: provider.Beta, - TestDirs: testDirs, - Tests: replayingResult.FailedTests, + Mode: vcr.Recording, + Version: provider.Beta, + TestDirs: testDirs, + Tests: replayingResult.FailedTests, + UploadBranchName: newBranch, }) if recordingErr != nil { testState = "failure" diff --git a/.ci/magician/cmd/vcr_cassette_update.go b/.ci/magician/cmd/vcr_cassette_update.go index c03ef43ce470..69e1bf1ade76 100644 --- a/.ci/magician/cmd/vcr_cassette_update.go +++ b/.ci/magician/cmd/vcr_cassette_update.go @@ -103,7 +103,7 @@ var vcrCassetteUpdateCmd = &cobra.Command{ } ctlr := source.NewController(env["GOPATH"], "hashicorp", env["GITHUB_TOKEN_CLASSIC"], rnr) - vt, err := vcr.NewTester(env, "ci-vcr-cassettes", "", rnr) + vt, err := vcr.NewTester(env, "ci-vcr-cassettes", "", rnr, true) if err != nil { return fmt.Errorf("error creating VCR tester: %w", err) } diff --git a/.ci/magician/cmd/vcr_cassette_update_test.go b/.ci/magician/cmd/vcr_cassette_update_test.go index fdb48b5447c2..f2380e685982 100644 --- a/.ci/magician/cmd/vcr_cassette_update_test.go +++ b/.ci/magician/cmd/vcr_cassette_update_test.go @@ -407,7 +407,7 @@ func TestExecVCRCassetteUpdate(t *testing.T) { ctlr := source.NewController("gopath", "hashicorp", "token", rnr) vt, err := vcr.NewTester(map[string]string{ "SA_KEY": "sa_key", - }, "ci-vcr-cassettes", "", rnr) + }, "ci-vcr-cassettes", "", rnr, false) if err != nil { t.Fatalf("Failed to create new tester: %v", err) } diff --git a/.ci/magician/github/membership_data.go b/.ci/magician/github/membership_data.go index 4487104ee3cc..7e5e7da8a5a9 100644 --- a/.ci/magician/github/membership_data.go +++ b/.ci/magician/github/membership_data.go @@ -86,8 +86,8 @@ var ( "melinath": { vacations: []Vacation{ { - startDate: newDate(2025, 9, 17), - endDate: newDate(2025, 9, 22), + startDate: newDate(2025, 12, 18), + endDate: newDate(2026, 1, 6), }, }, }, diff --git a/.ci/magician/go.mod b/.ci/magician/go.mod index 923aecc58206..4ce6ecf4d5f9 100644 --- a/.ci/magician/go.mod +++ b/.ci/magician/go.mod @@ -15,6 +15,7 @@ require ( require ( cloud.google.com/go/storage v1.50.0 + github.com/fsnotify/fsnotify v1.9.0 github.com/google/go-cmp v0.6.0 github.com/google/go-github/v68 v68.0.0 
github.com/otiai10/copy v1.12.0 diff --git a/.ci/magician/go.sum b/.ci/magician/go.sum index d5db267de18d..8de650bc2e6e 100644 --- a/.ci/magician/go.sum +++ b/.ci/magician/go.sum @@ -57,6 +57,8 @@ github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6 github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= diff --git a/.ci/magician/vcr/tester.go b/.ci/magician/vcr/tester.go index 1cef793568f7..679c94273e95 100644 --- a/.ci/magician/vcr/tester.go +++ b/.ci/magician/vcr/tester.go @@ -4,12 +4,16 @@ import ( "fmt" "io/fs" "magician/provider" + "math" "path/filepath" "regexp" "sort" "strconv" "strings" "sync" + "time" + + "github.com/fsnotify/fsnotify" ) type Result struct { @@ -60,6 +64,11 @@ type Tester struct { cassettePaths map[provider.Version]string // where cassettes are relative to baseDir by version logPaths map[logKey]string // where logs are relative to baseDir by version and mode repoPaths map[provider.Version]string // relative paths of already cloned repos by version + + // the following are for async upload cassettes + enableAsyncUploadCassettes bool + watcher *fsnotify.Watcher + uploadFunc func(head string, version provider.Version, fileName string) error } const accTestParallelism = 32 @@ -116,7 +125,7 @@ var safeToLog = map[string]bool{ } // true if shown, false if hidden (default false) // Create a new tester in the current working directory and write the service account key file. -func NewTester(env map[string]string, cassetteBucket, logBucket string, rnr ExecRunner) (*Tester, error) { +func NewTester(env map[string]string, cassetteBucket, logBucket string, rnr ExecRunner, enableAsyncUpload bool) (*Tester, error) { var saKeyPath string if saKeyVal, ok := env["SA_KEY"]; ok { saKeyPath = "sa_key.json" @@ -124,7 +133,7 @@ func NewTester(env map[string]string, cassetteBucket, logBucket string, rnr Exec return nil, err } } - return &Tester{ + vt := &Tester{ env: env, rnr: rnr, cassetteBucket: cassetteBucket, @@ -134,7 +143,13 @@ func NewTester(env map[string]string, cassetteBucket, logBucket string, rnr Exec cassettePaths: make(map[provider.Version]string, provider.NumVersions), logPaths: make(map[logKey]string, provider.NumVersions*numModes), repoPaths: make(map[provider.Version]string, provider.NumVersions), - }, nil + } + + if enableAsyncUpload { + vt.enableAsyncUploadCassettes = true + vt.uploadFunc = vt.uploadOneCassetteFile + } + return vt, nil } func (vt *Tester) SetRepoPath(version provider.Version, repoPath string) { @@ -194,10 +209,11 @@ func (vt *Tester) LogPath(mode Mode, version provider.Version) string { } type RunOptions struct { - Mode Mode - Version provider.Version - TestDirs []string - Tests []string + Mode Mode + Version provider.Version + TestDirs []string + Tests []string + UploadBranchName string } // Run the vcr tests in the given mode and provider version and return the result. 
@@ -348,6 +364,19 @@ func (vt *Tester) RunParallel(opt RunOptions) (Result, error) { return Result{}, fmt.Errorf("error creating cassette dir: %v", err) } vt.cassettePaths[opt.Version] = cassettePath + + if vt.enableAsyncUploadCassettes { + w, err := fsnotify.NewWatcher() + if err != nil { + return Result{}, fmt.Errorf("failed to create watcher") + } + defer w.Close() + if err := w.Add(cassettePath); err != nil { + return Result{}, fmt.Errorf("failed to add cassette path into watcher") + } + vt.watcher = w + go vt.asyncUploadCassettes(opt.Version, opt.UploadBranchName, w) + } } running := make(chan struct{}, parallelJobs) @@ -534,6 +563,77 @@ func (vt *Tester) UploadLogs(opts UploadLogsOptions) error { return nil } +func (vt *Tester) asyncUploadCassettes(version provider.Version, branch string, w *fsnotify.Watcher) error { + var ( + waitFor = 100 * time.Millisecond + mu sync.Mutex + timers = make(map[string]*time.Timer) + + // Callback we run. + cb = func(e fsnotify.Event) { + err := vt.uploadFunc(branch, version, e.Name) + if err != nil { + fmt.Println("upload failed: ", err) + } + mu.Lock() + delete(timers, e.Name) + mu.Unlock() + } + ) + + for { + select { + case err, ok := <-w.Errors: + if !ok { // Channel was closed (i.e. Watcher.Close() was called). + return nil + } + fmt.Println(err) + case e, ok := <-w.Events: + if !ok { // Channel was closed (i.e. Watcher.Close() was called). + return nil + } + // ignore everything outside of Create and Write. + if !e.Has(fsnotify.Create) && !e.Has(fsnotify.Write) { + continue + } + + // Get timer. + mu.Lock() + t, ok := timers[e.Name] + mu.Unlock() + + // No timer yet, so create one. + if !ok { + t = time.AfterFunc(math.MaxInt64, func() { cb(e) }) + t.Stop() + + mu.Lock() + timers[e.Name] = t + mu.Unlock() + } + + // Reset the timer for this path, so it will start from 100ms again. + t.Reset(waitFor) + } + } +} + +func (vt *Tester) uploadOneCassetteFile(head string, version provider.Version, fileName string) error { + uploadPath := fmt.Sprintf("gs://%s/%s/refs/heads/%s/fixtures/", vt.cassetteBucket, version, head) + args := []string{ + "-m", + "-q", + "cp", + fileName, + uploadPath, + } + fmt.Printf("Uploading %s to %s: %v\n", fileName, uploadPath, "gsutil "+strings.Join(args, " ")) + if _, err := vt.rnr.Run("gsutil", args, nil); err != nil { + return fmt.Errorf("error uploading file %s: %s", fileName, err) + } + return nil +} + func (vt *Tester) UploadCassettes(head string, version provider.Version) error { cassettePath, ok := vt.cassettePaths[version] if !ok { diff --git a/.gitignore b/.gitignore index d9b4e4ec3b71..80512bc3d8ea 100644 --- a/.gitignore +++ b/.gitignore @@ -65,4 +65,5 @@ tpgtools/serialization.go /.ijwb/ MODULE.bazel.lock go.work -go.work.sum \ No newline at end of file +go.work.sum +terraform-provider-google/ diff --git a/docs/content/reference/metadata.md b/docs/content/reference/metadata.md index f97b9bc17955..b8956cba38bc 100644 --- a/docs/content/reference/metadata.md +++ b/docs/content/reference/metadata.md @@ -11,39 +11,39 @@ This page documents all properties for metadata. Metadata does not impact the pr ### `resource` -The name of the Terraform resource e.g., "google_cloudfunctions2_function". +The name of the Terraform resource. For example, "google_cloudfunctions2_function". ### `generation_type` -The generation method used to create the Terraform resource e.g., "mmv1", "dcl", "handwritten". +The generation method used to create the Terraform resource. For example, "mmv1", "dcl", "handwritten". 
## Optional ### `api_service_name` -The base name of the API used for this resource e.g., "cloudfunctions.googleapis.com". +The base name of the API used for this resource. For example, "cloudfunctions.googleapis.com". ### `api_version` -The version of the API used for this resource e.g., "v2". +The version of the API used for this resource. For example, "v2". ### `api_resource_type_kind` -The API "resource type kind" used for this resource e.g., "Function". +The API "resource type kind" used for this resource. For example, "Function". ### `cai_asset_name_format` -The custom CAI asset name format for this resource is typically specified (e.g., //cloudsql.googleapis.com/projects/{{project}}/instances/{{name}}). If this format is not provided, the Terraform resource ID format is used instead. +The custom CAI asset name format for this resource is typically specified (for example, //cloudsql.googleapis.com/projects/{{project}}/instances/{{name}}). This should only have a value if it's different than the Terraform resource ID format. ### `api_variant_patterns` -The API URL patterns used by this resource that represent variants e.g., "folders/{folder}/feeds/{feed}". Each pattern must match the value defined in the API exactly. The use of `api_variant_patterns` is only meaningful when the resource type has multiple parent types available. +The API URL patterns used by this resource that represent variants. For example, "folders/{folder}/feeds/{feed}". Each pattern must match the value defined in the API exactly. The use of `api_variant_patterns` is only meaningful when the resource type has multiple parent types available. ### `fields` The list of fields used by this resource. Each field can contain the following attributes: -- `api_field`: The name of the field in the REST API, including the path e.g., "buildConfig.source.storageSource.bucket". -- `field`: The name of the field in Terraform, including the path e.g., "build_config.source.storage_source.bucket". Defaults to the value of `api_field` converted to snake_case. +- `api_field`: The name of the field in the REST API, including the path. For example, "buildConfig.source.storageSource.bucket". +- `field`: The name of the field in Terraform, including the path. For example, "build_config.source.storage_source.bucket". Defaults to the value of `api_field` converted to snake_case. - `provider_only`: If true, the field is only present in the provider. This primarily applies for virtual fields and url-only parameters. When set to true, `field` should be set and `api_field` should be left empty. Default: `false`. - `json`: If true, this is a JSON field which "covers" all child API fields. As a special case, JSON fields which cover an entire resource can have `api_field` set to `*`. 
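For context on the `fields` attributes documented above: they correspond one-to-one with the yaml tags on the `metadata.Field` struct added in mmv1/api/metadata/field.go later in this diff. Below is a minimal, self-contained sketch of what two of the documented cases serialize to — an API-backed field whose Terraform name is the default snake_case conversion of `api_field`, and a provider-only field. The resource and field names are hypothetical, and gopkg.in/yaml.v3 is assumed purely for illustration.

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v3" // assumed here purely for illustration
)

// Field mirrors the struct added in mmv1/api/metadata/field.go,
// including its yaml tags.
type Field struct {
	ApiField     string `yaml:"api_field,omitempty"`
	Field        string `yaml:"field,omitempty"`
	ProviderOnly bool   `yaml:"provider_only,omitempty"`
	Json         bool   `yaml:"json,omitempty"`
}

func main() {
	// Hypothetical entries: an API-backed field whose Terraform name is the
	// default snake_case conversion of api_field (so `field` is omitted),
	// and a provider-only virtual field with no API counterpart.
	fields := []Field{
		{ApiField: "buildConfig.source.storageSource.bucket"},
		{Field: "deletion_protection", ProviderOnly: true},
	}
	out, err := yaml.Marshal(fields)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(out))
	// Prints:
	// - api_field: buildConfig.source.storageSource.bucket
	// - field: deletion_protection
	//   provider_only: true
}
```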
diff --git a/mmv1/BUILD.bazel b/mmv1/BUILD.bazel index 2e078b844ddc..de4cbc5c7906 100644 --- a/mmv1/BUILD.bazel +++ b/mmv1/BUILD.bazel @@ -7,6 +7,7 @@ go_library( visibility = ["//visibility:private"], deps = [ "//mmv1/api", + "//mmv1/google", "//mmv1/loader", "//mmv1/openapi_generate", "//mmv1/provider", diff --git a/mmv1/api/compiler.go b/mmv1/api/compiler.go index f4229345271d..c02454ca269c 100644 --- a/mmv1/api/compiler.go +++ b/mmv1/api/compiler.go @@ -14,24 +14,19 @@ package api import ( - "bytes" "log" "os" "github.com/GoogleCloudPlatform/magic-modules/mmv1/google" ) -func Compile(yamlPath string, obj interface{}, overrideDir string) { +func Compile(yamlPath string, obj interface{}) { objYaml, err := os.ReadFile(yamlPath) if err != nil { log.Fatalf("Cannot open the file: %s", yamlPath) } - if overrideDir != "" { - objYaml = bytes.ReplaceAll(objYaml, []byte("{{override_path}}"), []byte(overrideDir)) - } - yamlValidator := google.YamlValidator{} yamlValidator.Parse(objYaml, obj, yamlPath) } diff --git a/mmv1/api/metadata/field.go b/mmv1/api/metadata/field.go new file mode 100644 index 000000000000..0e93820aed0a --- /dev/null +++ b/mmv1/api/metadata/field.go @@ -0,0 +1,78 @@ +package metadata + +import ( + "slices" + "strings" + + "github.com/GoogleCloudPlatform/magic-modules/mmv1/api" + "github.com/GoogleCloudPlatform/magic-modules/mmv1/google" +) + +func FromProperties(props []*api.Type) []Field { + // Sort props by lineage + slices.SortFunc(props, func(a, b *api.Type) int { + if strings.Join(a.Lineage(), ".") < strings.Join(b.Lineage(), ".") { + return -1 + } + return 1 + }) + + var fields []Field + for _, p := range props { + // Skip non-maps with nested fields + if !p.IsA("Map") && len(p.NestedProperties()) > 0 { + continue + } + f := Field{ + Json: p.IsJsonField(), + ProviderOnly: p.ProviderOnly(), + } + lineage := p.Lineage() + apiLineage := p.ApiLineage() + if !p.ProviderOnly() { + f.ApiField = strings.Join(apiLineage, ".") + } + if p.ProviderOnly() || !IsDefaultLineage(lineage, apiLineage) { + f.Field = strings.Join(lineage, ".") + } + // For maps (which all have nested children), modify the entry slightly; the map field itself is skipped, + // but we need a `key` API field that corresponds to the key_name of the map field. + if p.IsA("Map") { + f.ApiField += ".key" + f.Field = strings.Join(append(lineage, p.KeyName), ".") + } + + fields = append(fields, f) + } + return fields +} + +// Field is a field in a metadata.yaml file. +type Field struct { + // The name of the field in the REST API, including the path. For example, "buildConfig.source.storageSource.bucket". + ApiField string `yaml:"api_field,omitempty"` + // The name of the field in Terraform, including the path. For example, "build_config.source.storage_source.bucket". Defaults to the value + // of `api_field` converted to snake_case. + Field string `yaml:"field,omitempty"` + // If true, the field is only present in the provider. This primarily applies for virtual fields and url-only parameters. When set to true, + // `field` should be set and `api_field` should be left empty. Default: `false`. + ProviderOnly bool `yaml:"provider_only,omitempty"` + // If true, this is a JSON field which "covers" all child API fields. As a special case, JSON fields which cover an entire resource can + // have `api_field` set to `*`. + Json bool `yaml:"json,omitempty"` +} + +// Returns true if the lineage is the default we'd expect for a field, and false otherwise. +// If any ancestor has a non-default lineage, this will return false. 
+func IsDefaultLineage(lineage, apiLineage []string) bool { + if len(lineage) != len(apiLineage) { + return false + } + for i, part := range lineage { + apiPart := apiLineage[i] + if part != google.Underscore(apiPart) { + return false + } + } + return true +} diff --git a/mmv1/api/metadata/field_test.go b/mmv1/api/metadata/field_test.go new file mode 100644 index 000000000000..c2142bec623b --- /dev/null +++ b/mmv1/api/metadata/field_test.go @@ -0,0 +1,260 @@ +package metadata + +import ( + "testing" + + "github.com/GoogleCloudPlatform/magic-modules/mmv1/api" + "github.com/GoogleCloudPlatform/magic-modules/mmv1/google" + "github.com/google/go-cmp/cmp" +) + +func TestFromProperties(t *testing.T) { + cases := []struct { + name string + resourceMetadata *api.Resource + virtualFields []*api.Type + parameters []*api.Type + properties []*api.Type + wantFields []Field + }{ + { + name: "json field", + properties: []*api.Type{ + { + Name: "fieldName", + CustomFlatten: "templates/terraform/custom_flatten/json_schema.tmpl", + }, + }, + wantFields: []Field{ + { + Json: true, + ApiField: "fieldName", + }, + }, + }, + { + name: "fine-grained resource field", + resourceMetadata: &api.Resource{ApiResourceField: "parentField"}, + properties: []*api.Type{ + { + Name: "fieldName", + }, + }, + wantFields: []Field{ + { + ApiField: "parentField.fieldName", + Field: "field_name", + }, + }, + }, + { + name: "provider-only", + properties: []*api.Type{ + { + Name: "fieldName", + UrlParamOnly: true, + }, + }, + wantFields: []Field{ + { + Field: "field_name", + ProviderOnly: true, + }, + }, + }, + { + name: "nested field", + properties: []*api.Type{ + { + Name: "root", + Type: "NestedObject", + Properties: []*api.Type{ + { + Name: "foo", + Type: "NestedObject", + Properties: []*api.Type{ + { + Name: "bars", + Type: "Array", + ItemType: &api.Type{ + Type: "NestedObject", + Properties: []*api.Type{ + { + Name: "fooBar", + Type: "String", + }, + }, + }, + }, + }, + }, + }, + }, + }, + wantFields: []Field{ + { + ApiField: "root.foo.bars.fooBar", + }, + }, + }, + { + name: "nested virtual", + virtualFields: []*api.Type{ + { + Name: "root", + Type: "NestedObject", + Properties: []*api.Type{ + { + Name: "foo", + Type: "String", + }, + }, + }, + }, + wantFields: []Field{ + { + Field: "root.foo", + ProviderOnly: true, + }, + }, + }, + { + name: "nested param", + parameters: []*api.Type{ + { + Name: "root", + Type: "NestedObject", + Properties: []*api.Type{ + { + Name: "foo", + Type: "String", + UrlParamOnly: true, + }, + }, + }, + }, + wantFields: []Field{ + { + Field: "root.foo", + ProviderOnly: true, + }, + }, + }, + { + name: "map", + properties: []*api.Type{ + { + Name: "root", + Type: "Map", + KeyName: "whatever", + ValueType: &api.Type{ + Type: "NestedObject", + Properties: []*api.Type{ + { + Name: "foo", + Type: "String", + }, + }, + }, + }, + }, + wantFields: []Field{ + { + Field: "root.whatever", + ApiField: "root.key", + }, + { + Field: "root.foo", + ApiField: "root.value.foo", + }, + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + r := tc.resourceMetadata + if r == nil { + r = &api.Resource{} + } + r.VirtualFields = tc.virtualFields + r.Parameters = tc.parameters + r.Properties = tc.properties + r.SetDefault(&api.Product{}) + + got := FromProperties(r.AllNestedProperties(google.Concat(r.RootProperties(), r.UserVirtualFields()))) + if diff := cmp.Diff(tc.wantFields, got); diff != "" { + t.Errorf("FromProperties() mismatch (-want +got):\n%s", diff) + } + }) + } +} + 
+func TestIsDefaultLineage(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + lineage []string + apiLineage []string + want bool + }{ + { + name: "empty", + lineage: []string{}, + apiLineage: []string{}, + want: true, + }, + { + name: "single field", + lineage: []string{"foo_bar"}, + apiLineage: []string{"fooBar"}, + want: true, + }, + { + name: "multiple fields", + lineage: []string{"foo_bar", "baz_moo"}, + apiLineage: []string{"fooBar", "bazMoo"}, + want: true, + }, + { + name: "longer lineage", + lineage: []string{"foo_bar", "baz_moo"}, + apiLineage: []string{"fooBar"}, + want: false, + }, + { + name: "longer apiLineage", + lineage: []string{"foo_bar"}, + apiLineage: []string{"fooBar", "bazMoo"}, + want: false, + }, + { + name: "parent override", + lineage: []string{"foo_bar", "baz_moo"}, + apiLineage: []string{"otherName", "bazMoo"}, + want: false, + }, + { + name: "child override", + lineage: []string{"foo_bar", "baz_moo"}, + apiLineage: []string{"fooBar", "otherName"}, + want: false, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + got := IsDefaultLineage(tc.lineage, tc.apiLineage) + if got != tc.want { + t.Errorf("IsDefaultLineage(%s) failed; want %t, got %t", tc.name, tc.want, got) + } + }) + } +} diff --git a/mmv1/api/metadata/metadata.go b/mmv1/api/metadata/metadata.go new file mode 100644 index 000000000000..8604ec6dd95e --- /dev/null +++ b/mmv1/api/metadata/metadata.go @@ -0,0 +1,71 @@ +package metadata + +import ( + "github.com/GoogleCloudPlatform/magic-modules/mmv1/api" + "github.com/GoogleCloudPlatform/magic-modules/mmv1/google" +) + +// FromResource returns a Metadata object based on a Resource. +func FromResource(r api.Resource) Metadata { + m := Metadata{ + Resource: r.TerraformName(), + GenerationType: "mmv1", + SourceFile: r.SourceYamlFile, + ApiServiceName: r.ProductMetadata.ServiceName(), + ApiVersion: r.ProductMetadata.ServiceVersion(), + ApiResourceTypeKind: r.ApiResourceTypeKind, + CaiAssetNameFormat: r.CAIFormatOverride(), + ApiVariantPatterns: r.ApiVariantPatterns, + AutogenStatus: r.AutogenStatus != "", + Fields: FromProperties(r.AllNestedProperties(google.Concat(r.RootProperties(), r.UserVirtualFields()))), + } + + if m.ApiVersion == "" { + m.ApiVersion = r.ServiceVersion() + } + if m.ApiResourceTypeKind == "" { + m.ApiResourceTypeKind = r.Name + } + + if r.HasSelfLink { + m.Fields = append(m.Fields, Field{ + ApiField: "selfLink", + }) + } + return m +} + +// Metadata represents a metadata.yaml file for a single Terraform resource. +type Metadata struct { + // The name of the Terraform resource. For example, "google_cloudfunctions2_function". + Resource string `yaml:"resource"` + + // The generation method used to create the Terraform resource. For example, "mmv1", "dcl", "handwritten". + GenerationType string `yaml:"generation_type"` + + // The source file of this metadata. This will only be set for generated resources, and will be the yaml file that contains the resource definition. + SourceFile string `yaml:"source_file"` + + // The base name of the API used for this resource. For example, "cloudfunctions.googleapis.com". + ApiServiceName string `yaml:"api_service_name"` + + // The version of the API used for this resource. For example, "v2". + ApiVersion string `yaml:"api_version"` + + // The API "resource type kind" used for this resource. For example, "Function". 
+ ApiResourceTypeKind string `yaml:"api_resource_type_kind"` + + // The custom CAI asset name format for this resource is typically specified (for example, //cloudsql.googleapis.com/projects/{{project}}/instances/{{name}}). + // This will only have a value if it's different than the Terraform resource ID format. + CaiAssetNameFormat string `yaml:"cai_asset_name_format,omitempty"` + + // The API URL patterns used by this resource that represent variants. For example, "folders/{folder}/feeds/{feed}". Each pattern must match the value defined + // in the API exactly. The use of `api_variant_patterns` is only meaningful when the resource type has multiple parent types available. + ApiVariantPatterns []string `yaml:"api_variant_patterns,omitempty"` + + // Whether the resource was autogenerated from OpenAPI specs. + AutogenStatus bool `yaml:"autogen_status,omitempty"` + + // List of fields on the resource. + Fields []Field `yaml:"fields"` +} diff --git a/mmv1/api/metadata/metadata_test.go b/mmv1/api/metadata/metadata_test.go new file mode 100644 index 000000000000..a3a45d701432 --- /dev/null +++ b/mmv1/api/metadata/metadata_test.go @@ -0,0 +1,103 @@ +package metadata + +import ( + "testing" + + "github.com/GoogleCloudPlatform/magic-modules/mmv1/api" + "github.com/google/go-cmp/cmp" +) + +func TestFromResource(t *testing.T) { + product := &api.Product{ + Name: "Product", + BaseUrl: "https://compute.googleapis.com/beta", + } + cases := []struct { + name string + resource api.Resource + wantMetadata Metadata + }{ + { + name: "empty resource", + resource: api.Resource{}, + wantMetadata: Metadata{ + Resource: "google_product_", + GenerationType: "mmv1", + ApiServiceName: "compute.googleapis.com", + ApiVersion: "beta", + }, + }, + { + name: "standard", + resource: api.Resource{ + Name: "Test", + AutogenStatus: "base64", + SourceYamlFile: "Test.yaml", + Properties: []*api.Type{ + { + Name: "field", + ApiName: "field", + }, + }, + }, + wantMetadata: Metadata{ + Resource: "google_product_test", + GenerationType: "mmv1", + SourceFile: "Test.yaml", + ApiServiceName: "compute.googleapis.com", + ApiVersion: "beta", + ApiResourceTypeKind: "Test", + AutogenStatus: true, + Fields: []Field{ + { + ApiField: "field", + }, + }, + }, + }, + { + name: "selfLink", + resource: api.Resource{ + Name: "Test", + AutogenStatus: "base64", + SourceYamlFile: "Test.yaml", + Properties: []*api.Type{ + { + Name: "field", + ApiName: "field", + }, + }, + HasSelfLink: true, + }, + wantMetadata: Metadata{ + Resource: "google_product_test", + GenerationType: "mmv1", + SourceFile: "Test.yaml", + ApiServiceName: "compute.googleapis.com", + ApiVersion: "beta", + ApiResourceTypeKind: "Test", + AutogenStatus: true, + Fields: []Field{ + { + ApiField: "field", + }, + { + ApiField: "selfLink", + }, + }, + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + tc.resource.SetDefault(product) + + got := FromResource(tc.resource) + if diff := cmp.Diff(tc.wantMetadata, got); diff != "" { + t.Errorf("FromResource() mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git a/mmv1/api/resource.go b/mmv1/api/resource.go index f81251df887d..8e66e20648e9 100644 --- a/mmv1/api/resource.go +++ b/mmv1/api/resource.go @@ -14,6 +14,7 @@ package api import ( "fmt" + "io/fs" "log" "maps" "path/filepath" @@ -709,7 +710,7 @@ func (r Resource) SensitivePropsToString() string { var props []string for _, prop := range r.SensitiveProps() { - props = append(props, fmt.Sprintf("`%s`", prop.Lineage())) + props = 
append(props, fmt.Sprintf("`%s`", strings.Join(prop.Lineage(), "."))) } return strings.Join(props, ", ") @@ -719,7 +720,7 @@ func (r Resource) WriteOnlyPropsToString() string { var props []string for _, prop := range r.WriteOnlyProps() { - props = append(props, fmt.Sprintf("`%s`", prop.Lineage())) + props = append(props, fmt.Sprintf("`%s`", strings.Join(prop.Lineage(), "."))) } return strings.Join(props, ", ") @@ -783,28 +784,6 @@ func (r Resource) RootProperties() []*Type { return props } -// Returns a sorted list of all "leaf" properties, meaning properties that have -// no children. -func (r Resource) LeafProperties() []*Type { - types := r.AllNestedProperties(google.Concat(r.RootProperties(), r.UserVirtualFields())) - - // Remove types that have children, because we only want "leaf" fields - types = slices.DeleteFunc(types, func(t *Type) bool { - nestedProperties := t.NestedProperties() - return len(nestedProperties) > 0 - }) - - // Sort types by lineage - slices.SortFunc(types, func(a, b *Type) int { - if a.MetadataLineage() < b.MetadataLineage() { - return -1 - } - return 1 - }) - - return types -} - // Return the product-level async object, or the resource-specific one // if one exists. func (r Resource) GetAsync() *Async { @@ -886,7 +865,7 @@ func (r *Resource) attachConstraintGroup(groupType string, source []string) *[]s } func buildWriteOnlyField(name string, versionFieldName string, originalField *Type) *Type { - originalFieldLineage := originalField.TerraformLineage() + originalFieldLineage := strings.Join(originalField.Lineage(), ".0.") newFieldLineage := strings.ReplaceAll(originalFieldLineage, google.Underscore(originalField.Name), google.Underscore(name)) requiredWith := strings.ReplaceAll(originalFieldLineage, google.Underscore(originalField.Name), google.Underscore(versionFieldName)) @@ -938,7 +917,7 @@ func buildWriteOnlyField(name string, versionFieldName string, originalField *Ty func buildWriteOnlyVersionField(name string, originalField *Type, writeOnlyField *Type) *Type { description := fmt.Sprintf("Triggers update of `%s` write-only. Increment this value when an update to `%s` is needed. 
For more info see [updating write-only arguments](/docs/providers/google/guides/using_write_only_arguments.html#updating-write-only-arguments)", google.Underscore(writeOnlyField.Name), google.Underscore(writeOnlyField.Name)) - requiredWith := strings.ReplaceAll(originalField.TerraformLineage(), google.Underscore(originalField.Name), google.Underscore(writeOnlyField.Name)) + requiredWith := strings.ReplaceAll(strings.Join(originalField.Lineage(), ".0."), google.Underscore(originalField.Name), google.Underscore(writeOnlyField.Name)) options := []func(*Type){ propertyWithType("Int"), @@ -1100,7 +1079,7 @@ func (r Resource) IgnoreReadLabelsFields(props []*Type) []string { if p.IsA("KeyValueLabels") || p.IsA("KeyValueTerraformLabels") || p.IsA("KeyValueAnnotations") { - fields = append(fields, p.TerraformLineage()) + fields = append(fields, strings.Join(p.Lineage(), ".0.")) } else if p.IsA("NestedObject") && len(p.AllProperties()) > 0 { fields = google.Concat(fields, r.IgnoreReadLabelsFields(p.AllProperties())) } @@ -1501,7 +1480,7 @@ func ignoreReadFields(props []*Type) []string { var fields []string for _, tp := range props { if tp.IgnoreRead && !tp.UrlParamOnly && !tp.IsA("ResourceRef") { - fields = append(fields, tp.TerraformLineage()) + fields = append(fields, strings.Join(tp.Lineage(), ".0.")) } else if tp.IsA("NestedObject") && tp.AllProperties() != nil { fields = append(fields, ignoreReadFields(tp.AllProperties())...) } @@ -1833,33 +1812,38 @@ func (r Resource) IamParentSourceType() string { return t } -func (r Resource) IamImportFormat() string { +func (r Resource) IamImportFormatTemplate() string { var importFormat string if len(r.IamPolicy.ImportFormat) > 0 { importFormat = r.IamPolicy.ImportFormat[0] } else { importFormat = r.IamPolicy.SelfLink if importFormat == "" { - importFormat = r.SelfLinkUrl() + if len(r.ImportFormat) > 0 { + importFormat = r.ImportFormat[0] + } else { + importFormat = r.SelfLinkUrl() + } } } + return strings.ReplaceAll(importFormat, "{{name}}", fmt.Sprintf("{{%s}}", r.IamParentResourceName())) +} + +func (r Resource) IamImportFormat() string { + importFormat := r.IamImportFormatTemplate() importFormat = regexp.MustCompile(`\{\{%?(\w+)\}\}`).ReplaceAllString(importFormat, "%s") return strings.ReplaceAll(importFormat, r.ProductMetadata.BaseUrl, "") } -func (r Resource) IamImportQualifiersForTest() string { - var importFormat string - if len(r.IamPolicy.ImportFormat) > 0 { - importFormat = r.IamPolicy.ImportFormat[0] - } else { - importFormat = r.IamPolicy.SelfLink - if importFormat == "" { - importFormat = r.SelfLinkUrl() - } - } +func (r Resource) IamImportParams() []string { + importFormat := r.IamImportFormatTemplate() - params := r.ExtractIdentifiers(importFormat) + return r.ExtractIdentifiers(importFormat) +} + +func (r Resource) IamImportQualifiersForTest() string { + params := r.IamImportParams() var importQualifiers []string for i, param := range params { if param == "project" { @@ -2142,7 +2126,7 @@ func (r Resource) TestSamples() []*resource.Sample { }) } -func (r Resource) TestSampleSetUp() { +func (r Resource) TestSampleSetUp(sysfs fs.FS) { res := make(map[string]string) for _, sample := range r.Samples { sample.TargetVersionName = r.TargetVersionName @@ -2158,7 +2142,7 @@ func (r Resource) TestSampleSetUp() { if step.ConfigPath == "" { step.ConfigPath = fmt.Sprintf("templates/terraform/samples/services/%s/%s.tf.tmpl", packageName, step.Name) } - step.SetHCLText() + step.SetHCLText(sysfs) configName := step.Name if _, ok := res[step.Name]; !ok { 
res[configName] = sample.Name @@ -2591,13 +2575,19 @@ func (r Resource) TGCTestIgnorePropertiesToStrings() []string { "timeouts", } for _, tp := range r.VirtualFields { - props = append(props, tp.MetadataLineage()) + props = append(props, strings.Join(tp.Lineage(), ".")) } for _, tp := range r.AllNestedProperties(r.RootProperties()) { if tp.UrlParamOnly { props = append(props, google.Underscore(tp.Name)) } else if tp.IsMissingInCai || tp.IgnoreRead || tp.ClientSide || tp.WriteOnlyLegacy { - props = append(props, tp.MetadataLineage()) + props = append(props, strings.Join(tp.Lineage(), ".")) + } + } + + for _, e := range r.Examples { + for _, p := range e.IgnoreReadExtra { + props = append(props, p) } } @@ -2606,7 +2596,7 @@ func (r Resource) TGCTestIgnorePropertiesToStrings() []string { } slices.Sort(props) - return props + return slices.Compact(props) } // Filters out computed properties during cai2hcl diff --git a/mmv1/api/resource/examples.go b/mmv1/api/resource/examples.go index 5946a5e2735e..da61c2d9870f 100644 --- a/mmv1/api/resource/examples.go +++ b/mmv1/api/resource/examples.go @@ -16,9 +16,9 @@ package resource import ( "bytes" "fmt" + "io/fs" "log" "net/url" - "os" "path/filepath" "regexp" "slices" @@ -257,7 +257,7 @@ func (e *Examples) ValidateExternalProviders() error { } // Executes example templates for documentation and tests -func (e *Examples) LoadHCLText(baseDir string) (err error) { +func (e *Examples) LoadHCLText(sysfs fs.FS) (err error) { originalVars := e.Vars originalTestEnvVars := e.TestEnvVars docTestEnvVars := make(map[string]string) @@ -284,7 +284,7 @@ func (e *Examples) LoadHCLText(baseDir string) (err error) { docTestEnvVars[key] = docs_defaults[e.TestEnvVars[key]] } e.TestEnvVars = docTestEnvVars - e.DocumentationHCLText, err = e.ExecuteTemplate(baseDir) + e.DocumentationHCLText, err = e.ExecuteTemplate(sysfs) if err != nil { return err } @@ -328,7 +328,7 @@ func (e *Examples) LoadHCLText(baseDir string) (err error) { e.Vars = testVars e.TestEnvVars = testTestEnvVars - e.TestHCLText, err = e.ExecuteTemplate(baseDir) + e.TestHCLText, err = e.ExecuteTemplate(sysfs) if err != nil { return err } @@ -344,8 +344,8 @@ func (e *Examples) LoadHCLText(baseDir string) (err error) { return nil } -func (e *Examples) ExecuteTemplate(baseDir string) (string, error) { - templateContent, err := os.ReadFile(filepath.Join(baseDir, e.ConfigPath)) +func (e *Examples) ExecuteTemplate(sysfs fs.FS) (string, error) { + templateContent, err := fs.ReadFile(sysfs, e.ConfigPath) if err != nil { return "", err } @@ -359,7 +359,7 @@ func (e *Examples) ExecuteTemplate(baseDir string) (string, error) { validateRegexForContents(varRegex, fileContentString, e.ConfigPath, "vars", e.Vars) templateFileName := filepath.Base(e.ConfigPath) - tmpl, err := template.New(templateFileName).Funcs(google.TemplateFunctions).Parse(fileContentString) + tmpl, err := template.New(templateFileName).Funcs(google.TemplateFunctions(sysfs)).Parse(fileContentString) if err != nil { return "", err } @@ -408,7 +408,7 @@ func (e *Examples) ResourceType(terraformName string) string { } // Executes example templates for documentation and tests -func (e *Examples) SetOiCSHCLText() { +func (e *Examples) SetOiCSHCLText(sysfs fs.FS) { var err error originalVars := e.Vars originalTestEnvVars := e.TestEnvVars @@ -430,7 +430,7 @@ func (e *Examples) SetOiCSHCLText() { e.Vars = testVars // SetOiCSHCLText is generated from the provider, assume base directory is // always relative for this case - e.OicsHCLText, err = 
e.ExecuteTemplate("") + e.OicsHCLText, err = e.ExecuteTemplate(sysfs) if err != nil { log.Fatal(err) } diff --git a/mmv1/api/resource/iam_policy.go b/mmv1/api/resource/iam_policy.go index 0c26e271dfa4..abc9cff3a6d3 100644 --- a/mmv1/api/resource/iam_policy.go +++ b/mmv1/api/resource/iam_policy.go @@ -84,6 +84,9 @@ type IamPolicy struct { // CompareSelfLinkOrResourceName CustomDiffSuppress *string `yaml:"custom_diff_suppress,omitempty"` + // ImportStateIDFuncs may use a custom template if default funcs don't work. + CustomImportStateIDFuncs string `yaml:"custom_import_state_id_funcs"` + // Some resources (IAP) use fields named differently from the parent resource. // We need to use the parent's attributes to create an IAM policy, but they may not be // named as the IAM resource expects. diff --git a/mmv1/api/resource/step.go b/mmv1/api/resource/step.go index 6f0d64cf8cd3..264010c1db48 100644 --- a/mmv1/api/resource/step.go +++ b/mmv1/api/resource/step.go @@ -16,9 +16,9 @@ package resource import ( "bytes" "fmt" + "io/fs" "log" "net/url" - "os" "path/filepath" "regexp" "strings" @@ -123,7 +123,6 @@ func (s *Step) Validate(rName, sName string) { if s.Name == "" { log.Fatalf("Missing `name` for one step in test sample %s in resource %s", sName, rName) } - } func validateRegexForContents(r *regexp.Regexp, contents string, configPath string, objName string, vars map[string]string) { @@ -143,7 +142,7 @@ func validateRegexForContents(r *regexp.Regexp, contents string, configPath stri } // Executes step configuration templates for documentation and tests -func (s *Step) SetHCLText() { +func (s *Step) SetHCLText(sysfs fs.FS) { originalPrefixedVars := s.PrefixedVars // originalVars := s.Vars originalTestEnvVars := s.TestEnvVars @@ -171,7 +170,7 @@ func (s *Step) SetHCLText() { docTestEnvVars[key] = docs_defaults[s.TestEnvVars[key]] } s.TestEnvVars = docTestEnvVars - s.DocumentationHCLText = s.ExecuteTemplate() + s.DocumentationHCLText = s.ExecuteTemplate(sysfs) s.DocumentationHCLText = regexp.MustCompile(`\n\n$`).ReplaceAllString(s.DocumentationHCLText, "\n") // Remove region tags @@ -215,7 +214,7 @@ func (s *Step) SetHCLText() { s.PrefixedVars = testPrefixedVars s.TestEnvVars = testTestEnvVars - s.TestHCLText = s.ExecuteTemplate() + s.TestHCLText = s.ExecuteTemplate(sysfs) s.TestHCLText = regexp.MustCompile(`\n\n$`).ReplaceAllString(s.TestHCLText, "\n") // Remove region tags s.TestHCLText = re1.ReplaceAllString(s.TestHCLText, "") @@ -227,8 +226,8 @@ func (s *Step) SetHCLText() { s.TestEnvVars = originalTestEnvVars } -func (s *Step) ExecuteTemplate() string { - templateContent, err := os.ReadFile(s.ConfigPath) +func (s *Step) ExecuteTemplate(sysfs fs.FS) string { + templateContent, err := fs.ReadFile(sysfs, s.ConfigPath) if err != nil { glog.Exit(err) } @@ -245,7 +244,7 @@ func (s *Step) ExecuteTemplate() string { templateFileName := filepath.Base(s.ConfigPath) - tmpl, err := template.New(templateFileName).Funcs(google.TemplateFunctions).Parse(fileContentString) + tmpl, err := template.New(templateFileName).Funcs(google.TemplateFunctions(sysfs)).Parse(fileContentString) if err != nil { glog.Exit(err) } @@ -300,7 +299,7 @@ func SubstituteTestPaths(config string) string { } // Executes step configuration templates for documentation and tests -func (s *Step) SetOiCSHCLText() { +func (s *Step) SetOiCSHCLText(sysfs fs.FS) { originalPrefixedVars := s.PrefixedVars // // Remove region tags @@ -318,7 +317,7 @@ func (s *Step) SetOiCSHCLText() { } s.PrefixedVars = testPrefixedVars - s.OicsHCLText = 
s.ExecuteTemplate() + s.OicsHCLText = s.ExecuteTemplate(sysfs) s.OicsHCLText = regexp.MustCompile(`\n\n$`).ReplaceAllString(s.OicsHCLText, "\n") // Remove region tags diff --git a/mmv1/api/resource_test.go b/mmv1/api/resource_test.go index 28c235d55d6d..116b01244123 100644 --- a/mmv1/api/resource_test.go +++ b/mmv1/api/resource_test.go @@ -204,123 +204,6 @@ func TestResourceServiceVersion(t *testing.T) { } } -func TestLeafProperties(t *testing.T) { - t.Parallel() - - cases := []struct { - description string - obj Resource - expected Type - }{ - { - description: "non-nested type", - obj: Resource{ - BaseUrl: "test", - Properties: []*Type{ - { - Name: "basic", - Type: "String", - }, - }, - }, - expected: Type{ - Name: "basic", - }, - }, - { - description: "nested type", - obj: Resource{ - BaseUrl: "test", - Properties: []*Type{ - { - Name: "root", - Type: "NestedObject", - Properties: []*Type{ - { - Name: "foo", - Type: "NestedObject", - Properties: []*Type{ - { - Name: "bars", - Type: "Array", - ItemType: &Type{ - Type: "NestedObject", - Properties: []*Type{ - { - Name: "fooBar", - Type: "String", - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - expected: Type{ - Name: "fooBar", - }, - }, - { - description: "nested virtual", - obj: Resource{ - BaseUrl: "test", - VirtualFields: []*Type{ - { - Name: "root", - Type: "NestedObject", - Properties: []*Type{ - { - Name: "foo", - Type: "String", - }, - }, - }, - }, - }, - expected: Type{ - Name: "foo", - }, - }, - { - description: "nested param", - obj: Resource{ - BaseUrl: "test", - Parameters: []*Type{ - { - Name: "root", - Type: "NestedObject", - Properties: []*Type{ - { - Name: "foo", - Type: "String", - }, - }, - }, - }, - }, - expected: Type{ - Name: "foo", - }, - }, - } - - for _, tc := range cases { - tc := tc - - t.Run(tc.description, func(t *testing.T) { - t.Parallel() - - tc.obj.SetDefault(nil) - if got, want := tc.obj.LeafProperties(), tc.expected; got[0].Name != want.Name { - t.Errorf("expected %q to be %q", got[0].Name, want.Name) - } - }) - } -} - // TestMagicianLocation verifies that the current package is being executed from within // the RELATIVE_MAGICIAN_LOCATION ("mmv1/") directory structure. This ensures that references // to files relative to this location will remain valid even if the repository structure diff --git a/mmv1/api/type.go b/mmv1/api/type.go index f27152c28e32..87b17ae1a7a1 100644 --- a/mmv1/api/type.go +++ b/mmv1/api/type.go @@ -524,82 +524,44 @@ func (t *Type) Validate(rName string) { // check the allowed types for Type field // check the allowed fields for each type, for example, KeyName is only allowed for Map -// Prints a dot notation path to where the field is nested within the parent -// object. eg: parent.meta.label.foo -// The only intended purpose is to allow better error messages. Some objects -// and at some points in the build this doesn't output a valid output. -func (t Type) Lineage() string { - if t.ParentMetadata == nil { - return google.Underscore(t.Name) - } - - return fmt.Sprintf("%s.%s", t.ParentMetadata.Lineage(), google.Underscore(t.Name)) -} - -// Returns the actual Terraform lineage for the field, formatted for resource metadata. -// This will return a simple dot notation path, like: foo_field.bar_field -func (t Type) MetadataLineage() string { +// Returns a slice of Terraform field names representing where the field is nested within the parent resource. +// For example, []string{"parent_field", "meta", "label", "foo_bar"}. 
+func (t Type) Lineage() []string { if t.ParentMetadata == nil || t.ParentMetadata.FlattenObject { - return google.Underscore(t.Name) + return []string{google.Underscore(t.Name)} } - // Skip arrays because otherwise the array name will be included twice - if t.ParentMetadata.IsA("Array") { - return t.ParentMetadata.MetadataLineage() + // Skip arrays & maps because otherwise the parent field name will be duplicated + if t.ParentMetadata.IsA("Array") || t.ParentMetadata.IsA("Map") { + return t.ParentMetadata.Lineage() } - return fmt.Sprintf("%s.%s", t.ParentMetadata.MetadataLineage(), google.Underscore(t.Name)) -} - -// Returns the default Terraform lineage for the field, based on converting MetadataApiLineage -// to snake_case. This is used to determine whether an explicit Terraform field name is required. -// This will return a simple dot notation path like: foo_field.bar_field -func (t Type) MetadataDefaultLineage() string { - apiLineage := t.MetadataApiLineage() - parts := strings.Split(apiLineage, ".") - var snakeParts []string - for _, p := range parts { - snakeParts = append(snakeParts, google.Underscore(p)) - } - return strings.Join(snakeParts, ".") + return append(t.ParentMetadata.Lineage(), google.Underscore(t.Name)) } -// Returns the actual API lineage for the field (that is, using API names), formatted for -// resource metadata. This will return a simple dot notation path, like: fooField.barField -// This format is intended for to represent an API type. -func (t Type) MetadataApiLineage() string { - apiName := t.ApiName +// Returns a slice of API field names representing where the field is nested within the parent resource. +// For example, []string{"parentField", "meta", "label", "fooBar"}. For fine-grained resources, this will +// include the field on the API resource that the fine-grained resource manages. +func (t Type) ApiLineage() []string { if t.ParentMetadata == nil { if !t.UrlParamOnly && t.ResourceMetadata.ApiResourceField != "" { - apiName = fmt.Sprintf("%s.%s", t.ResourceMetadata.ApiResourceField, apiName) + return []string{t.ResourceMetadata.ApiResourceField, t.ApiName} } - return apiName + return []string{t.ApiName} } + // Skip arrays because otherwise the array will be included twice if t.ParentMetadata.IsA("Array") { - return t.ParentMetadata.MetadataApiLineage() + return t.ParentMetadata.ApiLineage() } - return fmt.Sprintf("%s.%s", t.ParentMetadata.MetadataApiLineage(), apiName) -} - -// Returns the lineage in snake case -func (t Type) LineageAsSnakeCase() string { - if t.ParentMetadata == nil { - return google.Underscore(t.Name) - } - - return fmt.Sprintf("%s_%s", t.ParentMetadata.LineageAsSnakeCase(), google.Underscore(t.Name)) -} - -// Prints the access path of the field in the configration eg: metadata.0.labels -// The only intended purpose is to get the value of the labes field by calling d.Get(). -func (t Type) TerraformLineage() string { - if t.ParentMetadata == nil || t.ParentMetadata.FlattenObject { - return google.Underscore(t.Name) + // Insert `value` for children of Map fields, and exclude this type because + // it will have the same Name as the parent field. 
+ if t.ParentMetadata.IsA("Map") { + return append(t.ParentMetadata.ApiLineage(), "value") } - return fmt.Sprintf("%s.0.%s", t.ParentMetadata.TerraformLineage(), google.Underscore(t.Name)) + return append(t.ParentMetadata.ApiLineage(), t.ApiName) } func (t Type) EnumValuesToString(quoteSeperator string, addEmpty bool) string { @@ -1114,7 +1076,7 @@ func (t Type) AllProperties() []*Type { func (t Type) UserProperties() []*Type { if t.IsA("NestedObject") { if t.Properties == nil { - log.Fatalf("Field '{%s}' properties are nil!", t.Lineage()) + log.Fatalf("Field '{%s}' properties are nil!", strings.Join(t.Lineage(), ".")) } return google.Reject(t.Properties, func(p *Type) bool { @@ -1268,7 +1230,7 @@ func propertyWithAtLeastOneOfPointer(ptr *[]string) func(*Type) { func (t *Type) validateLabelsField() { productName := t.ResourceMetadata.ProductMetadata.Name resourceName := t.ResourceMetadata.Name - lineage := t.Lineage() + lineage := strings.Join(t.Lineage(), ".") if lineage == "labels" || lineage == "metadata.labels" || lineage == "configuration.labels" { if !t.IsA("KeyValueLabels") && // The label value must be empty string, so skip this resource diff --git a/mmv1/api/type_test.go b/mmv1/api/type_test.go index 9585ca18daa1..8dffecc144f2 100644 --- a/mmv1/api/type_test.go +++ b/mmv1/api/type_test.go @@ -2,6 +2,7 @@ package api import ( "reflect" + "strings" "testing" "github.com/GoogleCloudPlatform/magic-modules/mmv1/api/product" @@ -333,7 +334,7 @@ func TestTypeExcludeIfNotInVersion(t *testing.T) { } } -func TestMetadataLineage(t *testing.T) { +func TestLineage(t *testing.T) { t.Parallel() root := Type{ @@ -396,7 +397,7 @@ func TestMetadataLineage(t *testing.T) { t.Run(tc.description, func(t *testing.T) { t.Parallel() - got := tc.obj.MetadataLineage() + got := strings.Join(tc.obj.Lineage(), ".") if got != tc.expected { t.Errorf("expected %q to be %q", got, tc.expected) } @@ -404,88 +405,7 @@ func TestMetadataLineage(t *testing.T) { } } -func TestMetadataDefaultLineage(t *testing.T) { - t.Parallel() - - root := Type{ - Name: "root", - Type: "NestedObject", - Properties: []*Type{ - { - Name: "foo", - Type: "NestedObject", - Properties: []*Type{ - { - Name: "bars", - Type: "Array", - ItemType: &Type{ - Type: "NestedObject", - Properties: []*Type{ - { - Name: "fooBar", - Type: "String", - }, - }, - }, - }, - }, - }, - { - Name: "baz", - ApiName: "bazbaz", - Type: "String", - }, - }, - } - root.SetDefault(&Resource{}) - - cases := []struct { - description string - obj Type - expected string - }{ - { - description: "root type", - obj: root, - expected: "root", - }, - { - description: "sub type", - obj: *root.Properties[0], - expected: "root.foo", - }, - { - description: "array", - obj: *root.Properties[0].Properties[0], - expected: "root.foo.bars", - }, - { - description: "array of objects", - obj: *root.Properties[0].Properties[0].ItemType.Properties[0], - expected: "root.foo.bars.foo_bar", - }, - { - description: "with api name", - obj: *root.Properties[1], - expected: "root.bazbaz", - }, - } - - for _, tc := range cases { - tc := tc - - t.Run(tc.description, func(t *testing.T) { - t.Parallel() - - got := tc.obj.MetadataDefaultLineage() - if got != tc.expected { - t.Errorf("expected %q to be %q", got, tc.expected) - } - }) - } -} - -func TestMetadataApiLineage(t *testing.T) { +func TestApiLineage(t *testing.T) { t.Parallel() root := Type{ @@ -615,7 +535,7 @@ func TestMetadataApiLineage(t *testing.T) { t.Run(tc.description, func(t *testing.T) { t.Parallel() - got := tc.obj.MetadataApiLineage() + 
got := strings.Join(tc.obj.ApiLineage(), ".") if got != tc.expected { t.Errorf("expected %q to be %q", got, tc.expected) } diff --git a/mmv1/go.mod b/mmv1/go.mod index 09665191e801..9a42bc22bc32 100644 --- a/mmv1/go.mod +++ b/mmv1/go.mod @@ -11,6 +11,7 @@ require github.com/golang/glog v1.2.0 require ( github.com/getkin/kin-openapi v0.127.0 + github.com/google/go-cmp v0.6.0 github.com/otiai10/copy v1.9.0 ) diff --git a/mmv1/google/template_utils.go b/mmv1/google/template_utils.go index 9e39f8fb9eb7..f2b66262fe83 100644 --- a/mmv1/google/template_utils.go +++ b/mmv1/google/template_utils.go @@ -17,6 +17,7 @@ import ( "bytes" "errors" "fmt" + "io/fs" "path/filepath" "reflect" "strings" @@ -55,9 +56,15 @@ func plus(a, b int) int { return a + b } -var TemplateFunctions = templateFunctions() +func TemplateFunctions(templateFs fs.FS) template.FuncMap { + return functionsData{templateFS: templateFs}.templateFunctions() +} + +type functionsData struct { + templateFS fs.FS +} -func templateFunctions() template.FuncMap { +func (t functionsData) templateFunctions() template.FuncMap { return template.FuncMap{ "title": SpaceSeparatedTitle, "replace": strings.Replace, @@ -76,8 +83,8 @@ func templateFunctions() template.FuncMap { "sub": subtract, "plus": plus, "firstSentence": FirstSentence, - "trimTemplate": TrimTemplate, - "customTemplate": executeCustomTemplate, + "trimTemplate": t.trimTemplate, + "customTemplate": t.customTemplate, } } @@ -95,38 +102,36 @@ func structToPtr(e any) reflect.Value { // Temporary function to simulate how Ruby MMv1's lines() function works // for nested documentation. Can replace with normal "template" after switchover -func TrimTemplate(templatePath string, e any) string { +func (t *functionsData) trimTemplate(templatePath string, e any) (string, error) { templates := []string{ fmt.Sprintf("templates/terraform/%s", templatePath), "templates/terraform/expand_resource_ref.tmpl", } templateFileName := filepath.Base(templatePath) - // Need to remake TemplateFunctions, referencing it directly here - // causes a declaration loop - tmpl, err := template.New(templateFileName).Funcs(templateFunctions()).ParseFiles(templates...) + tmpl, err := template.New(templateFileName).Funcs(t.templateFunctions()).ParseFS(t.templateFS, templates...) if err != nil { - glog.Exit(err) + return "", err } contents := bytes.Buffer{} if err = tmpl.ExecuteTemplate(&contents, templateFileName, structToPtr(e)); err != nil { - glog.Exit(err) + return "", err } rs := contents.String() if rs == "" { - return rs + return "", nil } for strings.HasSuffix(rs, "\n") { rs = strings.TrimSuffix(rs, "\n") } - return fmt.Sprintf("%s\n", rs) + return fmt.Sprintf("%s\n", rs), nil } -func executeCustomTemplate(e any, templatePath string, appendNewline bool) string { +func (t functionsData) customTemplate(e any, templatePath string, appendNewline bool) (string, error) { templates := []string{ templatePath, "templates/terraform/expand_resource_ref.tmpl", @@ -139,9 +144,9 @@ func executeCustomTemplate(e any, templatePath string, appendNewline bool) strin } templateFileName := filepath.Base(templatePath) - tmpl, err := template.New(templateFileName).Funcs(templateFunctions()).ParseFiles(templates...) + tmpl, err := template.New(templateFileName).Funcs(t.templateFunctions()).ParseFS(t.templateFS, templates...) 
if err != nil { - glog.Exit(err) + return "", err } contents := bytes.Buffer{} @@ -157,5 +162,5 @@ func executeCustomTemplate(e any, templatePath string, appendNewline bool) strin if !appendNewline { rs = strings.TrimSuffix(rs, "\n") } - return rs + return rs, nil } diff --git a/mmv1/loader/loader.go b/mmv1/loader/loader.go index f94e09021f6e..ade3a7b542fd 100644 --- a/mmv1/loader/loader.go +++ b/mmv1/loader/loader.go @@ -4,28 +4,30 @@ import ( "errors" "fmt" "log" - "os" "path/filepath" "reflect" - "sort" + "strings" "sync" "github.com/GoogleCloudPlatform/magic-modules/mmv1/api" + "github.com/GoogleCloudPlatform/magic-modules/mmv1/google" "github.com/golang/glog" "golang.org/x/exp/slices" ) type Loader struct { - // BaseDirectory points to mmv1 root, if cwd can be empty as relative paths are used - BaseDirectory string - OverrideDirectory string - Version string + // baseDirectory points to mmv1 root, if cwd can be empty as relative paths are used + baseDirectory string + overrideDirectory string + version string + sysfs google.ReadDirReadFileFS } type Config struct { - BaseDirectory string // optional, defaults to current working directory - OverrideDirectory string // optional - Version string // required + BaseDirectory string // required + OverrideDirectory string // optional + Version string // required + Sysfs google.ReadDirReadFileFS // required } // NewLoader creates a new Loader instance, applying any @@ -35,40 +37,31 @@ func NewLoader(config Config) *Loader { if config.Version == "" { panic("version is required") } - - l := &Loader{ - BaseDirectory: config.BaseDirectory, - OverrideDirectory: config.OverrideDirectory, - Version: config.Version, + if config.BaseDirectory == "" { + panic("a base directory is required") } - - // Normalize override dir to a path that is relative to the magic-modules directory - // This is needed for templates that concatenate pwd + override dir + path - if filepath.IsAbs(l.OverrideDirectory) { - mmv1Dir := l.BaseDirectory - if mmv1Dir == "" { - wd, err := os.Getwd() - if err != nil { - panic(err) - } - mmv1Dir = wd - } - l.OverrideDirectory, _ = filepath.Rel(mmv1Dir, l.OverrideDirectory) - log.Printf("Override directory normalized to relative path %s", l.OverrideDirectory) + if config.Sysfs == nil { + panic("sysfs is required") + } + l := &Loader{ + baseDirectory: config.BaseDirectory, + overrideDirectory: config.OverrideDirectory, + version: config.Version, + sysfs: config.Sysfs, } return l } func (l *Loader) LoadProducts() map[string]*api.Product { - if l.Version == "" { + if l.version == "" { log.Printf("No version specified, assuming ga") - l.Version = "ga" + l.version = "ga" } var allProductFiles []string = make([]string, 0) - files, err := filepath.Glob(filepath.Join(l.BaseDirectory, "products/**/product.yaml")) + files, err := filepath.Glob(filepath.Join(l.baseDirectory, "products/**/product.yaml")) if err != nil { panic(err) } @@ -77,14 +70,15 @@ func (l *Loader) LoadProducts() map[string]*api.Product { allProductFiles = append(allProductFiles, fmt.Sprintf("products/%s", filepath.Base(dir))) } - if l.OverrideDirectory != "" { - log.Printf("Using override directory %s", l.OverrideDirectory) - overrideFiles, err := filepath.Glob(filepath.Join(l.OverrideDirectory, "products/**/product.yaml")) + log.Printf("Using base directory %q", l.baseDirectory) + if l.overrideDirectory != "" { + log.Printf("Using override directory %q", l.overrideDirectory) + overrideFiles, err := filepath.Glob(filepath.Join(l.overrideDirectory, "products/**/product.yaml")) 
if err != nil { panic(err) } for _, filePath := range overrideFiles { - product, err := filepath.Rel(l.OverrideDirectory, filePath) + product, err := filepath.Rel(l.overrideDirectory, filePath) if err != nil { panic(err) } @@ -163,11 +157,11 @@ func (l *Loader) LoadProduct(productName string) (*api.Product, error) { productYamlPath := filepath.Join(productName, "product.yaml") var productOverridePath string - if l.OverrideDirectory != "" { - productOverridePath = filepath.Join(l.OverrideDirectory, productYamlPath) + if l.overrideDirectory != "" { + productOverridePath = filepath.Join(l.overrideDirectory, productYamlPath) } - baseProductPath := filepath.Join(l.BaseDirectory, productYamlPath) + baseProductPath := filepath.Join(l.baseDirectory, productYamlPath) baseProductExists := Exists(baseProductPath) overrideProductExists := Exists(productOverridePath) @@ -179,20 +173,20 @@ func (l *Loader) LoadProduct(productName string) (*api.Product, error) { // Compile the product configuration if overrideProductExists { if baseProductExists { - api.Compile(baseProductPath, p, l.OverrideDirectory) + api.Compile(baseProductPath, p) overrideApiProduct := &api.Product{} - api.Compile(productOverridePath, overrideApiProduct, l.OverrideDirectory) - api.Merge(reflect.ValueOf(p).Elem(), reflect.ValueOf(*overrideApiProduct), l.Version) + api.Compile(productOverridePath, overrideApiProduct) + api.Merge(reflect.ValueOf(p).Elem(), reflect.ValueOf(*overrideApiProduct), l.version) } else { - api.Compile(productOverridePath, p, l.OverrideDirectory) + api.Compile(productOverridePath, p) } } else { - api.Compile(baseProductPath, p, l.OverrideDirectory) + api.Compile(baseProductPath, p) } // Check if product exists at the requested l.Version - if !p.ExistsAtVersionOrLower(l.Version) { - return nil, &ErrProductVersionNotFound{ProductName: productName, Version: l.Version} + if !p.ExistsAtVersionOrLower(l.version) { + return nil, &ErrProductVersionNotFound{ProductName: productName, Version: l.version} } // Compile all resources @@ -213,7 +207,7 @@ func (l *Loader) loadResources(product *api.Product) ([]*api.Resource, error) { var resources []*api.Resource = make([]*api.Resource, 0) // Get base resource files - resourceFiles, err := filepath.Glob(filepath.Join(l.BaseDirectory, product.PackagePath, "*")) + resourceFiles, err := filepath.Glob(filepath.Join(l.baseDirectory, product.PackagePath, "*")) if err != nil { return nil, fmt.Errorf("cannot get resource files: %v", err) } @@ -223,10 +217,14 @@ func (l *Loader) loadResources(product *api.Product) ([]*api.Resource, error) { if filepath.Base(resourceYamlPath) == "product.yaml" || filepath.Ext(resourceYamlPath) != ".yaml" { continue } + relPath, err := filepath.Rel(l.baseDirectory, resourceYamlPath) + if err != nil { + return nil, fmt.Errorf("returned %q is not relative to %q", resourceYamlPath, l.baseDirectory) + } // Skip if resource will be merged in the override loop - if l.OverrideDirectory != "" { - overrideResourceExists := Exists(l.OverrideDirectory, resourceYamlPath) + if l.overrideDirectory != "" { + overrideResourceExists := Exists(l.overrideDirectory, relPath) if overrideResourceExists { continue } @@ -237,22 +235,23 @@ func (l *Loader) loadResources(product *api.Product) ([]*api.Resource, error) { } // Compile override resources - if l.OverrideDirectory != "" { + if l.overrideDirectory != "" { resources, err = l.reconcileOverrideResources(product, resources) if err != nil { return nil, err } } + // Sort resources by name for consistent output + 
slices.SortFunc(resources, func(a, b *api.Resource) int { + return strings.Compare(a.Name, b.Name) + }) return resources, nil } // reconcileOverrideResources handles resolution of override resources func (l *Loader) reconcileOverrideResources(product *api.Product, resources []*api.Resource) ([]*api.Resource, error) { - productOverridePath := filepath.Join(l.OverrideDirectory, product.PackagePath, "product.yaml") - productOverrideDir := filepath.Dir(productOverridePath) - - overrideFiles, err := filepath.Glob(filepath.Join(productOverrideDir, "*")) + overrideFiles, err := filepath.Glob(filepath.Join(l.overrideDirectory, product.PackagePath, "*")) if err != nil { return nil, fmt.Errorf("cannot get override files: %v", err) } @@ -262,63 +261,59 @@ func (l *Loader) reconcileOverrideResources(product *api.Product, resources []*a continue } - baseResourcePath := filepath.Join(product.PackagePath, filepath.Base(overrideYamlPath)) + baseResourcePath := filepath.Join(l.baseDirectory, product.PackagePath, filepath.Base(overrideYamlPath)) resource := l.loadResource(product, baseResourcePath, overrideYamlPath) resources = append(resources, resource) } - // Sort resources by name for consistent output - sort.Slice(resources, func(i, j int) bool { - return resources[i].Name < resources[j].Name - }) - return resources, nil } // loadResource loads a single resource with optional override +// baseResourcePath and overrideResourcePath are expected to be absolute paths. func (l *Loader) loadResource(product *api.Product, baseResourcePath string, overrideResourcePath string) *api.Resource { resource := &api.Resource{} // Check if base resource exists - baseResourceExists := Exists(l.BaseDirectory, baseResourcePath) + baseResourceExists := Exists(baseResourcePath) + baseRelPath, _ := filepath.Rel(l.baseDirectory, baseResourcePath) if baseResourceExists { - relPath, _ := filepath.Rel(l.BaseDirectory, baseResourcePath) - resource.SourceYamlFile = relPath + resource.SourceYamlFile = baseRelPath } else { - relPath, _ := filepath.Rel(l.BaseDirectory, overrideResourcePath) + relPath, _ := filepath.Rel(l.overrideDirectory, overrideResourcePath) resource.SourceYamlFile = relPath } if overrideResourcePath != "" { if baseResourceExists { // Merge base and override - api.Compile(baseResourcePath, resource, l.OverrideDirectory) + api.Compile(baseResourcePath, resource) overrideResource := &api.Resource{} - api.Compile(overrideResourcePath, overrideResource, l.OverrideDirectory) - api.Merge(reflect.ValueOf(resource).Elem(), reflect.ValueOf(*overrideResource), l.Version) + api.Compile(overrideResourcePath, overrideResource) + api.Merge(reflect.ValueOf(resource).Elem(), reflect.ValueOf(*overrideResource), l.version) } else { // Override only - api.Compile(overrideResourcePath, resource, l.OverrideDirectory) + api.Compile(overrideResourcePath, resource) } } else { // Base only - api.Compile(baseResourcePath, resource, l.OverrideDirectory) - resource.SourceYamlFile = baseResourcePath + api.Compile(baseResourcePath, resource) + resource.SourceYamlFile = baseRelPath } // Set resource defaults and validate - resource.TargetVersionName = l.Version + resource.TargetVersionName = l.version // SetDefault before AddExtraFields to ensure relevant metadata is available on existing fields resource.SetDefault(product) resource.Properties = resource.AddExtraFields(resource.PropertiesWithExcluded(), nil) // SetDefault after AddExtraFields to ensure relevant metadata is available for the newly generated fields resource.SetDefault(product) 
resource.Validate() - resource.TestSampleSetUp() + resource.TestSampleSetUp(l.sysfs) for _, e := range resource.Examples { - if err := e.LoadHCLText(l.BaseDirectory); err != nil { + if err := e.LoadHCLText(l.sysfs); err != nil { glog.Exit(err) } } diff --git a/mmv1/main.go b/mmv1/main.go index 9eb277687867..0f96ca836345 100644 --- a/mmv1/main.go +++ b/mmv1/main.go @@ -3,7 +3,9 @@ package main import ( "flag" "fmt" + "io/fs" "log" + "os" "strings" "sync" "time" @@ -11,6 +13,7 @@ import ( "golang.org/x/exp/slices" "github.com/GoogleCloudPlatform/magic-modules/mmv1/api" + "github.com/GoogleCloudPlatform/magic-modules/mmv1/google" "github.com/GoogleCloudPlatform/magic-modules/mmv1/loader" "github.com/GoogleCloudPlatform/magic-modules/mmv1/openapi_generate" "github.com/GoogleCloudPlatform/magic-modules/mmv1/provider" @@ -26,7 +29,9 @@ var outputPathFlag = flag.String("output", "", "path to output generated files t // Example usage: --version beta var versionFlag = flag.String("version", "", "optional version name. If specified, this version is preferred for resource generation when applicable") -var overrideDirectoryFlag = flag.String("overrides", "", "directory containing yaml overrides") +var baseDirectoryFlag = flag.String("base", "", "optional directory containing mmv1 third_party/ and templates/ directories. Empty value defaults to GetCwd().") + +var overrideDirectoryFlag = flag.String("overrides", "", "optional directory containing yaml overrides") var productFlag = flag.String("product", "", "optional product name. If specified, the resources under the specific product will be generated. Otherwise, resources under all products will be generated.") @@ -56,24 +61,35 @@ func main() { return } - GenerateProducts(*productFlag, *resourceFlag, *providerFlag, *versionFlag, *outputPathFlag, *overrideDirectoryFlag, !*doNotGenerateCode, !*doNotGenerateDocs) + GenerateProducts(*productFlag, *resourceFlag, *providerFlag, *versionFlag, *outputPathFlag, *baseDirectoryFlag, *overrideDirectoryFlag, !*doNotGenerateCode, !*doNotGenerateDocs) } -func GenerateProducts(product, resource, providerName, version, outputPath, overrideDirectory string, generateCode, generateDocs bool) { +func GenerateProducts(product, resource, providerName, version, outputPath, baseDirectory, overrideDirectory string, generateCode, generateDocs bool) { if version == "" { log.Printf("No version specified, assuming ga") version = "ga" } + if baseDirectory == "" { + var err error + if baseDirectory, err = os.Getwd(); err != nil { + panic(err) + } + } startTime := time.Now() if providerName == "" { providerName = "default (terraform)" } - log.Printf("Generating MM output to '%s'", outputPath) - log.Printf("Building %s version", version) - log.Printf("Building %s provider", providerName) + log.Printf("Generating MM output to %q", outputPath) + log.Printf("Building %q version", version) + log.Printf("Building %q provider", providerName) + + ofs, err := google.NewOverlayFS(overrideDirectory, baseDirectory) + if err != nil { + panic(err) + } - loader := loader.NewLoader(loader.Config{Version: version, OverrideDirectory: overrideDirectory}) + loader := loader.NewLoader(loader.Config{Version: version, BaseDirectory: baseDirectory, OverrideDirectory: overrideDirectory, Sysfs: ofs}) loadedProducts := loader.LoadProducts() var productsToGenerate []string @@ -88,7 +104,7 @@ func GenerateProducts(product, resource, providerName, version, outputPath, over for _, productApi := range loadedProducts { wg.Add(1) - go GenerateProduct(version, 
providerName, productApi, outputPath, startTime, productsToGenerate, resource, generateCode, generateDocs) + go GenerateProduct(version, providerName, productApi, outputPath, startTime, ofs, productsToGenerate, resource, generateCode, generateDocs) } wg.Wait() @@ -102,7 +118,7 @@ func GenerateProducts(product, resource, providerName, version, outputPath, over // In order to only copy/compile files once per provider this must be called outside // of the products loop. Create an MMv1 provider with an arbitrary product (the first loaded). - providerToGenerate := newProvider(providerName, version, productsForVersion[0], startTime) + providerToGenerate := newProvider(providerName, version, productsForVersion[0], startTime, ofs) providerToGenerate.CopyCommonFiles(outputPath, generateCode, generateDocs) if generateCode { @@ -113,7 +129,7 @@ func GenerateProducts(product, resource, providerName, version, outputPath, over // GenerateProduct generates code and documentation for a product // This now uses the CompileProduct method to separate compilation from generation func GenerateProduct(version, providerName string, productApi *api.Product, outputPath string, - startTime time.Time, productsToGenerate []string, resourceToGenerate string, + startTime time.Time, fsys fs.FS, productsToGenerate []string, resourceToGenerate string, generateCode, generateDocs bool) { defer wg.Done() @@ -123,21 +139,21 @@ func GenerateProduct(version, providerName string, productApi *api.Product, outp } log.Printf("%s: Generating files", productApi.PackagePath) - providerToGenerate := newProvider(providerName, version, productApi, startTime) + providerToGenerate := newProvider(providerName, version, productApi, startTime, fsys) providerToGenerate.Generate(outputPath, resourceToGenerate, generateCode, generateDocs) } -func newProvider(providerName, version string, productApi *api.Product, startTime time.Time) provider.Provider { +func newProvider(providerName, version string, productApi *api.Product, startTime time.Time, fsys fs.FS) provider.Provider { switch providerName { case "tgc": - return provider.NewTerraformGoogleConversion(productApi, version, startTime) + return provider.NewTerraformGoogleConversion(productApi, version, startTime, fsys) case "tgc_cai2hcl": - return provider.NewCaiToTerraformConversion(productApi, version, startTime) + return provider.NewCaiToTerraformConversion(productApi, version, startTime, fsys) case "tgc_next": - return provider.NewTerraformGoogleConversionNext(productApi, version, startTime) + return provider.NewTerraformGoogleConversionNext(productApi, version, startTime, fsys) case "oics": - return provider.NewTerraformOiCS(productApi, version, startTime) + return provider.NewTerraformOiCS(productApi, version, startTime, fsys) default: - return provider.NewTerraform(productApi, version, startTime) + return provider.NewTerraform(productApi, version, startTime, fsys) } } diff --git a/mmv1/openapi_generate/parser.go b/mmv1/openapi_generate/parser.go index ee152fc747a6..e8161cd527f6 100644 --- a/mmv1/openapi_generate/parser.go +++ b/mmv1/openapi_generate/parser.go @@ -26,6 +26,7 @@ import ( "path/filepath" "regexp" "slices" + "strconv" "strings" "log" @@ -92,12 +93,15 @@ func (parser Parser) WriteYaml(filePath string) { doc, _ := loader.LoadFromFile(filePath) _ = doc.Validate(ctx) - resourcePaths := findResources(doc) + resources := findResources(doc) productPath := buildProduct(filePath, parser.Output, doc, header) log.Printf("Generated product %+v/product.yaml", productPath) - for _, 
pathArray := range resourcePaths { - resource := buildResource(filePath, pathArray[0], pathArray[1], doc) + for name, resource := range resources { + if resource.create == nil { + continue + } + resource := buildResource(filePath, name, resource, doc) // marshal method var yamlContent bytes.Buffer @@ -130,24 +134,69 @@ func (parser Parser) WriteYaml(filePath string) { } } -func findResources(doc *openapi3.T) [][]string { - var resourcePaths [][]string +type resourceOp struct { + path string + async bool +} - pathMap := doc.Paths.Map() - for key, pathValue := range pathMap { - if pathValue.Post == nil { - continue +type resource struct { + // nil if not defined + create, update, delete *resourceOp +} + +func anyToBool(a any) bool { + switch v := a.(type) { + case bool: + return v + case string: + if b, err := strconv.ParseBool(v); err == nil { + return b + } + panic(fmt.Sprintf("cannot parse expected boolean value, found string: %q", v)) + default: + panic(fmt.Sprintf("unexpected type: %T", v)) + } +} + +func buildOperation(resourcePath string, op *openapi3.Operation, prefix string) (string, *resourceOp) { + if op == nil { + return "", nil + } + if strings.HasPrefix(op.OperationID, prefix) { + resourceName := strings.Replace(op.OperationID, prefix, "", 1) + async := false + if a, ok := op.Extensions["x-google-lro"]; ok { + async = anyToBool(a) + } + return resourceName, &resourceOp{path: resourcePath, async: async} + } + return "", nil +} + +func findResources(doc *openapi3.T) map[string]*resource { + resources := make(map[string]*resource) + getDefault := func(n string) *resource { + r, ok := resources[n] + if !ok { + r = &resource{} + resources[n] = r } + return r + } - // Not very clever way of identifying create resource methods - if strings.HasPrefix(pathValue.Post.OperationID, "Create") { - resourcePath := key - resourceName := strings.Replace(pathValue.Post.OperationID, "Create", "", 1) - resourcePaths = append(resourcePaths, []string{resourcePath, resourceName}) + for key, pathValue := range doc.Paths.Map() { + if name, op := buildOperation(key, pathValue.Post, "Create"); op != nil { + getDefault(name).create = op + } + if name, op := buildOperation(key, pathValue.Delete, "Delete"); op != nil { + getDefault(name).delete = op + } + if name, op := buildOperation(key, pathValue.Patch, "Update"); op != nil { + getDefault(name).update = op } } - return resourcePaths + return resources } func buildProduct(filePath, output string, root *openapi3.T, header []byte) string { @@ -229,8 +278,9 @@ func stripVersion(path string) string { return re.ReplaceAllString(path, "") } -func buildResource(filePath, resourcePath, resourceName string, root *openapi3.T) api.Resource { +func buildResource(filePath, resourceName string, in *resource, root *openapi3.T) api.Resource { resource := api.Resource{} + resourcePath := in.create.path parsedObjects := parseOpenApi(resourcePath, resourceName, root) @@ -255,14 +305,25 @@ func buildResource(filePath, resourcePath, resourceName string, root *openapi3.T async := api.NewAsync() async.Operation.BaseUrl = "{{op_id}}" async.Result.ResourceInsideResponse = true + // Clear the default, we will attach the right values below + async.Actions = nil resource.Async = async + if in.create.async { + resource.Async.Actions = append(resource.Async.Actions, "create") + } - if hasUpdate(resourceName, root) { + if in.update != nil { resource.UpdateVerb = "PATCH" resource.UpdateMask = true + if in.update.async { + resource.Async.Actions = append(resource.Async.Actions, "update") 
+ } } else { resource.Immutable = true } + if in.delete != nil && in.delete.async { + resource.Async.Actions = append(resource.Async.Actions, "delete") + } example := r.Examples{} example.Name = "name_of_example_file" @@ -279,20 +340,6 @@ func buildResource(filePath, resourcePath, resourceName string, root *openapi3.T return resource } -func hasUpdate(resourceName string, root *openapi3.T) bool { - // Create and Update have different paths in the OpenAPI spec, so look - // through all paths to find one that matches the expected operation name - for _, pathValue := range root.Paths.Map() { - if pathValue.Patch == nil { - continue - } - if pathValue.Patch.OperationID == fmt.Sprintf("Update%s", resourceName) { - return true - } - } - return false -} - func parseOpenApi(resourcePath, resourceName string, root *openapi3.T) []any { returnArray := []any{} path := root.Paths.Find(resourcePath) diff --git a/mmv1/openapi_generate/parser_test.go b/mmv1/openapi_generate/parser_test.go index 914877790ee8..dc4ea98e61b2 100644 --- a/mmv1/openapi_generate/parser_test.go +++ b/mmv1/openapi_generate/parser_test.go @@ -14,10 +14,16 @@ func TestMapType(t *testing.T) { _ = NewOpenapiParser("/fake", "/fake") ctx := t.Context() loader := &openapi3.Loader{Context: ctx, IsExternalRefsAllowed: true} - doc, _ := loader.LoadFromData(testData) - _ = doc.Validate(ctx) + doc, err := loader.LoadFromData(testData) + if err != nil { + t.Fatalf("Could not load data %s", err) + } + err = doc.Validate(ctx) + if err != nil { + t.Fatalf("Could not validate data %s", err) + } - petSchema := doc.Paths.Map()["/pets"].Post.Parameters[0].Value.Schema + petSchema := doc.Paths.Map()["/pets"].Post.RequestBody.Value.Content["application/json"].Schema mmObject := WriteObject("pet", petSchema, propType(petSchema), false) if mmObject.KeyName == "" || mmObject.Type != "Map" { t.Error("Failed to parse map type") @@ -26,3 +32,26 @@ func TestMapType(t *testing.T) { t.Errorf("Expected 4 properties, found %d", len(mmObject.ValueType.Properties)) } } + +func TestFindResources(t *testing.T) { + ctx := t.Context() + loader := &openapi3.Loader{Context: ctx, IsExternalRefsAllowed: true} + doc, err := loader.LoadFromData(testData) + if err != nil { + t.Fatalf("Could not load data %s", err) + } + err = doc.Validate(ctx) + if err != nil { + t.Fatalf("Could not validate data %s", err) + } + res := findResources(doc) + if len(res) != 2 { + t.Fatalf("Expected 2 resources, found: %d", len(res)) + } + if !res["Food"].create.async { + t.Error("Food resource is supposed to be detected as async and is not") + } + if res["Pet"].create.async { + t.Error("Pet resource is not supposed to be detected as async") + } +} diff --git a/mmv1/openapi_generate/test_data/test_api.yaml b/mmv1/openapi_generate/test_data/test_api.yaml index e0c3b96ef121..2442c7db0dfe 100644 --- a/mmv1/openapi_generate/test_data/test_api.yaml +++ b/mmv1/openapi_generate/test_data/test_api.yaml @@ -41,18 +41,18 @@ paths: $ref: "#/components/schemas/Error" post: summary: Create a pet - operationId: createPets + operationId: CreatePet tags: - pets parameters: - - name: pet - in: body - required: true - description: The pet to create - schema: - type: object - additionalProperties: - $ref: "#/components/schemas/Pet" + requestBody: + description: "Required. 
The pets being created" + content: + application/json: + schema: + type: object + additionalProperties: + $ref: "#/components/schemas/Pet" responses: 201: description: Null response @@ -88,9 +88,87 @@ paths: application/json: schema: $ref: "#/components/schemas/Error" + /foods: + get: + summary: List all foods + operationId: listFoods + tags: + - foods + parameters: + - name: limit + in: query + description: How many items to return at one time (max 100) + required: false + schema: + type: integer + format: int32 + responses: + 200: + description: An paged array of foods + headers: + x-next: + description: A link to the next page of responses + schema: + type: string + content: + application/json: + schema: + $ref: "#/components/schemas/Foods" + default: + description: unexpected error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + post: + summary: Create a food + operationId: CreateFood + tags: + - foods + x-google-lro: true + requestBody: + description: "Required. The food being created" + content: + application/json: + schema: + $ref: "#/components/schemas/Food" + responses: + default: + description: Successful operation + content: + application/json: + schema: + $ref: "#/components/schemas/CreateFoodOperation" + /foods/{foodId}: + get: + summary: Info for a specific food + operationId: showFoodById + tags: + - foods + parameters: + - name: foodId + in: path + required: true + description: The id of the food to retrieve + schema: + type: string + responses: + 200: + description: Expected response to a valid request + content: + application/json: + schema: + $ref: "#/components/schemas/Foods" + default: + description: unexpected error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" components: schemas: Pet: + type: object required: - id - name @@ -112,10 +190,19 @@ components: properties: name: type: string + Foods: + type: array + items: + $ref: "#/components/schemas/Food" Pets: type: array items: $ref: "#/components/schemas/Pet" + CreateFoodOperation: + type: object + properties: + name: + type: string Error: required: - code diff --git a/mmv1/products/alloydb/Cluster.yaml b/mmv1/products/alloydb/Cluster.yaml index 184a01c22d50..589068e7066c 100644 --- a/mmv1/products/alloydb/Cluster.yaml +++ b/mmv1/products/alloydb/Cluster.yaml @@ -59,7 +59,6 @@ custom_code: pre_create: 'templates/terraform/pre_create/alloydb_cluster.go.tmpl' pre_update: 'templates/terraform/pre_update/alloydb_cluster.go.tmpl' pre_delete: 'templates/terraform/pre_delete/alloydb_cluster.go.tmpl' - tgc_decoder: 'templates/tgc_next/decoders/alloydb_cluster.go.tmpl' # Skipping the sweeper because we need to force-delete clusters. exclude_sweeper: true include_in_tgc_next: true diff --git a/mmv1/products/alloydb/Instance.yaml b/mmv1/products/alloydb/Instance.yaml index dbb46d6e94e9..a62f347ad9d1 100644 --- a/mmv1/products/alloydb/Instance.yaml +++ b/mmv1/products/alloydb/Instance.yaml @@ -54,6 +54,9 @@ custom_code: exclude_sweeper: true include_in_tgc_next: true tgc_include_handwritten_tests: true +tgc_tests: + - name: 'TestAccAlloydbInstance_connectionPoolConfig' + skip: 'fix it after the next release, as connection_pool_config will be in the next release' examples: - name: 'alloydb_instance_basic' primary_resource_id: 'default' @@ -263,38 +266,55 @@ properties: description: 'Configuration for enhanced query insights.' 
min_version: 'beta'
default_from_api: true
+ custom_expand: 'templates/terraform/custom_expand/alloydb_instance_observability_config.go.tmpl'
properties:
- name: 'enabled'
type: Boolean
description: 'Observability feature status for an instance.'
include_empty_value_in_cai: true # Default value is false in CAI asset
+ send_empty_value: true
+ default_from_api: true
- name: 'preserveComments'
type: Boolean
description: 'Preserve comments in the query string.'
include_empty_value_in_cai: true # Default value is false in CAI asset
+ send_empty_value: true
+ default_from_api: true
- name: 'trackWaitEvents'
type: Boolean
description: 'Record wait events during query execution for an instance.'
+ send_empty_value: true
+ default_from_api: true
- name: 'trackWaitEventTypes'
type: Boolean
description: 'Record wait event types during query execution for an instance.'
+ send_empty_value: true
+ default_from_api: true
- name: 'maxQueryStringLength'
type: Integer
description: 'Query string length. The default value is 10240. Any integer between 1024 and 100000 is considered valid.'
+ default_from_api: true
- name: 'recordApplicationTags'
type: Boolean
description: 'Record application tags for an instance. This flag is turned "on" by default.'
include_empty_value_in_cai: true # Default value is false in CAI asset
+ send_empty_value: true
+ default_from_api: true
- name: 'queryPlansPerMinute'
type: Integer
description: 'Number of query execution plans captured by Insights per minute for all queries combined. The default value is 5. Any integer between 0 and 200 is considered valid.'
+ default_from_api: true
- name: 'trackActiveQueries'
type: Boolean
description: 'Track actively running queries. If not set, default value is "off".'
include_empty_value_in_cai: true # Default value is false in CAI asset
+ send_empty_value: true
+ default_from_api: true
- name: 'assistiveExperiencesEnabled'
type: Boolean
description: 'Whether assistive experiences are enabled for this AlloyDB instance.'
+ send_empty_value: true
+ default_from_api: true
- name: 'readPoolConfig'
type: NestedObject
description: 'Read pool specific config. If the instance type is READ_POOL, this configuration must be provided.'
@@ -474,3 +494,31 @@ properties:
output: true
item_type:
type: String
+ - name: 'connectionPoolConfig'
+ type: NestedObject
+ description: |
+ Configuration for Managed Connection Pool.
+ custom_flatten: 'templates/terraform/custom_flatten/alloydb_instance_connectionpoolconfig_flatten.go.tmpl'
+ properties:
+ - name: 'enabled'
+ type: Boolean
+ description: |
+ Whether to enable Managed Connection Pool.
+ required: true
+ - name: 'poolerCount'
+ type: Integer
+ output: true
+ description: |
+ The number of running poolers per instance.
+ - name: 'flags'
+ type: KeyValuePairs
+ description: |
+ Flags for configuring managed connection pooling when it is enabled.
+ These flags will only be set if `connection_pool_config.enabled` is
+ true.
+ Please see
+ https://cloud.google.com/alloydb/docs/configure-managed-connection-pooling#configuration-options
+ for a comprehensive list of flags that can be set. To specify the flags
+ in Terraform, please remove the "connection-pooling-" prefix and use
+ underscores instead of dashes in the name. For example,
+ "connection-pooling-pool-mode" would be "pool_mode".
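The connection_pool_config.flags documentation added above describes a naming rule for AlloyDB connection-pooling flags in Terraform: drop the "connection-pooling-" prefix and replace dashes with underscores. A minimal, illustrative Go sketch of that mapping follows; it is not part of this change, and the helper name terraformFlagKey is hypothetical.

package main

import (
	"fmt"
	"strings"
)

// terraformFlagKey sketches the documented convention for connection_pool_config.flags:
// strip the "connection-pooling-" prefix and use underscores instead of dashes.
func terraformFlagKey(apiFlag string) string {
	key := strings.TrimPrefix(apiFlag, "connection-pooling-")
	return strings.ReplaceAll(key, "-", "_")
}

func main() {
	// "connection-pooling-pool-mode" is written as the flags key "pool_mode" in Terraform.
	fmt.Println(terraformFlagKey("connection-pooling-pool-mode")) // pool_mode
}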
diff --git a/mmv1/products/apigee/SecurityFeedback.yaml b/mmv1/products/apigee/SecurityFeedback.yaml new file mode 100644 index 000000000000..29abb5e7ce07 --- /dev/null +++ b/mmv1/products/apigee/SecurityFeedback.yaml @@ -0,0 +1,118 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- +name: 'SecurityFeedback' +description: | + Represents a feedback report from an Advanced API Security customer. + Manages customer feedback about ML models. +references: + guides: + 'Create a SecurityFeedback': 'https://docs.cloud.google.com/apigee/docs/api-security/abuse-detection#exclude-traffic-from-abuse-detection' + api: 'https://cloud.google.com/apigee/docs/reference/apis/apigee/rest/v1/organizations.securityFeedback/create' +base_url: '{{org_id}}/securityFeedback' +self_link: '{{org_id}}/securityFeedback/{{feedback_id}}' +create_url: '{{org_id}}/securityFeedback?security_feedback_id={{feedback_id}}' +update_verb: 'PATCH' +import_format: + - '{{org_id}}/securityFeedback/{{feedback_id}}' + - '{{org_id}}/{{feedback_id}}' +custom_code: + custom_import: "templates/terraform/custom_import/apigee_security_feedback.go.tmpl" +examples: + - name: 'apigee_security_feedback_basic' + vars: + security_feedback_id: 'my-feedback' + exclude_test: true + - name: 'apigee_security_feedback_basic_test' + primary_resource_id: 'security_feedback' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + exclude_docs: true + external_providers: ["time"] +parameters: + - name: 'orgId' + type: String + description: | + The Apigee Organization associated with the Apigee Security Feedback, + in the format `organizations/{{org_name}}`. + url_param_only: true + required: true + immutable: true + - name: 'feedbackId' + type: String + description: | + Resource ID of the security feedback. + required: true + immutable: true + url_param_only: true +properties: + - name: 'name' + type: String + description: | + Name of the security feedback resource, + in the format `organizations/{{org_name}}/securityFeedback/{{feedback_id}}`. + output: true + - name: 'displayName' + type: String + description: The display name of the feedback. + - name: 'feedbackContexts' + type: Array + description: | + One or more attribute/value pairs for constraining the feedback. + required: true + item_type: + type: NestedObject + properties: + - name: attribute + type: Enum + description: | + The attribute the user is providing feedback about. + required: true + enum_values: + - 'ATTRIBUTE_ENVIRONMENTS' + - 'ATTRIBUTE_IP_ADDRESS_RANGES' + - name: values + type: Array + description: | + The values of the attribute the user is providing feedback about, separated by commas. + required: true + item_type: + type: String + - name: 'feedbackType' + type: Enum + description: The type of feedback being submitted. + required: true + enum_values: + - 'EXCLUDED_DETECTION' + - name: 'createTime' + type: String + description: The time when this specific feedback id was created. 
+ output: true + - name: 'updateTime' + type: String + description: The time when this specific feedback id was updated. + output: true + - name: 'reason' + type: Enum + description: The reason for the feedback. + enum_values: + - 'INTERNAL_SYSTEM' + - 'NON_RISK_CLIENT' + - 'NAT' + - 'PENETRATION_TEST' + - 'OTHER' + - name: 'comment' + type: String + description: Optional text the user can provide for additional, unstructured context. diff --git a/mmv1/products/apphub/Boundary.yaml b/mmv1/products/apphub/Boundary.yaml new file mode 100644 index 000000000000..f3edf328b53b --- /dev/null +++ b/mmv1/products/apphub/Boundary.yaml @@ -0,0 +1,83 @@ +# Copyright 2025 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- +name: 'Boundary' +description: 'Application management boundary.' +references: + guides: + 'AppHub': 'https://docs.cloud.google.com/app-hub/docs/' + api: 'https://docs.cloud.google.com/app-hub/docs/reference/rest/v1/Boundary' + +docs: +self_link: 'projects/{{project}}/locations/{{location}}/boundary' +# Singleton resource, point create_url to update_url. +create_url: 'projects/{{project}}/locations/{{location}}/boundary' +create_verb: 'PATCH' +update_url: 'projects/{{project}}/locations/{{location}}/boundary' +update_verb: 'PATCH' +exclude_delete: true +id_format: 'projects/{{project}}/locations/{{location}}/boundary' +import_format: + - "projects/{{project}}/locations/{{location}}/boundary" +autogen_async: true +async: + actions: ['create', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + result: + resource_inside_response: true +parameters: + - name: 'location' + type: String + description: 'The location for the Boundary resource. Must be global.' + url_param_only: true + immutable: true + required: true +properties: + - name: name + type: String + description: |- + Identifier. The resource name of the boundary. + Format: "projects/{project}/locations/{{location}}/boundary" + output: true + - name: crmNode + type: String + description: |- + Optional. The resource name of the CRM node being attached to the + boundary. + Format: `projects/{project-number}` + - name: createTime + type: Time + description: 'Create time.' + output: true + - name: updateTime + type: Time + description: ' Update time.' + output: true + - name: type + type: Enum + description: 'Boundary type.' + output: true + enum_values: + - AUTOMATIC + - MANUAL + - MANAGED_AUTOMATIC + +examples: + - name: 'apphub_boundary_basic' + primary_resource_id: 'example' + test_env_vars: + crm_node_project_number: 'PROJECT_NUMBER' + exclude_test: true diff --git a/mmv1/products/backupdr/BackupPlan.yaml b/mmv1/products/backupdr/BackupPlan.yaml index 2d60851a25b6..1f8cf97ff51b 100644 --- a/mmv1/products/backupdr/BackupPlan.yaml +++ b/mmv1/products/backupdr/BackupPlan.yaml @@ -118,6 +118,10 @@ properties: description: | When the `BackupPlan` was last updated. 
output: true + - name: 'maxCustomOnDemandRetentionDays' + type: Integer + description: | + The maximum number of days for which an on-demand backup taken with custom retention can be retained. - name: 'backupRules' type: Array description: | diff --git a/mmv1/products/backupdr/BackupVault.yaml b/mmv1/products/backupdr/BackupVault.yaml index c5d185f3086c..035bca22b156 100644 --- a/mmv1/products/backupdr/BackupVault.yaml +++ b/mmv1/products/backupdr/BackupVault.yaml @@ -207,6 +207,7 @@ properties: - 'MATCH_BACKUP_EXPIRE_TIME' - name: 'encryptionConfig' type: NestedObject + ignore_read: true description: 'Encryption configuration for the backup vault.' properties: - name: 'kmsKeyName' diff --git a/mmv1/products/beyondcorp/SecurityGateway.yaml b/mmv1/products/beyondcorp/SecurityGateway.yaml index 689848f53c66..7b386c248b2d 100644 --- a/mmv1/products/beyondcorp/SecurityGateway.yaml +++ b/mmv1/products/beyondcorp/SecurityGateway.yaml @@ -221,6 +221,8 @@ properties: description: Client IP configuration. The client IP address is included if true. - name: serviceDiscovery type: NestedObject + send_empty_value: true + allow_empty_object: true description: Settings related to the Service Discovery. properties: - name: apiGateway diff --git a/mmv1/products/beyondcorp/SecurityGatewayApplication.yaml b/mmv1/products/beyondcorp/SecurityGatewayApplication.yaml index 8d96cd87037f..ed90f0724208 100644 --- a/mmv1/products/beyondcorp/SecurityGatewayApplication.yaml +++ b/mmv1/products/beyondcorp/SecurityGatewayApplication.yaml @@ -187,6 +187,8 @@ properties: properties: - name: userInfo type: NestedObject + send_empty_value: true + allow_empty_object: true description: User info configuration. properties: - name: outputType @@ -198,6 +200,8 @@ properties: - 'NONE' - name: groupInfo type: NestedObject + send_empty_value: true + allow_empty_object: true description: Group info configuration. properties: - name: outputType @@ -209,6 +213,8 @@ properties: - 'NONE' - name: deviceInfo type: NestedObject + send_empty_value: true + allow_empty_object: true description: Device info configuration. properties: - name: outputType diff --git a/mmv1/products/biglakeiceberg/IcebergCatalog.yaml b/mmv1/products/biglakeiceberg/IcebergCatalog.yaml new file mode 100644 index 000000000000..320e01f5fd19 --- /dev/null +++ b/mmv1/products/biglakeiceberg/IcebergCatalog.yaml @@ -0,0 +1,149 @@ +# Copyright 2025 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- +name: 'IcebergCatalog' +description: | + IcebergCatalogs are top-level containers for Apache Iceberg REST Catalog served Namespaces and Tables. +references: + guides: + 'Use the BigLake metastore Iceberg REST catalog': 'https://docs.cloud.google.com/biglake/docs/blms-rest-catalog' +docs: + warning: | + If you are using User ADCs (Application Default Credentials) with this resource's IAM, + you must specify a `billing_project` and set `user_project_override` to true + in the provider configuration. Otherwise the IAM API will return 403s. 
+ Your account must have the `serviceusage.services.use` permission on the + `billing_project` you defined. +supports_indirect_user_project_override: true +base_url: 'iceberg/v1/restcatalog/extensions/projects/{{project}}/catalogs' +self_link: 'iceberg/v1/restcatalog/extensions/projects/{{project}}/catalogs/{{name}}' +immutable: false +create_url: 'iceberg/v1/restcatalog/extensions/projects/{{project}}/catalogs?iceberg-catalog-id={{name}}' +custom_code: + custom_update: + templates/terraform/custom_update/biglake_iceberg_catalog_update.go.tmpl +iam_policy: + base_url: 'v1/projects/{{project}}/catalogs/{{name}}' + parent_resource_attribute: 'name' + method_name_separator: ":" + fetch_iam_policy_verb: 'GET' + import_format: + - 'projects/{{project}}/catalogs/{{name}}' + - '{{name}}' + allowed_iam_role: 'roles/biglake.editor' +examples: + - name: 'biglake_iceberg_catalog' + primary_resource_id: 'my_iceberg_catalog' + vars: + name: 'my_iceberg_catalog' + test_env_vars: + GOOGLE_BILLING_PROJECT: 'PROJECT' + USER_PROJECT_OVERRIDE: 'true' +parameters: + - name: 'name' + type: String + required: true + immutable: true + url_param_only: true + description: | + The name of the IcebergCatalog. Format: + projects/{project_id_or_number}/catalogs/{iceberg_catalog_id} +properties: + - name: 'credential_mode' + api_name: 'credential-mode' + type: Enum + description: The credential mode used for the catalog. + CREDENTIAL_MODE_END_USER - End user credentials, default. The authenticating + user must have access to the catalog resources and the corresponding Google + Cloud Storage files. CREDENTIAL_MODE_VENDED_CREDENTIALS - Use credential + vending. The authenticating user must have access to the catalog resources + and the system will provide the caller with downscoped credentials to access + the Google Cloud Storage files. All table operations in this mode would + require `X-Iceberg-Access-Delegation` header with `vended-credentials` value + included. System will generate a service account and the catalog + administrator must grant the service account appropriate permissions. + required: false + immutable: false + output: false + enum_values: + - 'CREDENTIAL_MODE_END_USER' + - 'CREDENTIAL_MODE_VENDED_CREDENTIALS' + default_from_api: true + - name: 'biglake_service_account' + api_name: 'biglake-service-account' + type: String + description: Output only. The service account used for credential vending. It + might be empty if credential vending was never enabled for the catalog. + output: true + - name: 'catalog_type' + api_name: 'catalog-type' + type: Enum + description: The catalog type of the IcebergCatalog. Currently only supports + the type for Google Cloud Storage Buckets. + required: true + immutable: true + output: false + enum_values: + - 'CATALOG_TYPE_GCS_BUCKET' + - name: 'default_location' + api_name: 'default-location' + type: String + description: Output only. The default storage location for the catalog, e.g., + `gs://my-bucket`. + output: true + - name: 'storage_regions' + api_name: 'storage-regions' + type: Array + item_type: + type: String + description: Output only. The GCP region(s) where the physical metadata for + the tables is stored, e.g. `us-central1`, `nam4` or `us`. This will contain + one value for all locations, except for the catalogs that are configured to + use custom dual region buckets. + output: true + - name: 'create_time' + api_name: 'create-time' + type: String + description: Output only. The creation time of the IcebergCatalog. 
+ output: true
+ - name: 'update_time'
+ api_name: 'update-time'
+ type: String
+ description: Output only. The last modification time of the IcebergCatalog.
+ output: true
+ - name: 'replicas'
+ type: Array
+ item_type:
+ type: NestedObject
+ properties:
+ - name: 'region'
+ type: String
+ description: The region of the replica, e.g., `us-east1`.
+ output: true
+ - name: 'state'
+ type: Enum
+ description: If the IcebergCatalog is replicated to multiple regions, this
+ describes the current state of the replica. STATE_UNKNOWN - The replica
+ state is unknown. STATE_PRIMARY - The replica is the writable primary.
+ STATE_PRIMARY_IN_PROGRESS - The replica has been recently assigned as
+ the primary, but not all namespaces are writeable yet. STATE_SECONDARY -
+ The replica is a read-only secondary replica.
+ output: true
+ enum_values:
+ - 'STATE_UNKNOWN'
+ - 'STATE_PRIMARY'
+ - 'STATE_PRIMARY_IN_PROGRESS'
+ - 'STATE_SECONDARY'
+ description: Output only. The replicas for the catalog metadata.
+ output: true
diff --git a/mmv1/products/biglakeiceberg/product.yaml b/mmv1/products/biglakeiceberg/product.yaml
new file mode 100644
index 000000000000..5fc292a5b4c7
--- /dev/null
+++ b/mmv1/products/biglakeiceberg/product.yaml
@@ -0,0 +1,23 @@
+# Copyright 2025 Google Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+name: 'BiglakeIceberg'
+legacy_name: 'biglake'
+display_name: 'Biglake'
+versions:
+ - name: 'ga'
+ base_url: 'https://biglake.googleapis.com/'
+scopes:
+ - 'https://www.googleapis.com/auth/bigquery'
+ - 'https://www.googleapis.com/auth/cloud-platform'
diff --git a/mmv1/products/bigtable/SchemaBundle.yaml b/mmv1/products/bigtable/SchemaBundle.yaml
index 129906179825..61ea8d5c9716 100644
--- a/mmv1/products/bigtable/SchemaBundle.yaml
+++ b/mmv1/products/bigtable/SchemaBundle.yaml
@@ -80,6 +80,14 @@ properties:
'The unique name of the requested schema bundle. Values are of the form `projects//instances//tables//schemaBundles/`.'
output: true
+ - name: 'etag'
+ type: String
+ description: |
+ etag is used for optimistic concurrency control as a way to help prevent simultaneous
+ updates of a schema bundle from overwriting each other. This may be sent on update and delete
+ requests to ensure the client has an up-to-date value before proceeding. The server returns
+ an ABORTED error on a mismatched etag.
+ output: true
- name: 'protoSchema'
type: NestedObject
description: |
diff --git a/mmv1/products/cloudrunv2/Service.yaml b/mmv1/products/cloudrunv2/Service.yaml
index fd0550f32bec..1580dd65e2f1 100644
--- a/mmv1/products/cloudrunv2/Service.yaml
+++ b/mmv1/products/cloudrunv2/Service.yaml
@@ -167,6 +167,15 @@ examples:
cloud_run_service_name: 'cloudrun-iap-service'
ignore_read_extra:
- 'deletion_protection'
+ - name: 'cloudrunv2_service_zip_deploy'
+ primary_resource_id: 'default'
+ primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-service%s", context["random_suffix"])'
+ min_version: 'beta'
+ vars:
+ cloud_run_service_name: 'cloudrun-zip-service'
+ targz_path: './test-fixtures/function-source.tar.gz'
+ ignore_read_extra:
+ - 'deletion_protection'
virtual_fields:
- name: 'deletion_protection'
description: |
@@ -323,6 +332,7 @@ properties:
description: |
Scaling settings that apply to the whole service
default_from_api: true
+ diff_suppress_func: 'tpgresource.EmptyOrUnsetBlockDiffSuppress'
properties:
- name: 'minInstanceCount'
type: Integer
@@ -812,6 +822,33 @@ properties:
description: |-
Source code location of the image.
output: true
+ - name: 'sourceCode'
+ type: NestedObject
+ description: |-
+ Location of the source.
+ min_version: 'beta'
+ properties:
+ - name: 'cloudStorageSource'
+ type: NestedObject
+ description: |-
+ Cloud Storage source.
+ exactly_one_of:
+ - 'cloud_storage_source'
+ properties:
+ - name: 'bucket'
+ type: String
+ description: |-
+ The Cloud Storage bucket name.
+ required: true
+ - name: 'object'
+ type: String
+ description: |-
+ The Cloud Storage object name.
+ required: true
+ - name: 'generation'
+ type: String
+ description: |-
+ The Cloud Storage object generation. This is an int64 value. As with most Google APIs, its JSON representation will be a string instead of an integer.
- name: 'volumes'
type: Array
description: |-
diff --git a/mmv1/products/cloudrunv2/WorkerPool.yaml b/mmv1/products/cloudrunv2/WorkerPool.yaml
index 055772d16deb..f7961cf31976 100644
--- a/mmv1/products/cloudrunv2/WorkerPool.yaml
+++ b/mmv1/products/cloudrunv2/WorkerPool.yaml
@@ -131,13 +131,6 @@ examples:
- 'deletion_protection'
# Currently failing
skip_vcr: true
- - name: 'cloudrunv2_worker_pool_custom_audiences'
- primary_resource_id: 'default'
- primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-worker-pool-%s", context["random_suffix"])'
- vars:
- cloud_run_worker_pool_name: 'cloudrun-worker-pool'
- ignore_read_extra:
- - 'deletion_protection'
- name: 'cloudrunv2_worker_pool_startup_liveness_probe'
primary_resource_id: 'default'
primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-wp%s", context["random_suffix"])'
@@ -288,6 +281,7 @@ properties:
description: |
One or more custom audiences that you want this worker pool to support. Specify each custom audience as the full URL in a string. The custom audiences are encoded in the token and used to authenticate requests. For more information, see https://cloud.google.com/run/docs/configuring/custom-audiences.
+ deprecation_message: '`custom_audiences` is deprecated since it is not applicable to WorkerPool resource and will be removed in a future major release.'
item_type:
type: String
- name: 'scaling'
diff --git a/mmv1/products/cloudtasks/Queue.yaml b/mmv1/products/cloudtasks/Queue.yaml
index 273f461dceb4..c9838e8e6602 100644
--- a/mmv1/products/cloudtasks/Queue.yaml
+++ b/mmv1/products/cloudtasks/Queue.yaml
@@ -16,6 +16,8 @@ name: 'Queue'
description: |
A named resource to which messages are sent by publishers.
docs: +include_in_tgc_next: true +tgc_include_handwritten_tests: true id_format: 'projects/{{project}}/locations/{{location}}/queues/{{name}}' base_url: 'projects/{{project}}/locations/{{location}}/queues' update_verb: 'PATCH' @@ -90,6 +92,7 @@ properties: Overrides for task-level appEngineRouting. These settings apply only to App Engine tasks in this queue custom_flatten: 'templates/terraform/custom_flatten/cloudtasks_queue_appenginerouting.go.tmpl' + tgc_ignore_terraform_custom_flatten: true properties: - name: 'service' type: String @@ -183,6 +186,7 @@ properties: If zero, then the task age is unlimited. default_from_api: true diff_suppress_func: 'suppressOmittedMaxDuration' + custom_tgc_flatten: 'templates/tgc_next/custom_flatten/cloudtasks_queue_max_retry_duration.go.tmpl' - name: 'minBackoff' type: String description: | @@ -358,6 +362,7 @@ properties: Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. conflicts: - oidcToken + is_missing_in_cai: true properties: - name: 'serviceAccountEmail' type: String diff --git a/mmv1/products/colab/RuntimeTemplate.yaml b/mmv1/products/colab/RuntimeTemplate.yaml index e4008c309467..2fb3150b9c75 100644 --- a/mmv1/products/colab/RuntimeTemplate.yaml +++ b/mmv1/products/colab/RuntimeTemplate.yaml @@ -209,3 +209,20 @@ properties: If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not.' + - name: 'postStartupScriptConfig' + type: NestedObject + description: 'Post startup script config.' + properties: + - name: 'postStartupScript' + type: String + description: 'Post startup script to run after runtime is started.' + - name: 'postStartupScriptUrl' + type: String + description: 'Post startup script url to download. Example: https://bucket/script.sh.' + - name: 'postStartupScriptBehavior' + type: Enum + description: 'Post startup script behavior that defines download and execution behavior.' + enum_values: + - 'RUN_ONCE' + - 'RUN_EVERY_START' + - 'DOWNLOAD_AND_RUN_EVERY_START' diff --git a/mmv1/products/compute/BackendBucket.yaml b/mmv1/products/compute/BackendBucket.yaml index aa6136032376..5586df9c740b 100644 --- a/mmv1/products/compute/BackendBucket.yaml +++ b/mmv1/products/compute/BackendBucket.yaml @@ -255,6 +255,7 @@ properties: description: | The security policy associated with this backend bucket. diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + is_missing_in_cai: true - name: 'customResponseHeaders' type: Array description: | diff --git a/mmv1/products/compute/BackendService.yaml b/mmv1/products/compute/BackendService.yaml index a8308c62ccd3..9e1e09b64fad 100644 --- a/mmv1/products/compute/BackendService.yaml +++ b/mmv1/products/compute/BackendService.yaml @@ -44,6 +44,7 @@ async: resource_inside_response: false collection_url_key: 'items' include_in_tgc_next: true +tgc_include_handwritten_tests: true iam_policy: allowed_iam_role: 'roles/compute.admin' parent_resource_attribute: 'name' @@ -362,6 +363,7 @@ properties: field can only be used for a global or regional backend service with the loadBalancingScheme set to EXTERNAL_MANAGED, INTERNAL_MANAGED INTERNAL_SELF_MANAGED. + is_missing_in_cai: true - name: 'dryRun' type: Boolean required: true @@ -568,6 +570,7 @@ properties: type: NestedObject description: 'Cloud CDN configuration for this BackendService.' 
default_from_api: true + is_missing_in_cai: true properties: - name: 'requestCoalescing' type: Boolean diff --git a/mmv1/products/compute/CrossSiteNetwork.yaml b/mmv1/products/compute/CrossSiteNetwork.yaml index 3251f609287b..2d1286136f07 100644 --- a/mmv1/products/compute/CrossSiteNetwork.yaml +++ b/mmv1/products/compute/CrossSiteNetwork.yaml @@ -15,11 +15,10 @@ name: 'CrossSiteNetwork' description: | Represents a cross-site-network resource. A CrossSiteNetwork is used to establish L2 connectivity between groups of Interconnects. -min_version: beta references: guides: 'Create a Cross-Site Interconnect': 'https://cloud.google.com/network-connectivity/docs/interconnect/how-to/cross-site/create-network' - api: 'https://cloud.google.com/compute/docs/reference/rest/beta/crossSiteNetworks' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/crossSiteNetworks' docs: base_url: 'projects/{{project}}/global/crossSiteNetworks' self_link: 'projects/{{project}}/global/crossSiteNetworks/{{name}}' @@ -41,7 +40,6 @@ examples: vars: name: 'test-cross-site-network' description: 'Example cross site network' - min_version: 'beta' test_env_vars: project: 'PROJECT_NAME' parameters: diff --git a/mmv1/products/compute/Disk.yaml b/mmv1/products/compute/Disk.yaml index cddc35f86ecc..5d047966ca4d 100644 --- a/mmv1/products/compute/Disk.yaml +++ b/mmv1/products/compute/Disk.yaml @@ -51,6 +51,7 @@ async: resource_inside_response: false collection_url_key: 'items' include_in_tgc_next: true +tgc_include_handwritten_tests: true iam_policy: parent_resource_attribute: 'name' base_url: 'projects/{{project}}/zones/{{zone}}/disks/{{name}}' @@ -198,6 +199,7 @@ properties: Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. sensitive: true + is_missing_in_cai: true - name: 'rsaEncryptedKey' type: String description: | @@ -205,6 +207,7 @@ properties: customer-supplied encryption key to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. 
sensitive: true + is_missing_in_cai: true - name: 'sha256' type: String description: | diff --git a/mmv1/products/compute/ExternalVpnGateway.yaml b/mmv1/products/compute/ExternalVpnGateway.yaml index 65ee79b0e172..06c6b5a9c6b1 100644 --- a/mmv1/products/compute/ExternalVpnGateway.yaml +++ b/mmv1/products/compute/ExternalVpnGateway.yaml @@ -36,6 +36,7 @@ async: resource_inside_response: false collection_url_key: 'items' include_in_tgc_next: true +tgc_include_handwritten_tests: true custom_code: examples: - name: 'external_vpn_gateway' diff --git a/mmv1/products/compute/Firewall.yaml b/mmv1/products/compute/Firewall.yaml index 37777affedd6..b0735d24d610 100644 --- a/mmv1/products/compute/Firewall.yaml +++ b/mmv1/products/compute/Firewall.yaml @@ -51,6 +51,7 @@ async: resource_inside_response: false collection_url_key: 'items' include_in_tgc_next: true +tgc_include_handwritten_tests: true custom_code: extra_schema_entry: 'templates/terraform/extra_schema_entry/firewall.tmpl' constants: 'templates/terraform/constants/firewall.tmpl' diff --git a/mmv1/products/compute/FirewallPolicy.yaml b/mmv1/products/compute/FirewallPolicy.yaml index 5ac1ea4d6c1c..c2532d587976 100644 --- a/mmv1/products/compute/FirewallPolicy.yaml +++ b/mmv1/products/compute/FirewallPolicy.yaml @@ -42,6 +42,7 @@ custom_code: custom_diff: - 'tpgresource.DefaultProviderProject' include_in_tgc_next: true +tgc_include_handwritten_tests: true examples: - name: 'firewall_policy' primary_resource_id: 'default' diff --git a/mmv1/products/compute/FutureReservation.yaml b/mmv1/products/compute/FutureReservation.yaml index 6ba11b95448d..947d4cf4ce0e 100644 --- a/mmv1/products/compute/FutureReservation.yaml +++ b/mmv1/products/compute/FutureReservation.yaml @@ -75,6 +75,9 @@ examples: project: 'PROJECT_NAME' org_id: 'ORG_ID' billing_account: 'BILLING_ACCT' + # Skip in VCR until the test issue is resolved + # https://github.com/hashicorp/terraform-provider-google/issues/25087 + skip_vcr: true - name: 'shared_future_reservation' primary_resource_id: 'gce_future_reservation' vars: @@ -89,6 +92,9 @@ examples: org_id: 'ORG_TARGET' billing_account: 'BILLING_ACCT' exclude_docs: true + # Skip in VCR until the test issue is resolved + # https://github.com/hashicorp/terraform-provider-google/issues/25063 + skip_vcr: true parameters: - name: 'name' type: String diff --git a/mmv1/products/compute/GlobalAddress.yaml b/mmv1/products/compute/GlobalAddress.yaml index f2e2ce92cd24..57bf0dc153d8 100644 --- a/mmv1/products/compute/GlobalAddress.yaml +++ b/mmv1/products/compute/GlobalAddress.yaml @@ -40,6 +40,7 @@ async: resource_inside_response: false collection_url_key: 'items' include_in_tgc_next: true +tgc_include_handwritten_tests: true custom_code: pre_create: 'templates/terraform/pre_create/compute_global_address.go.tmpl' post_create: 'templates/terraform/post_create/labels.tmpl' @@ -103,6 +104,7 @@ properties: description: | The IP Version that will be used by this address. The default value is `IPV4`. 
diff_suppress_func: 'tpgresource.EmptyOrDefaultStringSuppress("IPV4")' + is_missing_in_cai: true enum_values: - 'IPV4' - 'IPV6' diff --git a/mmv1/products/compute/HaVpnGateway.yaml b/mmv1/products/compute/HaVpnGateway.yaml index a3b9e5f65f75..049fdbb6b888 100644 --- a/mmv1/products/compute/HaVpnGateway.yaml +++ b/mmv1/products/compute/HaVpnGateway.yaml @@ -41,6 +41,8 @@ async: resource_inside_response: false collection_url_key: 'items' custom_code: +include_in_tgc_next: true +tgc_include_handwritten_tests: true schema_version: 1 state_upgraders: true examples: diff --git a/mmv1/products/compute/HealthCheck.yaml b/mmv1/products/compute/HealthCheck.yaml index 2a6682a2c433..539766fc927d 100644 --- a/mmv1/products/compute/HealthCheck.yaml +++ b/mmv1/products/compute/HealthCheck.yaml @@ -52,6 +52,7 @@ async: resource_inside_response: false collection_url_key: 'items' include_in_tgc_next: true +tgc_include_handwritten_tests: true custom_code: constants: 'templates/terraform/constants/health_check.tmpl' encoder: 'templates/terraform/encoders/health_check_type.tmpl' @@ -60,6 +61,11 @@ custom_diff: sweeper: dependencies: - "google_compute_subnetwork" +tgc_tests: + - name: 'TestAccComputeHealthCheck_grpcWithTls_create' + skip: 'grpcTlsHealthCheck is not in CAI asset, but is required in this test.' + - name: 'TestAccComputeHealthCheck_grpcWithTls_update' + skip: 'grpcTlsHealthCheck is not in CAI asset, but is required in this test.' examples: - name: 'health_check_tcp' primary_resource_id: 'tcp-health-check' diff --git a/mmv1/products/compute/Image.yaml b/mmv1/products/compute/Image.yaml index f3e367237922..7b340e318a35 100644 --- a/mmv1/products/compute/Image.yaml +++ b/mmv1/products/compute/Image.yaml @@ -58,7 +58,6 @@ iam_policy: include_in_tgc_next: true tgc_include_handwritten_tests: true custom_code: - tgc_decoder: 'templates/tgc_next/decoders/compute_image.go.tmpl' examples: - name: 'image_basic' primary_resource_id: 'example' diff --git a/mmv1/products/compute/Interconnect.yaml b/mmv1/products/compute/Interconnect.yaml index 9ad6f58f0907..ec5a8539d6b2 100644 --- a/mmv1/products/compute/Interconnect.yaml +++ b/mmv1/products/compute/Interconnect.yaml @@ -433,7 +433,6 @@ properties: - 'MACSEC' - name: 'wireGroups' type: Array - min_version: beta description: | A list of the URLs of all CrossSiteNetwork WireGroups configured to use this Interconnect. The Interconnect cannot be deleted if this list is non-empty. output: true diff --git a/mmv1/products/compute/InterconnectAttachment.yaml b/mmv1/products/compute/InterconnectAttachment.yaml index 6de7abf63d20..60919ed68f5d 100644 --- a/mmv1/products/compute/InterconnectAttachment.yaml +++ b/mmv1/products/compute/InterconnectAttachment.yaml @@ -69,7 +69,6 @@ examples: interconnect_attachment_name: 'test-custom-ranges-interconnect-attachment' router_name: 'test-router' network_name: 'test-network' - min_version: beta parameters: - name: 'region' type: ResourceRef @@ -367,28 +366,24 @@ properties: Single IPv4 address + prefix length to be configured on the cloud router interface for this interconnect attachment. Example: 203.0.113.1/29 immutable: true - min_version: beta - name: 'candidateCustomerRouterIpAddress' type: String description: | Single IPv4 address + prefix length to be configured on the customer router interface for this interconnect attachment. 
Example: 203.0.113.2/29 immutable: true - min_version: beta - name: 'candidateCloudRouterIpv6Address' type: String description: | Single IPv6 address + prefix length to be configured on the cloud router interface for this interconnect attachment. Example: 2001:db8::1/125 immutable: true - min_version: beta - name: 'candidateCustomerRouterIpv6Address' type: String description: | Single IPv6 address + prefix length to be configured on the customer router interface for this interconnect attachment. Example: 2001:db8::2/125 immutable: true - min_version: beta - name: 'attachmentGroup' type: String description: | diff --git a/mmv1/products/compute/NetworkEndpoints.yaml b/mmv1/products/compute/NetworkEndpoints.yaml index 872e9352911b..4bc74355c5b1 100644 --- a/mmv1/products/compute/NetworkEndpoints.yaml +++ b/mmv1/products/compute/NetworkEndpoints.yaml @@ -113,11 +113,12 @@ properties: properties: - name: 'instance' type: ResourceRef + default_from_api: true description: | The name for a specific VM instance that the IP address belongs to. This is required for network endpoints of type GCE_VM_IP_PORT. The instance must be in the same zone as the network endpoint group. - custom_expand: 'templates/terraform/custom_expand/resource_from_self_link.go.tmpl' + custom_expand: 'templates/terraform/custom_expand/resource_from_self_link_nullable.go.tmpl' resource: 'Instance' imports: 'name' - name: 'port' diff --git a/mmv1/products/compute/NodeTemplate.yaml b/mmv1/products/compute/NodeTemplate.yaml index c5eb4ebb2461..819d804a7e6f 100644 --- a/mmv1/products/compute/NodeTemplate.yaml +++ b/mmv1/products/compute/NodeTemplate.yaml @@ -39,6 +39,8 @@ async: resource_inside_response: false collection_url_key: 'items' custom_code: +include_in_tgc_next: true +tgc_include_handwritten_tests: true examples: - name: 'node_template_basic' primary_resource_id: 'template' diff --git a/mmv1/products/compute/OrganizationSecurityPolicyAssociation.yaml b/mmv1/products/compute/OrganizationSecurityPolicyAssociation.yaml index 3f316dacc0d5..a9a2c1fa797b 100644 --- a/mmv1/products/compute/OrganizationSecurityPolicyAssociation.yaml +++ b/mmv1/products/compute/OrganizationSecurityPolicyAssociation.yaml @@ -17,11 +17,10 @@ api_resource_type_kind: SecurityPolicy api_resource_field: 'associations' description: | An association for the OrganizationSecurityPolicy. -min_version: 'beta' references: guides: 'Associating a policy with the organization or folder': 'https://cloud.google.com/vpc/docs/using-firewall-policies#associate' - api: 'https://cloud.google.com/compute/docs/reference/rest/beta/organizationSecurityPolicies/addAssociation' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/organizationSecurityPolicies/addAssociation' docs: id_format: '{{policy_id}}/association/{{name}}' base_url: '{{policy_id}}' @@ -45,7 +44,8 @@ read_error_transform: 'transformSecurityPolicyAssociationReadError' examples: - name: 'organization_security_policy_association_basic' primary_resource_id: 'policy' - min_version: 'beta' + vars: + short_name: "my-short-name" test_env_vars: org_id: 'ORG_ID' parameters: @@ -54,7 +54,6 @@ parameters: description: | The security policy ID of the association. api_name: securityPolicyId - min_version: 'beta' url_param_only: true required: true properties: @@ -62,17 +61,14 @@ properties: type: String description: | The name for an association. - min_version: 'beta' required: true - name: 'attachmentId' type: String description: | The resource that the security policy is attached to. 
- min_version: 'beta' required: true - name: 'displayName' type: String description: | The display name of the security policy of the association. - min_version: 'beta' output: true diff --git a/mmv1/products/compute/RegionAutoscaler.yaml b/mmv1/products/compute/RegionAutoscaler.yaml index 025dc15fff4d..2dc28458646d 100644 --- a/mmv1/products/compute/RegionAutoscaler.yaml +++ b/mmv1/products/compute/RegionAutoscaler.yaml @@ -44,6 +44,7 @@ async: resource_inside_response: false collection_url_key: 'items' include_in_tgc_next: true +tgc_include_handwritten_tests: true custom_code: examples: - name: 'region_autoscaler_basic' diff --git a/mmv1/products/compute/RegionCommitment.yaml b/mmv1/products/compute/RegionCommitment.yaml index d9254e00a3ea..fcf30dc4f1ac 100644 --- a/mmv1/products/compute/RegionCommitment.yaml +++ b/mmv1/products/compute/RegionCommitment.yaml @@ -157,7 +157,7 @@ properties: description: | The type of commitment, which affects the discount rate and the eligible resources. The type could be one of the following value: `MEMORY_OPTIMIZED`, `ACCELERATOR_OPTIMIZED`, - `GENERAL_PURPOSE_N1`, `GENERAL_PURPOSE_N2`, `GENERAL_PURPOSE_N2D`, `GENERAL_PURPOSE_E2`, + `GENERAL_PURPOSE`, `GENERAL_PURPOSE_N2`, `GENERAL_PURPOSE_N2D`, `GENERAL_PURPOSE_E2`, `GENERAL_PURPOSE_T2D`, `GENERAL_PURPOSE_C3`, `COMPUTE_OPTIMIZED_C2`, `COMPUTE_OPTIMIZED_C2D` and `GRAPHICS_OPTIMIZED_G2` default_from_api: true diff --git a/mmv1/products/compute/RegionHealthCheck.yaml b/mmv1/products/compute/RegionHealthCheck.yaml index 8fc9d2b1c5f9..4f6d21e96375 100644 --- a/mmv1/products/compute/RegionHealthCheck.yaml +++ b/mmv1/products/compute/RegionHealthCheck.yaml @@ -58,9 +58,9 @@ tgc_tests: - name: 'TestAccComputeRegionHealthCheck_typeTransition' skip: 'Test data has mismatched steps' - name: 'TestAccComputeRegionHealthCheck_grpcWithTls_create' - skip: 'Test data has mismatched steps' + skip: 'grpcTlsHealthCheck is not in CAI asset, but is required in this test.' - name: 'TestAccComputeRegionHealthCheck_grpcWithTls_update' - skip: 'Test data has mismatched steps' + skip: 'grpcTlsHealthCheck is not in CAI asset, but is required in this test.' sweeper: url_substitutions: - region: "us-central1" diff --git a/mmv1/products/compute/RegionHealthSource.yaml b/mmv1/products/compute/RegionHealthSource.yaml new file mode 100644 index 000000000000..d5183df80c5e --- /dev/null +++ b/mmv1/products/compute/RegionHealthSource.yaml @@ -0,0 +1,157 @@ +# Copyright 2025 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- +# API resource name +name: 'RegionHealthSource' +# Resource description for the provider documentation. +description: | + A health source resource specifies the source resources and the health + aggregation policy applied to the source resources to determine the + aggregated health status. 
+references: + guides: + 'Health checks overview': 'https://cloud.google.com/load-balancing/docs/health-check-concepts' + api: 'https://cloud.google.com/compute/docs/reference/rest/beta/regionHealthSources' +min_version: beta + +# URL for the resource's standard List method. https://google.aip.dev/132 +# Terraform field names enclosed in double curly braces are replaced with +# the field values from the resource at runtime. +base_url: 'projects/{{project}}/regions/{{region}}/healthSources' +# URL for the resource's standard Get method. https://google.aip.dev/131 +# Terraform field names enclosed in double curly braces are replaced with +# the field values from the resource at runtime. +self_link: 'projects/{{project}}/regions/{{region}}/healthSources/{{name}}' + +# URL for the resource's standard Create method, including query parameters. +# https://google.aip.dev/133 +# Terraform field names enclosed in double curly braces are replaced with +# the field values from the resource at runtime. +create_url: 'projects/{{project}}/regions/{{region}}/healthSources' + +# Overrides the URL for the resource's standard Update method. (If unset, the +# self_link URL is used by default.) https://google.aip.dev/134 +# Terraform field names enclosed in double curly braces are replaced with +# the field values from the resource at runtime. +update_url: 'projects/{{project}}/regions/{{region}}/healthSources/{{name}}' +# The HTTP verb used to update a resource. Allowed values: :POST, :PUT, :PATCH. Default: :PUT. +update_verb: 'PATCH' +# If true, the resource sets an `updateMask` query parameter listing modified +# fields when updating the resource. If false, it does not. +update_mask: true + +# If true, code for handling long-running operations is generated along with +# the resource. If false, that code is not generated. +autogen_async: true +# Sets parameters for handling operations returned by the API. +async: + # Overrides which API calls return operations. Default: ['create', + # 'update', 'delete'] + # actions: ['create', 'update', 'delete'] + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + +examples: + - name: "compute_region_health_source_basic" + primary_resource_id: "example_test_health_source" + vars: + name: "test-health-source" + description: "Example health source basic" + min_version: 'beta' + test_env_vars: + project: 'PROJECT_NAME' + +parameters: + - name: 'region' + type: String + required: true + immutable: true + url_param_only: true + description: | + URL of the region where the health source resides. + - name: 'name' + type: String + required: true + immutable: true + description: | + Name of the resource. Provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with RFC1035. + Specifically, the name must be 1-63 characters long and match the regular + expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first + character must be a lowercase letter, and all following characters must + be a dash, lowercase letter, or digit, except the last character, which + cannot be a dash. + +properties: + - name: 'description' + type: String + description: | + An optional description of this resource. Provide this property when you + create the resource. + - name: 'sourceType' + type: Enum + description: | + Specifies the type of the `HealthSource`. The only allowed value + is `BACKEND_SERVICE`. Must be specified when the + `HealthSource` is created, and cannot be mutated. 
+ enum_values: + - 'BACKEND_SERVICE' + required: true + immutable: true + - name: 'sources' + type: Array + item_type: + name: 'source' + type: ResourceRef + resource: 'RegionBackendService' + imports: 'selfLink' + description: | + URLs to the source resources. Must be size 1. Must be a + `BackendService` if the `sourceType` is `BACKEND_SERVICE`. The + `BackendService` must have load balancing scheme + `INTERNAL` or `INTERNAL_MANAGED` and must be regional + and in the same region as the `HealthSource` (cross-region + deployment for `INTERNAL_MANAGED` is not supported). The + `BackendService` may use only IGs, MIGs, or NEGs of type + `GCE_VM_IP` or `GCE_VM_IP_PORT`. The + `BackendService` may not use `haPolicy`. Can be + mutated. + min_size: 1 + max_size: 1 + - name: 'healthAggregationPolicy' + type: ResourceRef + resource: 'RegionHealthAggregationPolicy' + imports: 'selfLink' + description: | + URL to the `HealthAggregationPolicy` resource. Must be set. Must + be regional and in the same region as the `HealthSource`. Can be + mutated. + - name: 'id' + type: String + description: 'The unique identifier for the resource. This identifier is defined by the server.' + output: true + - name: 'creationTimestamp' + type: String + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'selfLinkWithId' + type: String + description: 'Server-defined URL with id for the resource.' + output: true + - name: 'fingerprint' + type: Fingerprint + description: | + Fingerprint of this resource. A hash of the contents stored in this object. + This field is used in optimistic locking. + output: true diff --git a/mmv1/products/compute/RegionNetworkEndpoint.yaml b/mmv1/products/compute/RegionNetworkEndpoint.yaml index 090b95ebcb53..2d0370f5adb2 100644 --- a/mmv1/products/compute/RegionNetworkEndpoint.yaml +++ b/mmv1/products/compute/RegionNetworkEndpoint.yaml @@ -74,7 +74,6 @@ examples: network_name: 'network' - name: 'region_network_endpoint_portmap' primary_resource_id: 'region_network_endpoint_portmap' - min_version: 'beta' vars: network_name: 'network' subnetwork_name: 'subnetwork' @@ -140,7 +139,6 @@ properties: type: Integer description: | Client destination port for the `GCE_VM_IP_PORTMAP` NEG. - min_version: 'beta' custom_flatten: 'templates/terraform/custom_flatten/float64_to_int.go.tmpl' - name: 'instance' type: ResourceRef @@ -149,4 +147,3 @@ properties: This is required for network endpoints of type GCE_VM_IP_PORTMAP. resource: 'Instance' imports: 'name' - min_version: 'beta' diff --git a/mmv1/products/compute/RegionSslCertificate.yaml b/mmv1/products/compute/RegionSslCertificate.yaml index 629082845043..91d75d17945b 100644 --- a/mmv1/products/compute/RegionSslCertificate.yaml +++ b/mmv1/products/compute/RegionSslCertificate.yaml @@ -23,6 +23,8 @@ references: guides: 'Official Documentation': 'https://cloud.google.com/load-balancing/docs/ssl-certificates' api: 'https://cloud.google.com/compute/docs/reference/rest/v1/regionSslCertificates' +include_in_tgc_next: true +tgc_include_handwritten_tests: true docs: optional_properties: | * `name_prefix` - (Optional) Creates a unique name beginning with the @@ -96,6 +98,7 @@ properties: The chain must include at least one intermediate cert. required: true sensitive: true + is_missing_in_cai: true - name: 'creationTimestamp' type: Time description: 'Creation timestamp in RFC3339 text format.' 
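The new RegionHealthSource.yaml above should generate a beta-only google_compute_region_health_source resource. A minimal HCL sketch of how it might be used, assuming the usual mmv1 snake_case field mapping; the referenced regional backend service and health aggregation policy resources are illustrative and not taken from this diff:

resource "google_compute_region_health_source" "example_test_health_source" {
  provider    = google-beta
  name        = "test-health-source"
  region      = "us-central1"
  description = "Example health source basic"

  # Only BACKEND_SERVICE is allowed; the field is immutable once set.
  source_type = "BACKEND_SERVICE"

  # Exactly one regional INTERNAL/INTERNAL_MANAGED backend service (min_size and max_size are both 1).
  sources = [google_compute_region_backend_service.default.self_link]

  # Required reference to a RegionHealthAggregationPolicy in the same region.
  health_aggregation_policy = google_compute_region_health_aggregation_policy.default.self_link
}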
@@ -137,3 +140,4 @@ properties: sensitive: true diff_suppress_func: 'sha256DiffSuppress' custom_flatten: 'templates/terraform/custom_flatten/sha256.tmpl' + is_missing_in_cai: true diff --git a/mmv1/products/compute/ResourcePolicy.yaml b/mmv1/products/compute/ResourcePolicy.yaml index 9b3f3a15e35b..9100ec32490e 100644 --- a/mmv1/products/compute/ResourcePolicy.yaml +++ b/mmv1/products/compute/ResourcePolicy.yaml @@ -38,6 +38,8 @@ async: collection_url_key: 'items' custom_code: constants: 'templates/terraform/constants/compute_resource_policy.go.tmpl' +include_in_tgc_next: true +tgc_include_handwritten_tests: true sweeper: url_substitutions: - region: "us-central1" diff --git a/mmv1/products/compute/Route.yaml b/mmv1/products/compute/Route.yaml index e8164df51c12..94eb90b132e5 100644 --- a/mmv1/products/compute/Route.yaml +++ b/mmv1/products/compute/Route.yaml @@ -68,9 +68,12 @@ custom_code: extra_schema_entry: 'templates/terraform/extra_schema_entry/route.tmpl' constants: 'templates/terraform/constants/compute_route.go.tmpl' decoder: 'templates/terraform/decoders/route.tmpl' + tgc_decoder: 'templates/tgc_next/decoders/compute_route.tmpl' error_retry_predicates: - 'transport_tpg.IsPeeringOperationInProgress' +include_in_tgc_next: true +tgc_include_handwritten_tests: true examples: - name: 'route_basic' primary_resource_id: 'default' @@ -188,6 +191,7 @@ properties: - 'next_hop_vpn_tunnel' - 'next_hop_ilb' custom_expand: 'templates/terraform/custom_expand/route_instance.tmpl' + custom_tgc_expand: 'templates/terraform/custom_expand/resourceref_with_validation.go.tmpl' resource: 'Instance' imports: 'selfLink' - name: 'nextHopIp' diff --git a/mmv1/products/compute/Router.yaml b/mmv1/products/compute/Router.yaml index 839f92515bdc..7f82e6d7b59e 100644 --- a/mmv1/products/compute/Router.yaml +++ b/mmv1/products/compute/Router.yaml @@ -44,6 +44,8 @@ custom_code: constants: 'templates/terraform/constants/router.go.tmpl' custom_diff: - 'resourceComputeRouterCustomDiff' +include_in_tgc_next: true +tgc_include_handwritten_tests: true sweeper: prefixes: - "swg-autogen-router" # Secure Web Proxy(SWP) auto-generated router prefix. diff --git a/mmv1/products/compute/Snapshot.yaml b/mmv1/products/compute/Snapshot.yaml index 6281119b43b4..1d2f4653d50c 100644 --- a/mmv1/products/compute/Snapshot.yaml +++ b/mmv1/products/compute/Snapshot.yaml @@ -69,6 +69,13 @@ examples: vars: snapshot_name: 'my-snapshot' disk_name: 'debian-disk' + - name: 'snapshot_basic_2' + primary_resource_id: 'snapshot' + primary_resource_name: 'fmt.Sprintf("tf-test-my-snapshot%s", context["random_suffix"])' + min_version: 'beta' + vars: + snapshot_name: 'my-snapshot' + disk_name: 'debian-disk' - name: 'snapshot_chainname' primary_resource_id: 'snapshot' primary_resource_name: 'fmt.Sprintf("tf-test-snapshot-chainname%s", context["random_suffix"])' @@ -263,6 +270,12 @@ properties: update_url: 'projects/{{project}}/global/snapshots/{{name}}/setLabels' update_verb: 'POST' key_expander: '' + - name: 'guestFlush' + type: Boolean + description: | + Whether to attempt an application consistent snapshot by informing the OS to prepare for the snapshot process. 
+ ignore_read: true + min_version: 'beta' - name: 'snapshotType' type: Enum description: | diff --git a/mmv1/products/compute/StoragePool.yaml b/mmv1/products/compute/StoragePool.yaml index c6e457c51a60..f6e67a2a4ec3 100644 --- a/mmv1/products/compute/StoragePool.yaml +++ b/mmv1/products/compute/StoragePool.yaml @@ -292,3 +292,5 @@ virtual_fields: When the field is set to false, deleting the StoragePool is allowed. custom_code: pre_delete: templates/terraform/pre_delete/compute_storage_pool.go.tmpl +include_in_tgc_next: true +tgc_include_handwritten_tests: true diff --git a/mmv1/products/compute/Subnetwork.yaml b/mmv1/products/compute/Subnetwork.yaml index 843621202b30..a153de32db49 100644 --- a/mmv1/products/compute/Subnetwork.yaml +++ b/mmv1/products/compute/Subnetwork.yaml @@ -47,9 +47,6 @@ base_url: 'projects/{{project}}/regions/{{region}}/subnetworks' has_self_link: true include_in_tgc_next: true tgc_include_handwritten_tests: true -tgc_tests: - - name: 'TestAccComputeSubnetwork_secondaryIpRanges' - skip: 'Test data has mismatched steps' immutable: true timeouts: insert_minutes: 20 @@ -481,6 +478,7 @@ properties: update_url: 'projects/{{project}}/regions/{{region}}/subnetworks/{{name}}' update_verb: 'PATCH' fingerprint_name: 'fingerprint' + is_missing_in_cai: true enum_values: - 'EXTERNAL' - 'INTERNAL' diff --git a/mmv1/products/compute/UrlMap.yaml b/mmv1/products/compute/UrlMap.yaml index c316a92dd187..7d88cdb60d00 100644 --- a/mmv1/products/compute/UrlMap.yaml +++ b/mmv1/products/compute/UrlMap.yaml @@ -38,10 +38,6 @@ async: collection_url_key: 'items' include_in_tgc_next: true tgc_include_handwritten_tests: true -tgc_tests: - - name: 'TestAccComputeUrlMap_trafficDirectorRemoveRouteRule' - skip: 'Test data has mismatched steps' -custom_code: examples: - name: 'url_map_bucket_and_service' primary_resource_id: 'urlmap' diff --git a/mmv1/products/compute/VpnTunnel.yaml b/mmv1/products/compute/VpnTunnel.yaml index 3695efcd378a..edc7b639a2e2 100644 --- a/mmv1/products/compute/VpnTunnel.yaml +++ b/mmv1/products/compute/VpnTunnel.yaml @@ -242,25 +242,21 @@ properties: - name: 'encryption' type: Array description: 'Encryption algorithms.' - is_set: true item_type: type: String - name: 'integrity' type: Array description: 'Integrity algorithms.' - is_set: true item_type: type: String - name: 'prf' type: Array description: 'Pseudo-random functions.' - is_set: true item_type: type: String - name: 'dh' type: Array description: 'Diffie-Hellman groups.' - is_set: true item_type: type: String - name: 'phase2' @@ -270,18 +266,15 @@ properties: - name: 'encryption' type: Array description: 'Encryption algorithms.' - is_set: true item_type: type: String - name: 'integrity' type: Array description: 'Integrity algorithms.' - is_set: true item_type: type: String - name: 'pfs' type: Array description: 'Perfect forward secrecy groups.' 
- is_set: true item_type: type: String diff --git a/mmv1/products/compute/WireGroup.yaml b/mmv1/products/compute/WireGroup.yaml index 39e5dd1d7743..9ef44ddbf9a2 100644 --- a/mmv1/products/compute/WireGroup.yaml +++ b/mmv1/products/compute/WireGroup.yaml @@ -20,8 +20,7 @@ description: | references: guides: 'Create a WireGroup': 'https://cloud.google.com/network-connectivity/docs/interconnect/how-to/cross-site/modify-network#add-wire-group' - api: 'https://cloud.google.com/compute/docs/reference/rest/beta/wireGroups' -min_version: beta + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/wireGroups' docs: id_format: 'projects/{{project}}/global/crossSiteNetworks/{{cross_site_network}}/wireGroups/{{name}}' base_url: 'projects/{{project}}/global/crossSiteNetworks/{{cross_site_network}}/wireGroups' @@ -48,9 +47,17 @@ examples: name: 'test-wire-group' description: 'Example Wire Group' cross_site_network: 'test-cross-site-network' - min_version: 'beta' test_env_vars: project: 'PROJECT_NAME' + - name: 'compute_wire_group_basic_beta' + primary_resource_id: 'example-test-wire-group-beta' + vars: + name: 'test-wire-group-beta' + description: 'Example Wire Group Beta' + cross_site_network: 'test-cross-site-network-beta' + test_env_vars: + project: 'PROJECT_NAME' + min_version: 'beta' parameters: - name: 'crossSiteNetwork' type: ResourceRef @@ -62,7 +69,6 @@ parameters: imports: 'name' diff_suppress_func: 'tpgresource.CompareResourceNames' custom_expand: 'templates/terraform/custom_expand/resourceref_with_validation.go.tmpl' - min_version: beta properties: - name: 'description' type: String @@ -106,7 +112,7 @@ properties: properties: - name: interconnect type: string - - name: vlan_tags + - name: vlanTags type: Array description: | VLAN tags for the interconnect. @@ -121,6 +127,7 @@ properties: type: NestedObject description: | Properties specific to the wire group. 
+ min_version: 'beta' properties: - name: type type: enum diff --git a/mmv1/products/datacatalog/Tag.yaml b/mmv1/products/datacatalog/Tag.yaml index 7024dc1f308e..f293151b6e50 100644 --- a/mmv1/products/datacatalog/Tag.yaml +++ b/mmv1/products/datacatalog/Tag.yaml @@ -141,7 +141,7 @@ properties: name: field_value type: NestedObject properties: - - name: 'display_name' + - name: 'displayName' type: String description: | The display name of this field diff --git a/mmv1/products/datafusion/Instance.yaml b/mmv1/products/datafusion/Instance.yaml index 5991a9e1ab96..8add16232b24 100644 --- a/mmv1/products/datafusion/Instance.yaml +++ b/mmv1/products/datafusion/Instance.yaml @@ -39,7 +39,7 @@ iam_policy: method_name_separator: ':' parent_resource_attribute: 'name' import_format: - - 'projects/{{project}}/locations/{{location}}/instances/{{name}}' + - 'projects/{{project}}/locations/{{region}}/instances/{{name}}' - '{{name}}' custom_code: constants: 'templates/terraform/constants/data_fusion_instance_option.go.tmpl' diff --git a/mmv1/products/dataplex/Datascan.yaml b/mmv1/products/dataplex/Datascan.yaml index 722c6a98a5ed..53bbd1141bfa 100644 --- a/mmv1/products/dataplex/Datascan.yaml +++ b/mmv1/products/dataplex/Datascan.yaml @@ -79,6 +79,13 @@ examples: test_env_vars: project_name: 'PROJECT_NAME' exclude_docs: true + - name: 'dataplex_datascan_onetime_profile' + primary_resource_id: 'onetime_profile' + primary_resource_name: 'fmt.Sprintf("tf-test-dataprofile-onetime%s", context["random_suffix"])' + vars: + datascan_name: 'dataprofile-onetime' + test_env_vars: + project_name: 'PROJECT_NAME' - name: 'dataplex_datascan_basic_quality' primary_resource_id: 'basic_quality' vars: @@ -99,6 +106,13 @@ examples: test_env_vars: project_name: 'PROJECT_NAME' exclude_docs: true + - name: 'dataplex_datascan_onetime_quality' + primary_resource_id: 'onetime_quality' + vars: + datascan_name: 'dataquality-onetime' + test_env_vars: + project_name: 'PROJECT_NAME' + exclude_test: true - name: 'dataplex_datascan_basic_discovery' primary_resource_id: 'basic_discovery' vars: @@ -113,6 +127,14 @@ examples: test_env_vars: project_name: 'PROJECT_NAME' location: 'REGION' + - name: 'dataplex_datascan_onetime_discovery' + primary_resource_id: 'onetime_discovery' + vars: + datascan_name: 'datadiscovery-onetime' + test_env_vars: + project_name: 'PROJECT_NAME' + location: 'REGION' + exclude_test: true - name: 'dataplex_datascan_documentation' primary_resource_id: 'documentation' vars: @@ -120,6 +142,13 @@ examples: test_env_vars: project_name: 'PROJECT_NAME' location: 'REGION' + - name: 'dataplex_datascan_onetime_documentation' + primary_resource_id: 'onetime_documentation' + vars: + datascan_name: 'datadocumentation-onetime' + test_env_vars: + project_name: 'PROJECT_NAME' + location: 'REGION' parameters: - name: 'location' type: String @@ -224,6 +253,7 @@ properties: exactly_one_of: - 'execution_spec.0.trigger.0.on_demand' - 'execution_spec.0.trigger.0.schedule' + - 'execution_spec.0.trigger.0.one_time' properties: [] - name: 'schedule' @@ -233,6 +263,7 @@ properties: exactly_one_of: - 'execution_spec.0.trigger.0.on_demand' - 'execution_spec.0.trigger.0.schedule' + - 'execution_spec.0.trigger.0.one_time' properties: - name: 'cron' type: String @@ -240,6 +271,22 @@ properties: Cron schedule for running scans periodically. This field is required for Schedule scans. required: true + - name: oneTime + type: NestedObject + description: | + The scan runs once upon DataScan creation. 
+ send_empty_value: true + allow_empty_object: true + immutable: true + exactly_one_of: + - 'execution_spec.0.trigger.0.on_demand' + - 'execution_spec.0.trigger.0.schedule' + - 'execution_spec.0.trigger.0.one_time' + properties: + - name: ttlAfterScanCompletion + type: DURATION + description: | + Time to live for the DataScan and its results after the one-time run completes. Accepts a string with a unit suffix 's' (e.g., '7200s'). Default is 24 hours. Ranges between 0 and 31536000 seconds (1 year). - name: 'field' type: String description: | diff --git a/mmv1/products/dataplex/Entry.yaml b/mmv1/products/dataplex/Entry.yaml index f7684c993b7a..07164f58bff6 100644 --- a/mmv1/products/dataplex/Entry.yaml +++ b/mmv1/products/dataplex/Entry.yaml @@ -32,8 +32,9 @@ references: base_url: 'projects/{{project}}/locations/{{location}}/entryGroups/{{entry_group_id}}/entries/{{entry_id}}' self_link: 'projects/{{project}}/locations/{{location}}/entryGroups/{{entry_group_id}}/entries/{{entry_id}}' -create_url: 'projects/{{project}}/locations/{{location}}/entryGroups/{{entry_group_id}}/entries?entryId={{entry_id}}' +create_url: 'projects/{{project}}/locations/{{location}}/entryGroups/{{entry_group_id}}/entries/{{entry_id}}' update_verb: 'PATCH' +create_verb: 'PATCH' update_mask: true import_format: - 'projects/{{project}}/locations/{{location}}/entryGroups/{{entry_group_id}}/entries/{{entry_id}}' @@ -45,6 +46,11 @@ custom_code: custom_import: templates/terraform/custom_import/dataplex_entry.go.tmpl pre_read: templates/terraform/pre_read/dataplex_entry.go.tmpl pre_update: templates/terraform/pre_update/dataplex_entry.go.tmpl + pre_create: templates/terraform/pre_create/dataplex_entry.go.tmpl + pre_delete: templates/terraform/pre_delete/dataplex_entry.go.tmpl + +error_retry_predicates: + - 'transport_tpg.IsDataplex1PEntryIngestedError' timeouts: insert_minutes: 5 @@ -74,6 +80,30 @@ examples: entry_type_name: "entry-type-full" test_env_vars: project_number: 'PROJECT_NUMBER' + - name: 'dataplex_entry_bigquery_table' + primary_resource_id: 'tf_test_table' + primary_resource_name: 'fmt.Sprintf("tf_test_table%s", context["random_suffix"])' + ignore_read_extra: + - 'aspects' + vars: + table_id: 'table-basic' + dataset_id: 'dataset_basic' + aspect_type_name: "aspect-type" + test_env_vars: + project_number: 'PROJECT_NUMBER' + project_id: 'PROJECT_NAME' + - name: 'dataplex_entry_glossary_term' + primary_resource_id: 'tf_test_glossary_term' + primary_resource_name: 'fmt.Sprintf("tf_test_glossary_term%s", context["random_suffix"])' + ignore_read_extra: + - 'aspects' + vars: + glossary_id: 'glossary-basic' + glossary_term_id: 'glossary-term' + test_env_vars: + project_number: 'PROJECT_NUMBER' + project_id: 'PROJECT_NAME' + parameters: - name: 'location' diff --git a/mmv1/products/datastream/ConnectionProfile.yaml b/mmv1/products/datastream/ConnectionProfile.yaml index be43e6446022..db4c2eb90570 100644 --- a/mmv1/products/datastream/ConnectionProfile.yaml +++ b/mmv1/products/datastream/ConnectionProfile.yaml @@ -102,6 +102,22 @@ examples: test_vars_overrides: 'deletion_protection': 'false' exclude_test: true + - name: 'datastream_stream_postgresql_sslconfig_server_and_client_verification' + primary_resource_id: 'default' + vars: + connection_profile_id: 'profile-id' + deletion_protection: 'true' + database_instance_name: 'my-instance' + test_vars_overrides: + 'deletion_protection': 'false' + oics_vars_overrides: + 'deletion_protection': 'false' + external_providers: ["random"] + skip_vcr: true + ignore_read_extra: + - 
'postgresql_profile.0.password' + # TODO: include the newly added field by using the latest commit of the GA provider in tgc tests + tgc_skip_test: fix it after ssl_config is released in v7.16.0 - name: 'datastream_connection_profile_salesforce' primary_resource_id: 'default' vars: @@ -371,6 +387,68 @@ properties: description: | Database for the PostgreSQL connection. required: true + - name: 'sslConfig' + type: NestedObject + description: | + SSL configuration for the PostgreSQL connection. + properties: + - name: 'serverVerification' + type: NestedObject + description: | + If this field is set, the communication will be encrypted with TLS encryption + and the server identity will be authenticated. + exactly_one_of: + - 'ssl_config.0.server_verification' + - 'ssl_config.0.server_and_client_verification' + properties: + - name: 'caCertificate' + type: String + description: PEM-encoded server root CA certificate. + required: true + immutable: true + sensitive: true + ignore_read: true + - name: 'serverAndClientVerification' + type: NestedObject + description: | + If this field is set, the communication will be encrypted with TLS encryption + and both the server identity and the client identity will be authenticated. + exactly_one_of: + - 'ssl_config.0.server_verification' + - 'ssl_config.0.server_and_client_verification' + ignore_read: true + properties: + - name: 'clientCertificate' + type: String + description: | + PEM-encoded certificate used by the source database to authenticate the + client identity (i.e., the Datastream's identity). This certificate is + signed by either a root certificate trusted by the server or one or more + intermediate certificates (which are stored with the leaf certificate) to + link this certificate to the trusted root certificate. + immutable: true + required: true + sensitive: true + ignore_read: true + - name: 'clientKey' + type: String + description: | + PEM-encoded private key associated with the client certificate. + This value will be used during the SSL/TLS handshake, allowing + the PostgreSQL server to authenticate the client's identity, + i.e. the identity of the stream. + immutable: true + required: true + sensitive: true + ignore_read: true + - name: 'caCertificate' + type: String + description: | + PEM-encoded server root CA certificate. + immutable: true + required: true + sensitive: true + ignore_read: true - name: 'salesforceProfile' min_version: beta type: NestedObject @@ -626,6 +704,7 @@ properties: Specifies whether the client connects directly to the host[:port] in the connection URI. - name: 'forwardSshConnectivity' + is_missing_in_cai: true type: NestedObject description: | Forward SSH tunnel connectivity.
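To show what the new PostgreSQL SSL block added in ConnectionProfile.yaml above could look like from the user side, here is a rough HCL sketch of the server-and-client verification variant. The nesting of ssl_config under postgresql_profile is inferred from the example name and exactly_one_of paths, and the connection details and certificate files are placeholders:

resource "google_datastream_connection_profile" "default" {
  display_name          = "Postgres profile with mutual TLS"
  location              = "us-central1"
  connection_profile_id = "profile-id"

  postgresql_profile {
    hostname = "10.0.0.3"
    port     = 5432
    username = "datastream"
    password = "secret"
    database = "postgres"

    ssl_config {
      # exactly_one_of: server_verification or server_and_client_verification
      server_and_client_verification {
        ca_certificate     = file("ca.pem")
        client_certificate = file("client-cert.pem")
        client_key         = file("client-key.pem")
      }
    }
  }
}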
diff --git a/mmv1/products/datastream/Stream.yaml b/mmv1/products/datastream/Stream.yaml index 5dbd6cb7baaa..f0b5d4bdf25b 100644 --- a/mmv1/products/datastream/Stream.yaml +++ b/mmv1/products/datastream/Stream.yaml @@ -238,6 +238,19 @@ examples: skip_vcr: true # Involves complex dependency creation, which makes it impractical in this context exclude_test: true + - name: 'datastream_stream_rule_sets_bigquery' + primary_resource_id: 'default' + vars: + stream_id: 'rules-stream' + source_connection_profile_id: 'rules-source-profile' + destination_connection_profile_id: 'rules-dest-profile' + dataset_id: 'rules-project:rules-dataset' + deletion_protection: 'true' + test_vars_overrides: + 'deletion_protection': 'false' + skip_vcr: true + # Involves complex dependency creation, which makes it impractical in this context + exclude_test: true - name: 'datastream_stream_salesforce' primary_resource_id: 'default' vars: @@ -1986,3 +1999,153 @@ properties: will be encrypted using an internal Stream-specific encryption key provisioned through KMS. immutable: true is_missing_in_cai: true + - name: 'ruleSets' + type: Array + description: 'Rule sets to apply to the stream.' + item_type: + type: NestedObject + properties: + - name: 'customizationRules' + type: Array + description: 'List of customization rules to apply.' + required: true + item_type: + type: NestedObject + properties: + - name: 'bigqueryPartitioning' + type: NestedObject + description: 'BigQuery partitioning rule.' + properties: + - name: 'integerRangePartition' + type: NestedObject + properties: + - name: 'column' + type: String + description: 'The partitioning column.' + required: true + - name: 'start' + type: Integer + description: 'The starting value for range partitioning (inclusive).' + required: true + - name: 'end' + type: Integer + description: 'The ending value for range partitioning (exclusive).' + required: true + - name: 'interval' + type: Integer + description: 'The interval of each range within the partition.' + required: true + - name: 'timeUnitPartition' + type: NestedObject + properties: + - name: 'column' + type: String + description: 'The partitioning column.' + required: true + - name: 'partitioningTimeGranularity' + type: Enum + description: 'Partition granularity.' + enum_values: + - 'PARTITIONING_TIME_GRANULARITY_UNSPECIFIED' + - 'PARTITIONING_TIME_GRANULARITY_HOUR' + - 'PARTITIONING_TIME_GRANULARITY_DAY' + - 'PARTITIONING_TIME_GRANULARITY_MONTH' + - 'PARTITIONING_TIME_GRANULARITY_YEAR' + - name: 'ingestionTimePartition' + type: NestedObject + allow_empty_object: true + send_empty_value: true + properties: + - name: 'partitioningTimeGranularity' + type: Enum + description: 'Partition granularity.' + enum_values: + - 'PARTITIONING_TIME_GRANULARITY_UNSPECIFIED' + - 'PARTITIONING_TIME_GRANULARITY_HOUR' + - 'PARTITIONING_TIME_GRANULARITY_DAY' + - 'PARTITIONING_TIME_GRANULARITY_MONTH' + - 'PARTITIONING_TIME_GRANULARITY_YEAR' + - name: 'requirePartitionFilter' + type: Boolean + description: 'If true, queries over the table require a partition filter.' + - name: 'bigqueryClustering' + type: NestedObject + description: 'BigQuery clustering rule.' + properties: + - name: 'columns' + type: Array + description: 'Column names to set as clustering columns.' + required: true + item_type: + type: String + - name: 'objectFilter' + type: NestedObject + description: 'Object filter to apply the customization rules to.' 
+ required: true + properties: + - name: 'sourceObjectIdentifier' + type: NestedObject + description: 'Specific source object identifier.' + properties: + - name: 'oracleIdentifier' + type: NestedObject + properties: + - name: 'schema' + type: String + description: 'The schema name.' + required: true + - name: 'table' + type: String + description: 'The table name.' + required: true + - name: 'mysqlIdentifier' + type: NestedObject + properties: + - name: 'database' + type: String + description: 'The database name.' + required: true + - name: 'table' + type: String + description: 'The table name.' + required: true + - name: 'postgresqlIdentifier' + type: NestedObject + properties: + - name: 'schema' + type: String + description: 'The schema name.' + required: true + - name: 'table' + type: String + description: 'The table name.' + required: true + - name: 'sqlServerIdentifier' + type: NestedObject + properties: + - name: 'schema' + type: String + description: 'The schema name.' + required: true + - name: 'table' + type: String + description: 'The table name.' + required: true + - name: 'salesforceIdentifier' + type: NestedObject + properties: + - name: 'objectName' + type: String + description: 'The Salesforce object name.' + required: true + - name: 'mongodbIdentifier' + type: NestedObject + properties: + - name: 'database' + type: String + description: 'The MongoDB database name.' + required: true + - name: 'collection' + type: String + description: 'The MongoDB collection name.' + required: true diff --git a/mmv1/products/dialogflowcx/TestCase.yaml b/mmv1/products/dialogflowcx/TestCase.yaml index c6534e417bfe..b5e7f9b1ab30 100644 --- a/mmv1/products/dialogflowcx/TestCase.yaml +++ b/mmv1/products/dialogflowcx/TestCase.yaml @@ -41,6 +41,9 @@ examples: primary_resource_id: 'basic_test_case' vars: agent_name: 'dialogflowcx-agent' + # Skip in VCR until the test issue is resolved + # https://github.com/hashicorp/terraform-provider-google/issues/21532 + skip_vcr: true parameters: - name: 'parent' type: String diff --git a/mmv1/products/discoveryengine/CmekConfig.yaml b/mmv1/products/discoveryengine/CmekConfig.yaml index 2ac10fce4c33..910440d07958 100644 --- a/mmv1/products/discoveryengine/CmekConfig.yaml +++ b/mmv1/products/discoveryengine/CmekConfig.yaml @@ -61,6 +61,8 @@ examples: kms_key_name: 'acctest.BootstrapKMSKeyWithPurposeInLocationAndName(t, "ENCRYPT_DECRYPT", "us", "tftest-shared-key-5").CryptoKey.Name' ignore_read_extra: - 'project' + # Skipping test because cmek configs are expensive to provision and teardown. + exclude_test: true parameters: - name: 'location' type: String @@ -75,6 +77,7 @@ parameters: description: | The unique id of the cmek config. 
url_param_only: true + required: true immutable: true - name: 'setDefault' type: Boolean diff --git a/mmv1/products/filestore/Snapshot.yaml b/mmv1/products/filestore/Snapshot.yaml index da8f4b8f448b..e7823b90b55f 100644 --- a/mmv1/products/filestore/Snapshot.yaml +++ b/mmv1/products/filestore/Snapshot.yaml @@ -40,6 +40,7 @@ async: resource_inside_response: true custom_code: include_in_tgc_next: true +tgc_include_handwritten_tests: true error_abort_predicates: - 'transport_tpg.Is429QuotaError' diff --git a/mmv1/products/firebasedataconnect/Service.yaml b/mmv1/products/firebasedataconnect/Service.yaml index 11546a41ecab..efec2fa8c097 100644 --- a/mmv1/products/firebasedataconnect/Service.yaml +++ b/mmv1/products/firebasedataconnect/Service.yaml @@ -29,6 +29,7 @@ import_format: - '{{project}}/{{location}}/{{service_id}}' - '{{location}}/{{service_id}}' include_in_tgc_next: true +tgc_include_handwritten_tests: true examples: - name: firebasedataconnect_service_basic primary_resource_id: default @@ -80,6 +81,7 @@ properties: - name: displayName type: String description: Optional. Mutable human-readable name. 63 character limit. + is_missing_in_cai: true - name: annotations type: KeyValueAnnotations description: Optional. Stores small amounts of arbitrary data. diff --git a/mmv1/products/gkehub2/Feature.yaml b/mmv1/products/gkehub2/Feature.yaml index a7b47c382313..ad35cc8ad287 100644 --- a/mmv1/products/gkehub2/Feature.yaml +++ b/mmv1/products/gkehub2/Feature.yaml @@ -381,6 +381,7 @@ properties: - name: 'auditIntervalSeconds' type: Integer description: 'Interval for Policy Controller Audit scans (in seconds). When set to 0, this disables audit functionality altogether.' + default_from_api: true - name: 'exemptableNamespaces' type: Array description: 'The set of namespaces that are excluded from Policy Controller checks. Namespaces do not need to currently exist on the cluster.' 
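Looking back at the ruleSets block added to datastream/Stream.yaml above, a fragmentary HCL sketch of a BigQuery partitioning and clustering customization on google_datastream_stream; only the rule_sets portion is derived from the YAML, the remaining stream arguments are omitted, and the placement of the block at the top level of the resource is an assumption:

resource "google_datastream_stream" "default" {
  # ... stream_id, location, source_config, destination_config, etc. omitted ...

  rule_sets {
    customization_rules {
      # Which source table the rule applies to.
      object_filter {
        source_object_identifier {
          postgresql_identifier {
            schema = "public"
            table  = "orders"
          }
        }
      }

      # Daily time-unit partitioning on the destination BigQuery table.
      bigquery_partitioning {
        time_unit_partition {
          column                        = "created_at"
          partitioning_time_granularity = "PARTITIONING_TIME_GRANULARITY_DAY"
        }
      }

      # Clustering columns for the destination table.
      bigquery_clustering {
        columns = ["customer_id", "region"]
      }
    }
  }
}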
diff --git a/mmv1/products/healthcare/PipelineJob.yaml b/mmv1/products/healthcare/PipelineJob.yaml index e3d4c68f9ad1..260f4380a098 100644 --- a/mmv1/products/healthcare/PipelineJob.yaml +++ b/mmv1/products/healthcare/PipelineJob.yaml @@ -51,6 +51,9 @@ examples: backfill_pipeline_name: 'example_backfill_pipeline' dataset_name: 'example_dataset' mapping_pipeline_name: 'example_mapping_pipeline_job' + # Skip in VCR until the test issue is resolved + # https://github.com/hashicorp/terraform-provider-google/issues/21294 + skip_vcr: true - name: 'healthcare_pipeline_job_whistle_mapping' primary_resource_id: 'example-mapping-pipeline' vars: diff --git a/mmv1/products/iambeta/WorkloadIdentityPool.yaml b/mmv1/products/iambeta/WorkloadIdentityPool.yaml index bef09a80fa27..86c79fd78e4a 100644 --- a/mmv1/products/iambeta/WorkloadIdentityPool.yaml +++ b/mmv1/products/iambeta/WorkloadIdentityPool.yaml @@ -31,6 +31,7 @@ update_mask: true import_format: - 'projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}' include_in_tgc_next: true +tgc_include_handwritten_tests: true timeouts: insert_minutes: 20 update_minutes: 20 diff --git a/mmv1/products/iambeta/WorkloadIdentityPoolProvider.yaml b/mmv1/products/iambeta/WorkloadIdentityPoolProvider.yaml index f6903a48ebba..4ba65906982a 100644 --- a/mmv1/products/iambeta/WorkloadIdentityPoolProvider.yaml +++ b/mmv1/products/iambeta/WorkloadIdentityPoolProvider.yaml @@ -27,6 +27,7 @@ update_mask: true import_format: - 'projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers/{{workload_identity_pool_provider_id}}' include_in_tgc_next: true +tgc_include_handwritten_tests: true timeouts: insert_minutes: 20 update_minutes: 20 diff --git a/mmv1/products/iamworkforcepool/WorkforcePoolProviderScimTenant.yaml b/mmv1/products/iamworkforcepool/WorkforcePoolProviderScimTenant.yaml index b9e8e7f5d1ae..0897ed433b56 100644 --- a/mmv1/products/iamworkforcepool/WorkforcePoolProviderScimTenant.yaml +++ b/mmv1/products/iamworkforcepool/WorkforcePoolProviderScimTenant.yaml @@ -23,7 +23,7 @@ references: base_url: 'locations/{{location}}/workforcePools/{{workforce_pool_id}}/providers/{{provider_id}}/scimTenants' self_link: 'locations/{{location}}/workforcePools/{{workforce_pool_id}}/providers/{{provider_id}}/scimTenants/{{scim_tenant_id}}' create_url: 'locations/{{location}}/workforcePools/{{workforce_pool_id}}/providers/{{provider_id}}/scimTenants?workforcePoolProviderScimTenantId={{scim_tenant_id}}' - +delete_url: 'locations/{{location}}/workforcePools/{{workforce_pool_id}}/providers/{{provider_id}}/scimTenants/{{scim_tenant_id}}?hardDelete={{hard_delete}}' import_format: - 'locations/{{location}}/workforcePools/{{workforce_pool_id}}/providers/{{provider_id}}/scimTenants/{{scim_tenant_id}}' update_verb: 'PATCH' @@ -42,6 +42,7 @@ examples: workforce_pool_id: 'example-pool' provider_id: 'example-prvdr' scim_tenant_id: 'example-scim-tenant' + hard_delete: 'true' test_env_vars: org_id: 'ORG_ID' properties: @@ -124,3 +125,8 @@ parameters: url_param_only: true required: true immutable: true + - name: 'hardDelete' + type: Boolean + description: Deletes the SCIM tenant immediately. This operation cannot be undone. 
+ url_param_only: true + default_value: false diff --git a/mmv1/products/iap/WebTypeAppEngine.yaml b/mmv1/products/iap/WebTypeAppEngine.yaml index 63c54560ee62..6d2056e5f126 100644 --- a/mmv1/products/iap/WebTypeAppEngine.yaml +++ b/mmv1/products/iap/WebTypeAppEngine.yaml @@ -37,6 +37,7 @@ iam_policy: test_project_name: 'tf-test' iam_conditions_request_type: 'REQUEST_BODY' custom_diff_suppress: 'templates/terraform/iam/iap_web_appengine_diff_suppress.go.tmpl' + custom_import_state_id_funcs: 'templates/terraform/iam/iap_web_appengine_state_id_funcs.go.tmpl' custom_code: exclude_tgc: true examples: diff --git a/mmv1/products/kms/AutokeyConfig.yaml b/mmv1/products/kms/AutokeyConfig.yaml index 359d46e77470..d5c54ee2fa53 100644 --- a/mmv1/products/kms/AutokeyConfig.yaml +++ b/mmv1/products/kms/AutokeyConfig.yaml @@ -56,6 +56,7 @@ custom_code: # Using a handwritten sweeper because of pre_delete. exclude_sweeper: true include_in_tgc_next: true +tgc_include_handwritten_tests: true examples: - name: 'kms_autokey_config_all' primary_resource_id: 'example-autokeyconfig' diff --git a/mmv1/products/kms/CryptoKey.yaml b/mmv1/products/kms/CryptoKey.yaml index 7e2c823f7a89..4582d7bad6d8 100644 --- a/mmv1/products/kms/CryptoKey.yaml +++ b/mmv1/products/kms/CryptoKey.yaml @@ -40,9 +40,6 @@ import_format: - '{{key_ring}}/cryptoKeys/{{name}}' include_in_tgc_next: true tgc_include_handwritten_tests: true -tgc_tests: - - name: 'TestAccKmsCryptoKey_rotation' - skip: 'Test data has mismatched steps' timeouts: insert_minutes: 20 update_minutes: 20 diff --git a/mmv1/products/kms/KeyHandle.yaml b/mmv1/products/kms/KeyHandle.yaml index cfc41136e092..bf76c3433755 100644 --- a/mmv1/products/kms/KeyHandle.yaml +++ b/mmv1/products/kms/KeyHandle.yaml @@ -50,6 +50,7 @@ custom_code: decoder: 'templates/terraform/decoders/kms.go.tmpl' tgc_decoder: 'templates/tgc_next/decoders/kms.go.tmpl' include_in_tgc_next: true +tgc_include_handwritten_tests: true examples: - name: 'kms_key_handle_basic' primary_resource_id: 'example-keyhandle' diff --git a/mmv1/products/looker/Instance.yaml b/mmv1/products/looker/Instance.yaml index 7d69336e38db..70541c995123 100644 --- a/mmv1/products/looker/Instance.yaml +++ b/mmv1/products/looker/Instance.yaml @@ -426,6 +426,58 @@ properties: The client secret for the Oauth config. required: true # Oauth Object - End + # Periodic Export Config Object + - name: 'periodicExportConfig' + type: NestedObject + description: | + Configuration for periodic export. + update_mask_fields: + - 'periodic_export_config' + properties: + - name: 'kmsKey' + type: String + description: | + Name of the CMEK key in KMS. + Format: + projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key} + required: true + - name: 'gcsUri' + type: String + description: | + Cloud Storage bucket URI for periodic export. + Format: gs://{bucket_name} + required: true + - name: 'startTime' + type: NestedObject + description: | + Time in UTC to start the periodic export job. + required: true + properties: + - name: 'hours' + type: Integer + description: | + Hours of day in 24 hour format. Should be from 0 to 23. + validation: + function: 'validation.IntBetween(0,23)' + - name: 'minutes' + type: Integer + description: | + Minutes of hour of day. Must be from 0 to 59. + validation: + function: 'validation.IntBetween(0,60)' + - name: 'seconds' + type: Integer + description: | + Seconds of minutes of the time. Must normally be from 0 to 59. 
+ validation: + function: 'validation.IntBetween(0,60)' + - name: 'nanos' + type: Integer + description: | + Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + validation: + function: 'validation.IntBetween(0,999999999)' + # Periodic Export Config Object - End - name: 'platformEdition' type: Enum description: | diff --git a/mmv1/products/lustre/Instance.yaml b/mmv1/products/lustre/Instance.yaml index 2dbed259c794..39bf5d0ef36a 100644 --- a/mmv1/products/lustre/Instance.yaml +++ b/mmv1/products/lustre/Instance.yaml @@ -27,9 +27,9 @@ id_format: projects/{{project}}/locations/{{location}}/instances/{{instance_id}} import_format: - projects/{{project}}/locations/{{location}}/instances/{{instance_id}} timeouts: - insert_minutes: 40 - update_minutes: 20 - delete_minutes: 20 + insert_minutes: 120 + update_minutes: 60 + delete_minutes: 60 sweeper: url_substitutions: - location: "us-central1-a" @@ -49,8 +49,8 @@ async: operation: timeouts: insert_minutes: 120 - update_minutes: 20 - delete_minutes: 20 + update_minutes: 60 + delete_minutes: 60 base_url: '{{op_id}}' actions: - create @@ -158,3 +158,56 @@ properties: description: |- The reason why the instance is in a certain state. output: true + - name: accessRulesOptions + type: NestedObject + description: |- + Access control rules for the Lustre instance. Configures default root + squashing behavior and specific access rules based on IP addresses. + properties: + - name: defaultSquashMode + type: Enum + required: true + description: |- + Set to "ROOT_SQUASH" to enable root squashing by default. + Other values include "NO_SQUASH". + enum_values: + - 'ROOT_SQUASH' + - 'NO_SQUASH' + - name: defaultSquashUid + type: Integer + description: |- + The UID to map the root user to when root squashing is enabled + (e.g., 65534 for nobody). + - name: defaultSquashGid + type: Integer + description: |- + The GID to map the root user to when root squashing is enabled + (e.g., 65534 for nobody). + - name: accessRules + type: Array + description: |- + An array of access rule exceptions. Each rule defines IP address ranges + that should have different squash behavior than the default. + item_type: + type: NestedObject + properties: + - name: name + type: String + description: |- + A unique identifier for the access rule. + required: true + - name: ipAddressRanges + type: Array + description: |- + An array of IP address strings or CIDR ranges that this rule applies to. + required: true + item_type: + type: String + - name: squashMode + type: Enum + description: |- + The squash mode for this specific rule. Currently, only "NO_SQUASH" + is supported for exceptions. + required: true + enum_values: + - 'NO_SQUASH' diff --git a/mmv1/products/managedkafka/Cluster.yaml b/mmv1/products/managedkafka/Cluster.yaml index 24645c6a5b4d..20cf9ad807bf 100644 --- a/mmv1/products/managedkafka/Cluster.yaml +++ b/mmv1/products/managedkafka/Cluster.yaml @@ -154,9 +154,9 @@ properties: description: "Capacity configuration at a per-broker level within the Kafka cluster. The config will be appled to each broker in the cluster." ignore_read: true properties: - - name: 'diskSizeGb' + - name: 'diskSizeGib' type: String - description: "The disk to provision for each broker in Gigabytes. Minimum: 100 GB." + description: "The disk to provision for each broker in Gibibytes. Minimum: 100 GiB." - name: 'rebalanceConfig' type: NestedObject description: "Defines rebalancing behavior of a Kafka cluster." 
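As a usage illustration for the accessRulesOptions block added to lustre/Instance.yaml above, a short HCL sketch that enables root squashing by default and carves out one NO_SQUASH exception; the arguments outside this block are abbreviated and the snake_case names are inferred from the YAML:

resource "google_lustre_instance" "example" {
  instance_id = "my-lustre"
  location    = "us-central1-a"
  # ... filesystem, capacity_gib, network, etc. omitted ...

  access_rules_options {
    default_squash_mode = "ROOT_SQUASH"
    default_squash_uid  = 65534 # map root to nobody
    default_squash_gid  = 65534

    access_rules {
      name              = "trusted-admin-hosts"
      ip_address_ranges = ["10.0.0.0/24"]
      # Only NO_SQUASH is currently supported for exceptions.
      squash_mode = "NO_SQUASH"
    }
  }
}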
diff --git a/mmv1/products/managedkafka/ConnectCluster.yaml b/mmv1/products/managedkafka/ConnectCluster.yaml index d7df17017f93..37493a69e5da 100644 --- a/mmv1/products/managedkafka/ConnectCluster.yaml +++ b/mmv1/products/managedkafka/ConnectCluster.yaml @@ -135,6 +135,7 @@ properties: network endpoints in either the primary or additional subnets." item_type: type: String + deprecation_message: '`additionalSubnets` is deprecated and will be removed in a future major release. Managed Kafka Connect clusters can now reach any endpoint accessible from the primary subnet without the need to define additional subnets. Please see https://cloud.google.com/managed-service-for-apache-kafka/docs/connect-cluster/create-connect-cluster#worker-subnet for more information.' - name: 'dnsDomainNames' type: Array description: "Additional DNS domain names from the subnet's network to be made visible to the Connect Cluster. When using diff --git a/mmv1/products/memcache/Instance.yaml b/mmv1/products/memcache/Instance.yaml index 27924743c277..afe63b8d2819 100644 --- a/mmv1/products/memcache/Instance.yaml +++ b/mmv1/products/memcache/Instance.yaml @@ -37,6 +37,8 @@ async: base_url: '{{op_id}}' result: resource_inside_response: true +include_in_tgc_next: true +tgc_include_handwritten_tests: true custom_code: pre_delete: 'templates/terraform/pre_delete/memcache_instance.go.tmpl' examples: diff --git a/mmv1/products/monitoring/AlertPolicy.yaml b/mmv1/products/monitoring/AlertPolicy.yaml index 4b60ba0e3136..4c6b45e71b26 100644 --- a/mmv1/products/monitoring/AlertPolicy.yaml +++ b/mmv1/products/monitoring/AlertPolicy.yaml @@ -37,6 +37,16 @@ timeouts: custom_code: constants: 'templates/terraform/constants/monitoring_alert_policy.go.tmpl' custom_import: 'templates/terraform/custom_import/self_link_as_name_set_project.go.tmpl' +include_in_tgc_next: true +tgc_include_handwritten_tests: true +tgc_tests: + - name: 'TestAccMonitoringAlertPolicy/basic' + - name: 'TestAccMonitoringAlertPolicy/forecast' + - name: 'TestAccMonitoringAlertPolicy/full' + - name: 'TestAccMonitoringAlertPolicy/log' + - name: 'TestAccMonitoringAlertPolicy/mql' + - name: 'TestAccMonitoringAlertPolicy/promql' + - name: 'TestAccMonitoringAlertPolicy/update' error_retry_predicates: - 'transport_tpg.IsMonitoringConcurrentEditError' @@ -245,6 +255,7 @@ properties: otherwise an error is returned. - name: 'crossSeriesReducer' type: Enum + custom_tgc_flatten: 'templates/tgc_next/custom_flatten/monitoring_alert_policy_cross_series_reducer.go.tmpl' description: | The approach to be used to combine time series. Not all reducer @@ -538,6 +549,7 @@ properties: otherwise an error is returned. - name: 'crossSeriesReducer' type: Enum + custom_tgc_flatten: 'templates/tgc_next/custom_flatten/monitoring_alert_policy_cross_series_reducer.go.tmpl' description: | The approach to be used to combine time series. Not all reducer @@ -762,6 +774,7 @@ properties: otherwise an error is returned. - name: 'crossSeriesReducer' type: Enum + custom_tgc_flatten: 'templates/tgc_next/custom_flatten/monitoring_alert_policy_cross_series_reducer.go.tmpl' description: | The approach to be used to combine time series. 
Not all reducer diff --git a/mmv1/products/monitoring/NotificationChannel.yaml b/mmv1/products/monitoring/NotificationChannel.yaml index 6506d889b5ec..19e411f3554f 100644 --- a/mmv1/products/monitoring/NotificationChannel.yaml +++ b/mmv1/products/monitoring/NotificationChannel.yaml @@ -49,6 +49,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 include_in_tgc_next: true +tgc_include_handwritten_tests: true custom_code: constants: 'templates/terraform/constants/monitoring_notification_channel.go.tmpl' encoder: 'templates/terraform/encoders/monitoring_notification_channel.go.tmpl' diff --git a/mmv1/products/monitoring/UptimeCheckConfig.yaml b/mmv1/products/monitoring/UptimeCheckConfig.yaml index 207453acff95..465f9b1eddb0 100644 --- a/mmv1/products/monitoring/UptimeCheckConfig.yaml +++ b/mmv1/products/monitoring/UptimeCheckConfig.yaml @@ -35,6 +35,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 include_in_tgc_next: true +tgc_include_handwritten_tests: true custom_code: encoder: 'templates/terraform/encoders/uptime_check_config.go.tmpl' constants: 'templates/terraform/constants/monitoring_uptime_check_config.go.tmpl' @@ -250,6 +251,7 @@ properties: - 'password' - 'password_wo' sensitive: true + is_missing_in_cai: true custom_flatten: 'templates/terraform/custom_flatten/uptime_check_http_password.tmpl' - name: 'passwordWo' type: String diff --git a/mmv1/products/netapp/ActiveDirectory.yaml b/mmv1/products/netapp/ActiveDirectory.yaml index e1645256b002..12a03d1f9c9f 100644 --- a/mmv1/products/netapp/ActiveDirectory.yaml +++ b/mmv1/products/netapp/ActiveDirectory.yaml @@ -118,18 +118,21 @@ properties: description: | Username for the Active Directory account with permissions to create the compute account within the specified organizational unit. required: true + is_missing_in_cai: true - name: 'password' type: String description: | Password for specified username. Note - Manual changes done to the password will not be detected. Terraform will not re-apply the password, unless you use a new password in Terraform. required: true ignore_read: true + is_missing_in_cai: true sensitive: true - name: 'backupOperators' type: Array description: | Domain user/group accounts to be added to the Backup Operators group of the SMB service. The Backup Operators group allows members to backup and restore files regardless of whether they have read or write access to the files. Comma-separated list. required: false + is_missing_in_cai: true item_type: type: String - name: 'administrators' @@ -137,6 +140,7 @@ properties: description: | Domain user accounts to be added to the local Administrators group of the SMB service. Comma-separated list of domain users or groups. The Domain Admin group is automatically added when the service joins your domain as a hidden group. required: false + is_missing_in_cai: true item_type: type: String - name: 'securityOperators' @@ -144,6 +148,7 @@ properties: description: | Domain accounts that require elevated privileges such as `SeSecurityPrivilege` to manage security logs. Comma-separated list. required: false + is_missing_in_cai: true item_type: type: String - name: 'kdcHostname' @@ -187,3 +192,5 @@ properties: description: | The state details of the Active Directory. 
output: true +include_in_tgc_next: true +tgc_include_handwritten_tests: true diff --git a/mmv1/products/netapp/Backup.yaml b/mmv1/products/netapp/Backup.yaml index 5511bd0332de..2d8dbed509ff 100644 --- a/mmv1/products/netapp/Backup.yaml +++ b/mmv1/products/netapp/Backup.yaml @@ -57,7 +57,8 @@ async: base_url: '{{op_id}}' result: resource_inside_response: false -custom_code: +include_in_tgc_next: true +tgc_include_handwritten_tests: true # Skipping the sweeper since we need to sweep multiple regions exclude_sweeper: true examples: diff --git a/mmv1/products/netapp/HostGroup.yaml b/mmv1/products/netapp/HostGroup.yaml index 290ad26aa4ba..8c32f3138abd 100644 --- a/mmv1/products/netapp/HostGroup.yaml +++ b/mmv1/products/netapp/HostGroup.yaml @@ -59,14 +59,14 @@ parameters: url_param_only: true required: true immutable: true +properties: - name: 'name' type: String description: | The resource name of the Host Group. Needs to be unique per location. - url_param_only: true required: true immutable: true -properties: + custom_flatten: 'templates/terraform/custom_flatten/name_from_self_link.tmpl' - name: 'state' type: String description: | diff --git a/mmv1/products/netapp/Volume.yaml b/mmv1/products/netapp/Volume.yaml index 7d43509096f9..53f4623a9cdd 100644 --- a/mmv1/products/netapp/Volume.yaml +++ b/mmv1/products/netapp/Volume.yaml @@ -1,4 +1,4 @@ -# Copyright 2024 Google Inc. +# Copyright 2025 Google Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -554,8 +554,10 @@ properties: Only applicable to Flex service level. - name: 'hybridReplicationParameters' type: NestedObject - description: |- - The Hybrid Replication parameters for the volume. + description: | + [Volume migration](https://docs.cloud.google.com/netapp/volumes/docs/migrate/ontap/overview) and + [external replication](https://docs.cloud.google.com/netapp/volumes/docs/protect-data/replicate-ontap/overview) + are two types of Hybrid Replication. This parameter block specifies the parameters for a hybrid replication. properties: - name: 'replication' type: String @@ -564,25 +566,26 @@ properties: - name: 'peerVolumeName' type: String description: | - Required. Name of the user's local source volume to be peered with the destination volume. + Required. Name of the ONTAP source volume to be replicated to NetApp Volumes destination volume. - name: 'peerClusterName' type: String description: | - Required. Name of the user's local source cluster to be peered with the destination cluster. + Required. Name of the ONTAP source cluster to be peered with NetApp Volumes. - name: 'peerSvmName' type: String description: | - Required. Name of the user's local source vserver svm to be peered with the destination vserver svm. + Required. Name of the ONTAP source vserver SVM to be peered with NetApp Volumes. - name: 'peerIpAddresses' type: Array description: | - Required. List of node ip addresses to be peered with. + Required. List of all intercluster LIF IP addresses of the ONTAP source cluster. item_type: type: String - name: 'clusterLocation' type: String description: | - Optional. Name of source cluster location associated with the Hybrid replication. This is a free-form field for the display purpose only. + Optional. Name of source cluster location associated with the replication. This is a free-form field + for display purposes only. 
- name: 'description' type: String description: | @@ -603,7 +606,10 @@ properties: - name: 'hybridReplicationType' type: Enum description: | - Optional. Type of the volume's hybrid replication. + Optional. Type of the hybrid replication. Use `MIGRATION` to create a volume migration + and `ONPREM_REPLICATION` to create an external replication. + Other values are read-only. `REVERSE_ONPREM_REPLICATION` is used to represent an external + replication which got reversed. Default is `MIGRATION`. enum_values: - 'MIGRATION' - 'CONTINUOUS_REPLICATION' @@ -612,7 +618,7 @@ properties: - name: 'largeVolumeConstituentCount' type: Integer description: | - Optional. Constituent volume count for large volume. + Optional. If the source is a FlexGroup volume, this field needs to match the number of constituents in the FlexGroup. - name: 'throughputMibps' type: Double description: | diff --git a/mmv1/products/networkconnectivity/Destination.yaml b/mmv1/products/networkconnectivity/Destination.yaml new file mode 100644 index 000000000000..559602c6e2a1 --- /dev/null +++ b/mmv1/products/networkconnectivity/Destination.yaml @@ -0,0 +1,172 @@ +# Copyright 2025 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- +name: 'Destination' +description: | + 'Manage Multicloud Data Transfer Destinations' +references: + guides: + 'QUICKSTART_TITLE': 'https://docs.cloud.google.com/data-transfer-essentials/docs/create-resources' + api: 'https://docs.cloud.google.com/network-connectivity/docs/reference/networkconnectivity/rest/v1/projects.locations.multicloudDataTransferConfigs.destinations' + +base_url: 'projects/{{project}}/locations/{{location}}/multicloudDataTransferConfigs/{{multicloud_data_transfer_config}}/destinations' +self_link: 'projects/{{project}}/locations/{{location}}/multicloudDataTransferConfigs/{{multicloud_data_transfer_config}}/destinations/{{name}}' + +create_url: 'projects/{{project}}/locations/{{location}}/multicloudDataTransferConfigs/{{multicloud_data_transfer_config}}/destinations?destination_id={{name}}' + +update_verb: 'PATCH' +update_mask: true + +autogen_async: true + +async: + operation: + base_url: '{{op_id}}' + +examples: + - name: "network_connectivity_destination_basic" + primary_resource_id: "example" + vars: + config_name: "basic-config" + destination_name: "basic-destination" + +parameters: + - name: 'multicloudDataTransferConfig' + type: String + required: true + immutable: true + url_param_only: true + description: | + The multicloud data transfer config of the destination. + - name: 'location' + type: String + required: true + immutable: true + url_param_only: true + description: | + The location of the destination. + - name: 'name' + type: String + required: true + immutable: true + url_param_only: true + description: | + The name of the destination. + +properties: + - name: 'createTime' + type: Time + output: true + description: | + Time when the `Destination` resource was created. 
+ - name: 'updateTime' + type: Time + output: true + description: | + Time when the `Destination` resource was updated. + - name: 'labels' + type: KeyValueLabels + description: | + User-defined labels. + - name: 'etag' + type: Fingerprint + output: true + description: | + The etag is computed by the server, and might be sent with update and + delete requests so that the client has an up-to-date value before + proceeding. + - name: 'description' + type: String + description: | + A description of this resource. + - name: 'ipPrefix' + type: String + required: true + immutable: true + description: | + The IP prefix that represents your workload on another CSP. + - name: 'endpoints' + type: Array + required: true + is_set: true + description: | + The list of DestinationEndpoint resources configured for the IP prefix. + item_type: + type: NestedObject + properties: + - name: 'asn' + type: String + required: true + description: | + The ASN of the remote IP prefix. + - name: 'csp' + type: String + required: true + description: | + The CSP of the remote IP prefix. + - name: 'state' + type: Enum + output: true + enum_values: + - 'VALID' + - 'INVALID' + description: | + The state of the DestinationEndpoint resource. + - name: 'updateTime' + type: Time + output: true + description: | + Time when the DestinationEndpoint resource was updated. + - name: 'stateTimeline' + type: NestedObject + output: true + description: | + The timeline of the expected `Destination` states or the current rest + state. If a state change is expected, the value is `ADDING`, + `DELETING` or `SUSPENDING`, depending on the action specified. + properties: + - name: 'states' + type: Array + output: true + description: The state and activation time details of the resource state. + item_type: + type: NestedObject + properties: + - name: 'state' + type: Enum + output: true + enum_values: + - 'ADDING' + - 'ACTIVE' + - 'DELETING' + - 'SUSPENDING' + - 'SUSPENDED' + description: 'The state of the resource.' + - name: 'effectiveTime' + type: Time + output: true + description: | + Accompanies only the transient states, which include `ADDING`, + `DELETING`, and `SUSPENDING`, to denote the time until which the + transient state of the resource will be effective. For instance, if the + state is `ADDING`, this field shows the time when the resource state + transitions to `ACTIVE`. + - name: 'uid' + type: String + output: true + description: | + The Google-generated unique ID for the `Destination` resource. + This value is unique across all `Destination` resources. + If a resource is deleted and another with the same name is + created, the new resource is assigned a different and unique ID. diff --git a/mmv1/products/networkconnectivity/MulticloudDataTransferConfig.yaml b/mmv1/products/networkconnectivity/MulticloudDataTransferConfig.yaml new file mode 100644 index 000000000000..ceb5ced43af6 --- /dev/null +++ b/mmv1/products/networkconnectivity/MulticloudDataTransferConfig.yaml @@ -0,0 +1,134 @@ +# Copyright 2025 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +--- +name: 'MulticloudDataTransferConfig' +description: | + 'Manage Multicloud Data Transfer Configs' +references: + guides: + 'QUICKSTART_TITLE': 'https://docs.cloud.google.com/data-transfer-essentials/docs/create-resources' + api: 'https://docs.cloud.google.com/network-connectivity/docs/reference/networkconnectivity/rest/v1/projects.locations.multicloudDataTransferConfigs' + +base_url: 'projects/{{project}}/locations/{{location}}/multicloudDataTransferConfigs' +self_link: 'projects/{{project}}/locations/{{location}}/multicloudDataTransferConfigs/{{name}}' + +create_url: 'projects/{{project}}/locations/{{location}}/multicloudDataTransferConfigs?multicloudDataTransferConfigId={{name}}' + +update_verb: 'PATCH' +update_mask: true + +autogen_async: true +async: + operation: + base_url: '{{op_id}}' + +examples: + - name: "network_connectivity_multicloud_data_transfer_config_basic" + primary_resource_id: "example" + vars: + config_name: "basic_config" + +parameters: + - name: 'location' + type: String + required: true + immutable: true + url_param_only: true + description: | + The location of the multicloud data transfer config. + - name: 'name' + type: String + required: true + immutable: true + url_param_only: true + description: | + The name of the MulticloudDataTransferConfig resource. + +properties: + - name: 'createTime' + type: Time + output: true + description: | + Time when the MulticloudDataTransferConfig resource was created. + - name: 'updateTime' + type: Time + output: true + description: | + Time when the MulticloudDataTransferConfig resource was updated. + - name: 'labels' + type: KeyValueLabels + description: | + User-defined labels. + - name: 'etag' + type: Fingerprint + output: true + description: | + The etag is computed by the server, and might be sent with update and + delete requests so that the client has an up-to-date value before + proceeding. + - name: 'description' + type: String + description: | + A description of this resource. + - name: 'destinationsCount' + type: Integer + output: true + description: | + The number of Destination resources configured for the + MulticloudDataTransferConfig resource. + - name: 'destinationsActiveCount' + type: Integer + output: true + description: | + The number of Destination resources in use with the + MulticloudDataTransferConfig resource. + - name: 'services' + type: Array + description: | + Maps services to their current or planned states. Service names are keys, + and the associated values describe the state of the service. + custom_flatten: 'templates/terraform/custom_flatten/network_connectivity_mcdt_services_state_timeline_flatten.go.tmpl' + custom_expand: 'templates/terraform/custom_expand/network_connectivity_mcdt_services_state_timeline_expand.go.tmpl' + item_type: + type: NestedObject + properties: + - name: 'service_name' + type: String + description: | + The name of the service, like "big-query" or "cloud-storage". + This corresponds to the map key in the API. + required: true + - name: 'states' + type: Array + description: 'The state and activation time details for the service.' + output: true + item_type: + type: NestedObject + properties: + - name: 'state' + type: String + description: 'The state of the resource.' 
+ output: true + - name: 'effective_time' + type: String + description: 'The time when the state becomes effective' + output: true + - name: 'uid' + type: String + output: true + description: | + The Google-generated unique ID for the MulticloudDataTransferConfig + resource. This value is unique across all MulticloudDataTransferConfig + resources. If a resource is deleted and another with the same name is + created, the new resource is assigned a different and unique ID. diff --git a/mmv1/products/networksecurity/AddressGroup.yaml b/mmv1/products/networksecurity/AddressGroup.yaml index eaca5598a44f..53209360491e 100644 --- a/mmv1/products/networksecurity/AddressGroup.yaml +++ b/mmv1/products/networksecurity/AddressGroup.yaml @@ -47,6 +47,7 @@ async: include_project: true custom_code: include_in_tgc_next: true +tgc_include_handwritten_tests: true examples: - name: 'network_security_address_groups_basic' primary_resource_id: 'default' diff --git a/mmv1/products/networksecurity/ClientTlsPolicy.yaml b/mmv1/products/networksecurity/ClientTlsPolicy.yaml index f76f3e87af7b..a10fb5d43ac1 100644 --- a/mmv1/products/networksecurity/ClientTlsPolicy.yaml +++ b/mmv1/products/networksecurity/ClientTlsPolicy.yaml @@ -44,6 +44,7 @@ async: resource_inside_response: false custom_code: include_in_tgc_next: true +tgc_include_handwritten_tests: true sweeper: url_substitutions: - region: "global" diff --git a/mmv1/products/networksecurity/DnsThreatDetector.yaml b/mmv1/products/networksecurity/DnsThreatDetector.yaml index 46bf295dd68a..53d503683c9f 100644 --- a/mmv1/products/networksecurity/DnsThreatDetector.yaml +++ b/mmv1/products/networksecurity/DnsThreatDetector.yaml @@ -15,7 +15,6 @@ name: "DnsThreatDetector" description: | DNS Armor is a fully-managed service that provides DNS-layer security for your Google Cloud workloads. 
-min_version: "beta" references: guides: "DNS Threat Detector": "https://cloud.google.com/dns/docs/threat-detection" @@ -37,7 +36,6 @@ timeouts: custom_code: examples: - name: "network_security_dns_threat_detector_basic" - min_version: "beta" primary_resource_id: "default" vars: resource_name: "my-threat-detector" diff --git a/mmv1/products/networksecurity/GatewaySecurityPolicy.yaml b/mmv1/products/networksecurity/GatewaySecurityPolicy.yaml index d40dccb16cc5..8e6984a8cbd9 100644 --- a/mmv1/products/networksecurity/GatewaySecurityPolicy.yaml +++ b/mmv1/products/networksecurity/GatewaySecurityPolicy.yaml @@ -44,6 +44,7 @@ async: resource_inside_response: false custom_code: include_in_tgc_next: true +tgc_include_handwritten_tests: true sweeper: dependencies: - "google_network_services_gateway" diff --git a/mmv1/products/networksecurity/GatewaySecurityPolicyRule.yaml b/mmv1/products/networksecurity/GatewaySecurityPolicyRule.yaml index a99f070299be..72f20100f49d 100644 --- a/mmv1/products/networksecurity/GatewaySecurityPolicyRule.yaml +++ b/mmv1/products/networksecurity/GatewaySecurityPolicyRule.yaml @@ -32,6 +32,7 @@ timeouts: update_minutes: 30 delete_minutes: 30 include_in_tgc_next: true +tgc_include_handwritten_tests: true autogen_async: true async: actions: ['create', 'delete', 'update'] diff --git a/mmv1/products/networksecurity/SecurityProfile.yaml b/mmv1/products/networksecurity/SecurityProfile.yaml index a255ced38f64..c38e22e63ff2 100644 --- a/mmv1/products/networksecurity/SecurityProfile.yaml +++ b/mmv1/products/networksecurity/SecurityProfile.yaml @@ -41,6 +41,7 @@ async: include_project: true custom_code: include_in_tgc_next: true +tgc_include_handwritten_tests: true sweeper: url_substitutions: - parent: "organizations/${ORG_ID}" @@ -309,7 +310,6 @@ properties: This field is used for Packet Broker mirroring endpoint groups to specify the deployment groups that the packet should be mirrored to by the broker. 
Format: projects/{project_id}/locations/global/mirroringDeploymentGroups/{deployment_group_id} - immutable: true min_version: 'beta' - name: 'mirroringEndpointGroupType' type: String diff --git a/mmv1/products/networksecurity/SecurityProfileGroup.yaml b/mmv1/products/networksecurity/SecurityProfileGroup.yaml index 001508f8e7a7..8e4e713f665b 100644 --- a/mmv1/products/networksecurity/SecurityProfileGroup.yaml +++ b/mmv1/products/networksecurity/SecurityProfileGroup.yaml @@ -42,6 +42,7 @@ async: include_project: true custom_code: include_in_tgc_next: true +tgc_include_handwritten_tests: true examples: - name: 'network_security_security_profile_group_basic' primary_resource_id: 'default' diff --git a/mmv1/products/networksecurity/ServerTlsPolicy.yaml b/mmv1/products/networksecurity/ServerTlsPolicy.yaml index 1e2358b551e5..2ac03de13d53 100644 --- a/mmv1/products/networksecurity/ServerTlsPolicy.yaml +++ b/mmv1/products/networksecurity/ServerTlsPolicy.yaml @@ -43,6 +43,7 @@ async: result: resource_inside_response: false include_in_tgc_next: true +tgc_include_handwritten_tests: true custom_code: sweeper: url_substitutions: diff --git a/mmv1/products/networksecurity/UrlLists.yaml b/mmv1/products/networksecurity/UrlLists.yaml index 4e44f5b936ab..72cfda65d1a8 100644 --- a/mmv1/products/networksecurity/UrlLists.yaml +++ b/mmv1/products/networksecurity/UrlLists.yaml @@ -46,6 +46,7 @@ async: resource_inside_response: false custom_code: include_in_tgc_next: true +tgc_include_handwritten_tests: true examples: - name: 'network_security_url_lists_basic' primary_resource_id: 'default' diff --git a/mmv1/products/networkservices/AuthzExtension.yaml b/mmv1/products/networkservices/AuthzExtension.yaml index e5232b52ee82..a42b326ee247 100644 --- a/mmv1/products/networkservices/AuthzExtension.yaml +++ b/mmv1/products/networkservices/AuthzExtension.yaml @@ -55,6 +55,14 @@ examples: backend_name: 'authz-service' test_env_vars: project: 'PROJECT_NAME' + - name: 'network_services_authz_extension_basic_with_auth_grpc' + min_version: 'beta' + primary_resource_id: 'default' + vars: + resource_name: 'my-authz-ext-with-grpc' + backend_name: 'authz-service-grpc' + test_env_vars: + project: 'PROJECT_NAME' parameters: - name: 'name' type: String @@ -142,8 +150,23 @@ properties: - name: 'wireFormat' type: Enum description: | - The format of communication supported by the callout extension. Will be set to EXT_PROC_GRPC by the backend if no value is set. + Specifies the communication protocol used by the callout extension + to communicate with its backend service. + Supported values: + - WIRE_FORMAT_UNSPECIFIED: + No wire format is explicitly specified. The backend automatically + defaults this value to EXT_PROC_GRPC. + - EXT_PROC_GRPC: + Uses Envoy's External Processing (ext_proc) gRPC API over a single + gRPC stream. The backend service must support HTTP/2 or H2C. + All supported events for a client request are sent over the same + gRPC stream. This is the default wire format. + - EXT_AUTHZ_GRPC: + Uses Envoy's external authorization (ext_authz) gRPC API. + The backend service must support HTTP/2 or H2C. + This option is only supported for regional AuthzExtension resources. 
default_from_api: true enum_values: - 'WIRE_FORMAT_UNSPECIFIED' - 'EXT_PROC_GRPC' + - 'EXT_AUTHZ_GRPC' diff --git a/mmv1/products/networkservices/LbRouteExtension.yaml b/mmv1/products/networkservices/LbRouteExtension.yaml index 150e43702ac1..1a5445a7a078 100644 --- a/mmv1/products/networkservices/LbRouteExtension.yaml +++ b/mmv1/products/networkservices/LbRouteExtension.yaml @@ -18,7 +18,7 @@ description: | references: guides: 'Configure a route extension': 'https://cloud.google.com/service-extensions/docs/configure-callout#configure_a_route_extension' - api: 'https://cloud.google.com/service-extensions/docs/reference/rest/v1beta1/projects.locations.lbRouteExtensions' + api: 'https://cloud.google.com/service-extensions/docs/reference/rest/v1/projects.locations.lbRouteExtensions' docs: base_url: 'projects/{{project}}/locations/{{location}}/lbRouteExtensions' self_link: 'projects/{{project}}/locations/{{location}}/lbRouteExtensions/{{name}}' @@ -38,6 +38,7 @@ async: result: resource_inside_response: false custom_code: + constants: 'templates/terraform/constants/network_services_lb_route_extension.go.tmpl' sweeper: url_substitutions: - region: "us-west1" @@ -66,6 +67,30 @@ examples: ignore_read_extra: - 'port_range' - 'target' + - name: 'network_services_lb_route_extension_observability' + primary_resource_id: 'default' + vars: + ilb_network_name: 'l7-ilb-network' + proxy_subnet_name: 'l7-ilb-proxy-subnet' + backend_subnet_name: 'l7-ilb-subnet' + forwarding_rule_name: 'l7-ilb-forwarding-rule' + target_http_proxy_name: 'l7-ilb-target-http-proxy' + regional_url_map_name: 'l7-ilb-regional-url-map' + backend_service_name: 'l7-ilb-backend-subnet' + mig_template_name: 'l7-ilb-mig-template' + hc_name: 'l7-ilb-hc' + mig_name: 'l7-ilb-mig1' + fw_allow_iap_hc_name: 'l7-ilb-fw-allow-iap-hc' + fw_allow_ilb_to_backends_name: 'l7-ilb-fw-allow-ilb-to-backends' + vm_test_name: 'l7-ilb-test-vm' + lb_route_extension_name: 'l7-ilb-route-ext' + callouts_instance_name: 'l7-ilb-callouts-ins' + callouts_instance_group: 'l7-ilb-callouts-ins-group' + callouts_hc_name: 'l7-ilb-callouts-hc' + callouts_backend_name: 'l7-ilb-callouts-backend' + ignore_read_extra: + - 'port_range' + - 'target' parameters: - name: 'location' type: String @@ -105,6 +130,7 @@ properties: Match conditions for each extension chain are evaluated in sequence for a given request. The first extension chain that has a condition that matches the request is executed. Any subsequent extension chains do not execute. Limited to 5 extension chains per resource. + Further information can be found at https://cloud.google.com/service-extensions/docs/reference/rest/v1/ExtensionChain required: true item_type: type: NestedObject @@ -134,6 +160,7 @@ properties: A set of extensions to execute for the matching request. At least one extension is required. Up to 3 extensions can be defined for each extension chain for LbTrafficExtension resource. LbRouteExtension chains are limited to 1 extension per extension chain. + Further documentation can be found at https://cloud.google.com/service-extensions/docs/reference/rest/v1/ExtensionChain#Extension required: true item_type: type: NestedObject @@ -181,6 +208,47 @@ properties: If omitted, all headers are sent. Each element is a string indicating the header name. item_type: type: String + - name: 'supportedEvents' + type: Array + is_set: true + description: | + A set of events during request or response processing for which this extension is called. + This field is optional for the LbRouteExtension resource. 
If unspecified, `REQUEST_HEADERS` event is assumed as supported. + Possible values: `REQUEST_HEADERS`, `REQUEST_BODY`, `REQUEST_TRAILERS`. + item_type: + type: String + validation: + function: 'ValidateSupportedEvent' + - name: 'metadata' + type: KeyValuePairs + description: | + The metadata provided here is included as part of the `metadata_context` (of type `google.protobuf.Struct`) + in the `ProcessingRequest` message sent to the extension server. + The metadata is available under the namespace `com.google.lb_route_extension...`. + The following variables are supported in the metadata: `{forwarding_rule_id}` - substituted with the forwarding rule's fully qualified resource name. + This field must not be set for plugin extensions. Setting it results in a validation error. + - name: 'requestBodySendMode' + type: Enum + description: | + Configures the send mode for request body processing. + The field can only be set if `supported_events` includes `REQUEST_BODY`. + If `supported_events` includes `REQUEST_BODY`, but `request_body_send_mode` is unset, the default value `STREAMED` is used. + When this field is set to `FULL_DUPLEX_STREAMED`, `supported_events` must include both `REQUEST_BODY` and `REQUEST_TRAILERS`. + This field can be set only when the `service` field of the extension points to a `BackendService`. + Only `FULL_DUPLEX_STREAMED` mode is supported for `LbRouteExtension` resources. + enum_values: + - 'BODY_SEND_MODE_UNSPECIFIED' + - 'BODY_SEND_MODE_STREAMED' + - 'BODY_SEND_MODE_FULL_DUPLEX_STREAMED' + - name: 'observabilityMode' + type: Boolean + description: | + When set to `TRUE`, enables `observability_mode` on the `ext_proc` filter. + This makes `ext_proc` calls asynchronous. Envoy doesn't check for the response from `ext_proc` calls. + For more information about the filter, see: https://www.envoyproxy.io/docs/envoy/v1.32.3/api-v3/extensions/filters/http/ext_proc/v3/ext_proc.proto + This field is helpful when you want to try out the extension in async log-only mode. + Supported by regional `LbTrafficExtension` and `LbRouteExtension` resources. + Only `STREAMED` (default) body processing mode is supported. - name: 'loadBalancingScheme' type: Enum description: | diff --git a/mmv1/products/networkservices/MulticastConsumerAssociation.yaml b/mmv1/products/networkservices/MulticastConsumerAssociation.yaml index 28fb2e6a21f0..ac8d5f79364a 100644 --- a/mmv1/products/networkservices/MulticastConsumerAssociation.yaml +++ b/mmv1/products/networkservices/MulticastConsumerAssociation.yaml @@ -15,6 +15,10 @@ name: MulticastConsumerAssociation description: Create a multicast consumer association in the specified location of the current project. +references: + guides: + 'Create Multicast Consumer Association': 'https://docs.cloud.google.com/vpc/docs/multicast/enable-consumer-network#add-consumer' + api: 'https://docs.cloud.google.com/vpc/docs/multicast/reference/rest/v1/projects.locations.multicastConsumerAssociations' base_url: projects/{{project}}/locations/{{location}}/multicastConsumerAssociations update_mask: true self_link: projects/{{project}}/locations/{{location}}/multicastConsumerAssociations/{{multicast_consumer_association_id}} @@ -82,7 +86,7 @@ properties: type: String description: |- The resource name of the multicast domain activation that is in the - same zone as this multicast producer association. + same zone as this multicast consumer association. Use the following format: // `projects/*/locations/*/multicastDomainActivations/*`. 
immutable: true diff --git a/mmv1/products/networkservices/MulticastDomain.yaml b/mmv1/products/networkservices/MulticastDomain.yaml index 0587bd4e83c1..74f88959297d 100644 --- a/mmv1/products/networkservices/MulticastDomain.yaml +++ b/mmv1/products/networkservices/MulticastDomain.yaml @@ -14,12 +14,17 @@ --- name: MulticastDomain description: Create a multicast domain in the current project. +references: + guides: + 'Create Multicast Domain': 'https://docs.cloud.google.com/vpc/docs/multicast/create-domains#create-domain' + api: 'https://docs.cloud.google.com/vpc/docs/multicast/reference/rest/v1/projects.locations.multicastDomains' base_url: projects/{{project}}/locations/{{location}}/multicastDomains update_mask: true self_link: projects/{{project}}/locations/{{location}}/multicastDomains/{{multicast_domain_id}} create_url: projects/{{project}}/locations/{{location}}/multicastDomains?multicastDomainId={{multicast_domain_id}} +update_verb: PATCH import_format: - projects/{{project}}/locations/{{location}}/multicastDomains/{{multicast_domain_id}} examples: @@ -29,7 +34,6 @@ examples: network_name: test-md-network domain_name: test-md-domain autogen_async: true -immutable: true async: operation: timeouts: @@ -100,7 +104,7 @@ properties: immutable: true - name: createTime type: String - description: '[Output only] The timestamp when the multicast domain was created.' + description: 'The timestamp when the multicast domain was created.' output: true - name: description type: String @@ -125,14 +129,32 @@ properties: - name: uniqueId type: String description: |- - [Output only] The Google-generated UUID for the resource. This value is + The Google-generated UUID for the resource. This value is unique across all multicast domain resources. If a domain is deleted and another with the same name is created, the new domain is assigned a different unique_id. output: true + - name: state + type: NestedObject + description: The multicast resource's state. + output: true + properties: + - name: state + type: String + description: |- + The state of the multicast resource. + Possible values: + CREATING + ACTIVE + DELETING + DELETE_FAILED + UPDATING + UPDATE_FAILED + INACTIVE + output: true - name: updateTime type: String description: |- - [Output only] The timestamp when the multicast domain was most recently + The timestamp when the multicast domain was most recently updated. output: true diff --git a/mmv1/products/networkservices/MulticastDomainGroup.yaml b/mmv1/products/networkservices/MulticastDomainGroup.yaml index 0473458b1b8c..d4ea7e370090 100644 --- a/mmv1/products/networkservices/MulticastDomainGroup.yaml +++ b/mmv1/products/networkservices/MulticastDomainGroup.yaml @@ -14,6 +14,10 @@ --- name: MulticastDomainGroup description: Create a multicast domain group in the current project. +references: + guides: + 'Create Multicast Domain Group': 'https://docs.cloud.google.com/vpc/docs/multicast/create-domains#create-domain-group' + api: 'https://docs.cloud.google.com/vpc/docs/multicast/reference/rest/v1/projects.locations.multicastDomainGroups' base_url: projects/{{project}}/locations/{{location}}/multicastDomainGroups update_mask: true self_link: @@ -72,7 +76,7 @@ parameters: properties: - name: createTime type: String - description: '[Output only] The timestamp when the multicast domain group was created.' + description: 'The timestamp when the multicast domain group was created.' 
output: true - name: description type: String @@ -83,7 +87,7 @@ properties: - name: multicastDomains type: Array description: |- - [Output only] Multicast domains associated with the group. + Multicast domains associated with the group. There can be at most 2 multicast domains in a group. output: true item_type: @@ -112,10 +116,11 @@ properties: UPDATING UPDATE_FAILED INACTIVE + output: true - name: uniqueId type: String description: |- - [Output only] The Google-generated UUID for the resource. This value is + The Google-generated UUID for the resource. This value is unique across all multicast domain group resources. If a domain is deleted and another with the same name is created, the new domain is assigned a different unique_id. @@ -123,6 +128,6 @@ properties: - name: updateTime type: String description: |- - [Output only] The timestamp when the multicast domain group was most + The timestamp when the multicast domain group was most recently updated. output: true diff --git a/mmv1/products/networkservices/MulticastGroupConsumerActivation.yaml b/mmv1/products/networkservices/MulticastGroupConsumerActivation.yaml new file mode 100644 index 000000000000..d0c5d3b742d8 --- /dev/null +++ b/mmv1/products/networkservices/MulticastGroupConsumerActivation.yaml @@ -0,0 +1,152 @@ +# Copyright 2025 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- +name: MulticastGroupConsumerActivation +description: Create a multicast group consumer activation in the specified + location of the current project. 
+references: + guides: + 'Create Multicast Group Consumer Activation': 'https://docs.cloud.google.com/vpc/docs/multicast/enable-consumer-network#activate-consumer' + api: 'https://docs.cloud.google.com/vpc/docs/multicast/reference/rest/v1/projects.locations.multicastGroupConsumerActivations' +base_url: projects/{{project}}/locations/{{location}}/multicastGroupConsumerActivations +update_mask: true +self_link: projects/{{project}}/locations/{{location}}/multicastGroupConsumerActivations/{{multicast_group_consumer_activation_id}} +create_url: projects/{{project}}/locations/{{location}}/multicastGroupConsumerActivations?multicastGroupConsumerActivationId={{multicast_group_consumer_activation_id}} +update_verb: PATCH +id_format: projects/{{project}}/locations/{{location}}/multicastGroupConsumerActivations/{{multicast_group_consumer_activation_id}} +import_format: + - projects/{{project}}/locations/{{location}}/multicastGroupConsumerActivations/{{multicast_group_consumer_activation_id}} +examples: + - name: network_services_multicast_group_consumer_activation_basic + primary_resource_id: mgca_test + vars: + network_name: test-network-mgca + domain_name: test-domain-mgca + domain_activation_name: test-domain-activation-mgca + consumer_association_name: test-consumer-association-mgca + internal_range_name: test-internal-range-mgca + group_range_name: test-group-range-mgca + group_range_activation_name: test-mgra-mgca + group_consumer_activation_name: test-mgca-mgca +autogen_async: true +async: + operation: + timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 + base_url: '{{op_id}}' + actions: + - create + - delete + - update + type: OpAsync + result: + resource_inside_response: true + include_project: false +autogen_status: TXVsdGljYXN0R3JvdXBDb25zdW1lckFjdGl2YXRpb24= +parameters: + - name: location + type: String + description: Resource ID segment making up resource `name`. It identifies the resource within its parent collection as described in https://google.aip.dev/122. + immutable: true + url_param_only: true + required: true + - name: multicastGroupConsumerActivationId + type: String + description: |- + A unique name for the multicast group consumer activation. + The name is restricted to letters, numbers, and hyphen, with the first + character a letter, and the last a letter or a number. The name must not + exceed 48 characters. + immutable: true + url_param_only: true + required: true +properties: + - name: createTime + type: String + description: |- + The timestamp when the multicast group consumer activation + was created. + output: true + - name: description + type: String + description: An optional text description of the multicast group consumer activation. + - name: labels + type: KeyValueLabels + description: Labels as key-value pairs + - name: logConfig + type: NestedObject + description: The logging configuration. + properties: + - name: enabled + type: Boolean + description: Whether to enable logging or not. + - name: multicastConsumerAssociation + type: String + description: |- + The resource name of the multicast consumer association that is in the + same zone as this multicast group consumer activation. + Use the following format: + `projects/*/locations/*/multicastConsumerAssociations/*`. + immutable: true + required: true + - name: multicastGroupRangeActivation + type: String + description: |- + The resource name of the multicast group range activation created by the + admin in the same zone as this multicast group consumer activation. 
Use the + following format: + // `projects/*/locations/*/multicastGroupRangeActivations/*`. + immutable: true + required: true + - name: name + type: String + description: |- + Identifier. The resource name of the multicast group consumer activation. + Use the following format: + `projects/*/locations/*/multicastGroupConsumerActivations/*`. + output: true + - name: state + type: NestedObject + description: The multicast resource's state. + output: true + properties: + - name: state + type: String + description: |- + The state of the multicast resource. + Possible values: + CREATING + ACTIVE + DELETING + DELETE_FAILED + UPDATING + UPDATE_FAILED + INACTIVE + output: true + - name: uniqueId + type: String + description: |- + The Google-generated UUID for the resource. This value is + unique across all multicast group consumer activation resources. If a group + consumer activation is deleted and another with the same name is created, + the new group consumer activation is assigned a different unique_id. + output: true + - name: updateTime + type: String + description: |- + The timestamp when the multicast group consumer activation + was most recently updated. + output: true diff --git a/mmv1/products/networkservices/MulticastGroupProducerActivation.yaml b/mmv1/products/networkservices/MulticastGroupProducerActivation.yaml new file mode 100644 index 000000000000..bceb46b58224 --- /dev/null +++ b/mmv1/products/networkservices/MulticastGroupProducerActivation.yaml @@ -0,0 +1,144 @@ +# Copyright 2025 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- +name: MulticastGroupProducerActivation +description: Create a multicast group producer activation in the specified + location of the current project. 
+references: + guides: + 'Create Multicast Group Producer Activation': 'https://docs.cloud.google.com/vpc/docs/multicast/enable-producer-network#activate-producer' + api: 'https://docs.cloud.google.com/vpc/docs/multicast/reference/rest/v1/projects.locations.multicastGroupProducerActivations' +base_url: projects/{{project}}/locations/{{location}}/multicastGroupProducerActivations +update_mask: true +self_link: projects/{{project}}/locations/{{location}}/multicastGroupProducerActivations/{{multicast_group_producer_activation_id}} +create_url: projects/{{project}}/locations/{{location}}/multicastGroupProducerActivations?multicastGroupProducerActivationId={{multicast_group_producer_activation_id}} +update_verb: PATCH +id_format: projects/{{project}}/locations/{{location}}/multicastGroupProducerActivations/{{multicast_group_producer_activation_id}} +import_format: + - projects/{{project}}/locations/{{location}}/multicastGroupProducerActivations/{{multicast_group_producer_activation_id}} +examples: + - name: network_services_multicast_group_producer_activation_basic + primary_resource_id: mgpa_test + vars: + network_name: test-network-mgpa + domain_name: test-domain-mgpa + domain_activation_name: test-domain-activation-mgpa + producer_association_name: test-producer-association-mgpa + internal_range_name: test-internal-range-mgpa + group_range_name: test-group-range-mgpa + group_range_activation_name: test-mgra-mgpa + group_producer_activation_name: test-mgpa-mgpa +autogen_async: true +async: + operation: + timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 + base_url: '{{op_id}}' + actions: + - create + - delete + - update + type: OpAsync + result: + resource_inside_response: true + include_project: false +autogen_status: TXVsdGljYXN0R3JvdXBQcm9kdWNlckFjdGl2YXRpb24= +parameters: + - name: location + type: String + description: Resource ID segment making up resource `name`. It identifies the resource within its parent collection as described in https://google.aip.dev/122. + immutable: true + url_param_only: true + required: true + - name: multicastGroupProducerActivationId + type: String + description: |- + A unique name for the multicast group producer activation. + The name is restricted to letters, numbers, and hyphen, with the first + character a letter, and the last a letter or a number. The name must not + exceed 48 characters. + immutable: true + url_param_only: true + required: true +properties: + - name: createTime + type: String + description: |- + The timestamp when the multicast group producer activation was created. + output: true + - name: description + type: String + description: An optional text description of the multicast group producer activation. + - name: labels + type: KeyValueLabels + description: Labels as key-value pairs + - name: multicastGroupRangeActivation + type: String + description: |- + The resource name of the multicast group range activation created by the + admin in the same zone as this multicast group producer activation. Use the + following format: + // `projects/*/locations/*/multicastGroupRangeActivations/*`. + immutable: true + required: true + - name: multicastProducerAssociation + type: String + description: |- + The resource name of the multicast producer association that is in the + same zone as this multicast group producer activation. + Use the following format: + `projects/*/locations/*/multicastProducerAssociations/*`. + immutable: true + required: true + - name: name + type: String + description: |- + Identifier.
The resource name of the multicast group producer activation. + Use the following format: + `projects/*/locations/*/multicastGroupProducerActivations/*`. + output: true + - name: state + type: NestedObject + description: The multicast resource's state. + output: true + properties: + - name: state + type: String + description: |- + The state of the multicast resource. + Possible values: + CREATING + ACTIVE + DELETING + DELETE_FAILED + UPDATING + UPDATE_FAILED + INACTIVE + output: true + - name: uniqueId + type: String + description: |- + The Google-generated UUID for the resource. This value is + unique across all multicast group producer activation resources. If a group + producer activation is deleted and another with the same name is created, + the new group producer activation is assigned a different unique_id. + output: true + - name: updateTime + type: String + description: |- + The timestamp when the multicast group producer activation + was most recently updated. + output: true diff --git a/mmv1/products/networkservices/MulticastGroupRange.yaml b/mmv1/products/networkservices/MulticastGroupRange.yaml index d41e2b245cf9..164d63e0d6df 100644 --- a/mmv1/products/networkservices/MulticastGroupRange.yaml +++ b/mmv1/products/networkservices/MulticastGroupRange.yaml @@ -14,6 +14,10 @@ --- name: MulticastGroupRange description: Create a multicast group range in the current project. +references: + guides: + 'Create Multicast Group Range': 'https://docs.cloud.google.com/vpc/docs/multicast/create-group-ranges#create_a_group_range' + api: 'https://docs.cloud.google.com/vpc/docs/multicast/reference/rest/v1/projects.locations.multicastGroupRanges' base_url: projects/{{project}}/locations/{{location}}/multicastGroupRanges update_mask: true self_link: projects/{{project}}/locations/{{location}}/multicastGroupRanges/{{multicast_group_range_id}} diff --git a/mmv1/products/networkservices/MulticastGroupRangeActivation.yaml b/mmv1/products/networkservices/MulticastGroupRangeActivation.yaml index 5173af884cbc..151de2046543 100644 --- a/mmv1/products/networkservices/MulticastGroupRangeActivation.yaml +++ b/mmv1/products/networkservices/MulticastGroupRangeActivation.yaml @@ -17,7 +17,7 @@ description: Create a multicast group range activation in the specified location of the current project. 
references: guides: - 'Create Multicast Producer Association': 'https://docs.cloud.google.com/vpc/docs/multicast/create-group-ranges#activate_the_group_range' + 'Create Multicast Group Range Activation': 'https://docs.cloud.google.com/vpc/docs/multicast/create-group-ranges#activate_the_group_range' api: 'https://docs.cloud.google.com/vpc/docs/multicast/reference/rest/v1/projects.locations.multicastGroupRangeActivations' base_url: projects/{{project}}/locations/{{location}}/multicastGroupRangeActivations update_mask: true diff --git a/mmv1/products/privateca/CaPool.yaml b/mmv1/products/privateca/CaPool.yaml index db4c21a95177..5bca8026e3e6 100644 --- a/mmv1/products/privateca/CaPool.yaml +++ b/mmv1/products/privateca/CaPool.yaml @@ -48,6 +48,7 @@ iam_policy: iam_conditions_request_type: 'QUERY_PARAM_NESTED' example_config_body: 'templates/terraform/iam/example_config_body/privateca_ca_pool.tf.tmpl' custom_code: + tgc_decoder: 'templates/tgc_next/decoders/privateca_capool.go.tmpl' examples: - name: 'privateca_capool_basic' primary_resource_id: 'default' @@ -70,6 +71,8 @@ examples: my_certificate: 'my-certificate' exclude_test: true exclude_docs: true +include_in_tgc_next: true +tgc_include_handwritten_tests: true parameters: - name: 'location' type: String diff --git a/mmv1/products/pubsub/Subscription.yaml b/mmv1/products/pubsub/Subscription.yaml index 874aa4be4a88..2f496d6eabe8 100644 --- a/mmv1/products/pubsub/Subscription.yaml +++ b/mmv1/products/pubsub/Subscription.yaml @@ -30,6 +30,7 @@ update_url: 'projects/{{project}}/subscriptions/{{name}}' update_verb: 'PATCH' update_mask: true include_in_tgc_next: true +tgc_include_handwritten_tests: true timeouts: insert_minutes: 20 update_minutes: 20 @@ -198,6 +199,7 @@ properties: description: | When true, write the subscription name, messageId, publishTime, attributes, and orderingKey to additional columns in the table. The subscription name, messageId, and publishTime fields are put in their own columns while all other message properties (other than data) are written to a JSON object in the attributes column. + is_missing_in_cai: true - name: 'dropUnknownFields' type: Boolean description: | @@ -276,6 +278,7 @@ properties: send_empty_value: true description: | When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. + is_missing_in_cai: true - name: 'useTopicSchema' type: Boolean send_empty_value: true @@ -370,6 +373,7 @@ properties: Pub/Sub message attributes to `:` headers of the HTTP request. required: true send_empty_value: true + is_missing_in_cai: true - name: 'ackDeadlineSeconds' type: Integer description: | @@ -436,6 +440,7 @@ properties: Example - "3.5s". 
required: true diff_suppress_func: 'comparePubsubSubscriptionExpirationPolicy' + is_missing_in_cai: true - name: 'filter' type: String description: | diff --git a/mmv1/products/pubsub/Topic.yaml b/mmv1/products/pubsub/Topic.yaml index e56f46553c2c..757ded85b294 100644 --- a/mmv1/products/pubsub/Topic.yaml +++ b/mmv1/products/pubsub/Topic.yaml @@ -55,6 +55,7 @@ tgc_ignore_terraform_encoder: true error_retry_predicates: - 'transport_tpg.PubsubTopicProjectNotReady' include_in_tgc_next: true +tgc_include_handwritten_tests: true examples: - name: 'pubsub_topic_basic' primary_resource_id: 'example' diff --git a/mmv1/products/redis/Cluster.yaml b/mmv1/products/redis/Cluster.yaml index 925b69de454c..14076e8adda7 100644 --- a/mmv1/products/redis/Cluster.yaml +++ b/mmv1/products/redis/Cluster.yaml @@ -128,6 +128,18 @@ sweeper: - region: "us-east1" - region: "europe-west1" examples: + - name: 'redis_cluster_ha_with_labels' + primary_resource_id: 'cluster-ha-with-labels' + vars: + cluster_name: 'ha-cluster' + policy_name: 'my-policy' + subnet_name: 'my-subnet' + network_name: 'my-network' + deletion_protection_enabled: 'true' + test_vars_overrides: + 'deletion_protection_enabled': 'false' + oics_vars_overrides: + 'deletion_protection_enabled': 'false' - name: 'redis_cluster_ha' primary_resource_id: 'cluster-ha' vars: @@ -232,6 +244,9 @@ parameters: ignore_read: true default_from_api: true properties: + - name: 'labels' + type: KeyValueLabels + description: Resource labels to represent user provided metadata. - name: 'createTime' type: Time description: | @@ -355,6 +370,7 @@ properties: If not provided, REDIS_HIGHMEM_MEDIUM will be used as default required: false default_from_api: true + is_missing_in_cai: true enum_values: - 'REDIS_SHARED_CORE_NANO' - 'REDIS_HIGHMEM_MEDIUM' @@ -508,6 +524,7 @@ properties: type: NestedObject description: Persistence config (RDB, AOF) for the cluster. 
default_from_api: true + is_missing_in_cai: true properties: - name: 'mode' type: Enum diff --git a/mmv1/products/secretmanager/Secret.yaml b/mmv1/products/secretmanager/Secret.yaml index 8e15e746422f..2be80e1c64ac 100644 --- a/mmv1/products/secretmanager/Secret.yaml +++ b/mmv1/products/secretmanager/Secret.yaml @@ -39,6 +39,7 @@ iam_policy: parent_resource_attribute: 'secret_id' iam_conditions_request_type: 'QUERY_PARAM_NESTED' include_in_tgc_next: true +tgc_include_handwritten_tests: true custom_code: constants: 'templates/terraform/constants/secret_manager_secret.go.tmpl' pre_update: 'templates/terraform/pre_update/secret_manager_secret.go.tmpl' diff --git a/mmv1/products/secretmanagerregional/RegionalSecret.yaml b/mmv1/products/secretmanagerregional/RegionalSecret.yaml index a914575581f7..a16b189a004b 100644 --- a/mmv1/products/secretmanagerregional/RegionalSecret.yaml +++ b/mmv1/products/secretmanagerregional/RegionalSecret.yaml @@ -45,6 +45,7 @@ custom_code: pre_update: 'templates/terraform/pre_update/secret_manager_regional_secret.go.tmpl' pre_delete: 'templates/terraform/pre_delete/regional_secret.go.tmpl' include_in_tgc_next: true +tgc_include_handwritten_tests: true examples: - name: 'regional_secret_config_basic' primary_resource_id: 'regional-secret-basic' diff --git a/mmv1/products/securitycenterv2/ProjectNotificationConfig.yaml b/mmv1/products/securitycenterv2/ProjectNotificationConfig.yaml index 844919651f93..980e165dfdd4 100644 --- a/mmv1/products/securitycenterv2/ProjectNotificationConfig.yaml +++ b/mmv1/products/securitycenterv2/ProjectNotificationConfig.yaml @@ -35,7 +35,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 custom_code: - custom_import: 'templates/terraform/custom_import/self_link_as_name_set_project.go.tmpl' + custom_import: 'templates/terraform/custom_import/self_link_as_name_set_project_location_and_config_id.go.tmpl' sweeper: url_substitutions: - region: "global" @@ -48,7 +48,6 @@ examples: test_env_vars: project: 'PROJECT_NAME' ignore_read_extra: - - 'location' - 'project' parameters: - name: 'configId' @@ -61,7 +60,7 @@ parameters: - name: 'location' type: String description: | - Location ID of the parent organization. Only global is supported at the moment. + Location ID for the parent project. Defaults to `global` if location is not provided. 
url_param_only: true required: false immutable: true diff --git a/mmv1/products/sourcerepo/Repository.yaml b/mmv1/products/sourcerepo/Repository.yaml index 89e03ae6e8ce..90b64ef1f5bd 100644 --- a/mmv1/products/sourcerepo/Repository.yaml +++ b/mmv1/products/sourcerepo/Repository.yaml @@ -39,6 +39,7 @@ iam_policy: method_name_separator: ':' parent_resource_attribute: 'repository' custom_diff_suppress: 'templates/terraform/iam/sourcerepo_diff_suppress.go.tmpl' + custom_import_state_id_funcs: 'templates/terraform/iam/sourcerepo_state_id_funcs.go.tmpl' custom_code: constants: 'templates/terraform/constants/source_repo_repository.go.tmpl' update_encoder: 'templates/terraform/update_encoder/source_repo_repository.tmpl' diff --git a/mmv1/products/spanner/Instance.yaml b/mmv1/products/spanner/Instance.yaml index 6338b260ad21..247608b7ec8e 100644 --- a/mmv1/products/spanner/Instance.yaml +++ b/mmv1/products/spanner/Instance.yaml @@ -30,6 +30,7 @@ import_format: - '{{name}}' cai_asset_name_format: 'projects/{{project}}/instances/{{name}}' include_in_tgc_next: true +tgc_include_handwritten_tests: true tgc_ignore_terraform_encoder: true tgc_ignore_terraform_decoder: true timeouts: @@ -131,6 +132,7 @@ properties: The number of processing units allocated to this instance. Exactly one of either num_nodes, processing_units or autoscaling_config must be present in terraform except when instance_type = FREE_INSTANCE. default_from_api: true + is_missing_in_cai: true at_least_one_of: - 'num_nodes' - 'processing_units' @@ -179,6 +181,7 @@ properties: only scale within that range. Users can either use nodes or processing units to specify the limits, but should use the same unit to set both the min_limit and max_limit. + is_missing_in_cai: true properties: - name: 'minProcessingUnits' type: Integer @@ -235,7 +238,14 @@ properties: Specifies the target storage utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). - + - name: 'totalCpuUtilizationPercent' + type: Integer + description: | + The target total cpu utilization percentage that the autoscaler should be trying to achieve for the instance. + This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. + If not specified or set to 0, the autoscaler will skip scaling based on total cpu utilization. + The value should be higher than high_priority_cpu_utilization_percent if present. + is_missing_in_cai: true - name: 'asymmetricAutoscalingOptions' type: Array description: | diff --git a/mmv1/products/storageinsights/DatasetConfig.yaml b/mmv1/products/storageinsights/DatasetConfig.yaml index 382395fc32dd..775505e0b7fc 100644 --- a/mmv1/products/storageinsights/DatasetConfig.yaml +++ b/mmv1/products/storageinsights/DatasetConfig.yaml @@ -119,6 +119,11 @@ properties: description: | Number of days of history that must be retained. required: true + - name: 'activityDataRetentionPeriodDays' + type: Integer + default_from_api: true + description: | + Number of days of activity data that must be retained. If not specified, retentionPeriodDays will be used. Set to 0 to turn off the activity data. 
- name: 'link' type: NestedObject description: | diff --git a/mmv1/products/vertexai/FeatureOnlineStore.yaml b/mmv1/products/vertexai/FeatureOnlineStore.yaml index a4ff4a9f19a4..6846ae986597 100644 --- a/mmv1/products/vertexai/FeatureOnlineStore.yaml +++ b/mmv1/products/vertexai/FeatureOnlineStore.yaml @@ -119,6 +119,9 @@ properties: - 'bigtable' - 'optimized' properties: + - name: 'enableDirectBigtableAccess' + type: Boolean + description: 'Optional. If true, enable direct access to the Bigtable instance.' - name: 'autoScaling' type: NestedObject description: Autoscaling config applied to Bigtable Instance. @@ -139,6 +142,10 @@ properties: When a cluster's CPU utilization exceeds the target that you have set, Bigtable immediately adds nodes to the cluster. When CPU utilization is substantially lower than the target, Bigtable removes nodes. If not set will default to 50%. default_from_api: true + - name: 'zone' + type: String + description: The zone where the Bigtable instance will be created. + default_from_api: true - name: 'optimized' type: NestedObject description: @@ -152,7 +159,7 @@ properties: - 'bigtable' - 'optimized' properties: - # Meant to be an empty object with no properties - see here : https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.featureOnlineStores#Optimized + # Meant to be an empty object with no properties - see here : https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.featureOnlineStores#Optimized [] - name: 'dedicatedServingEndpoint' type: NestedObject description: @@ -201,3 +208,13 @@ properties: description: | Enable embedding management. immutable: true + - name: 'encryptionSpec' + type: NestedObject + description: | + If set, both of the online and offline data storage will be secured by this key. + properties: + - name: 'kmsKeyName' + type: String + description: | + The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key. The key needs to be in the same region as where the compute resource is created. + required: true diff --git a/mmv1/products/vertexai/IndexEndpoint.yaml b/mmv1/products/vertexai/IndexEndpoint.yaml index 909fad0341fa..12dd7cbb4e54 100644 --- a/mmv1/products/vertexai/IndexEndpoint.yaml +++ b/mmv1/products/vertexai/IndexEndpoint.yaml @@ -131,6 +131,20 @@ properties: immutable: true item_type: type: String + - name: 'pscAutomationConfigs' + type: Array + description: List of projects and networks where the PSC endpoints will be created. This field is used by Online Inference (Prediction) only. + item_type: + type: NestedObject + properties: + - name: 'projectId' + type: String + required: true + description: Project ID used to create the forwarding rule. + - name: 'network' + type: String + required: true + description: 'The full name of the Google Compute Engine [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks). [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/get): projects/{project}/global/networks/{network}.' - name: 'publicEndpointEnabled' type: Boolean description: If true, the deployed index will be accessible through public endpoint.
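For context on the pscAutomationConfigs field added to IndexEndpoint.yaml above: in the generated provider it surfaces under private_service_connect_config as a repeatable psc_automation_configs block (see the matching flatten change later in this diff). A rough, hypothetical configuration sketch only; the endpoint, project, and network names are illustrative and not taken from a shipped example:

resource "google_vertex_ai_index_endpoint" "example" {
  display_name = "sample-endpoint"
  region       = "us-central1"

  private_service_connect_config {
    enable_private_service_connect = true
    project_allowlist              = ["my-project"]

    # New block corresponding to pscAutomationConfigs: one PSC forwarding rule
    # is created per project/network pair listed here.
    psc_automation_configs {
      project_id = "my-project"
      network    = "projects/my-project/global/networks/my-network"
    }
  }
}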
diff --git a/mmv1/products/vertexai/ReasoningEngine.yaml b/mmv1/products/vertexai/ReasoningEngine.yaml index 783addf587a2..a2935a26dfdc 100644 --- a/mmv1/products/vertexai/ReasoningEngine.yaml +++ b/mmv1/products/vertexai/ReasoningEngine.yaml @@ -44,7 +44,6 @@ examples: name: 'reasoning-engine' - name: 'vertex_ai_reasoning_engine_source_based_deployment' primary_resource_id: 'reasoning_engine' - exclude_docs: true vars: name: 'reasoning-engine' - name: 'vertex_ai_reasoning_engine_full' @@ -217,16 +216,16 @@ properties: - name: 'minInstances' type: 'Integer' description: | - Optional. The maximum number of application instances that can be - launched to handle increased traffic. Defaults to 100. - Range: [1, 1000]. If VPC-SC or PSC-I is enabled, the acceptable - range is [1, 100]. + Optional. The minimum number of application instances that will be + kept running at all times. Defaults to 1. Range: [0, 10]. default_from_api: true - name: 'maxInstances' type: 'Integer' description: | - Optional. The minimum number of application instances that will be - kept running at all times. Defaults to 1. Range: [0, 10]. + Optional. The maximum number of application instances that can be + launched to handle increased traffic. Defaults to 100. + Range: [1, 1000]. If VPC-SC or PSC-I is enabled, the acceptable + range is [1, 100]. default_from_api: true - name: 'containerConcurrency' type: 'Integer' diff --git a/mmv1/products/vmwareengine/NetworkPeering.yaml b/mmv1/products/vmwareengine/NetworkPeering.yaml index 099d68aac473..2db92cefae45 100644 --- a/mmv1/products/vmwareengine/NetworkPeering.yaml +++ b/mmv1/products/vmwareengine/NetworkPeering.yaml @@ -26,6 +26,7 @@ update_verb: 'PATCH' import_format: - 'projects/{{project}}/locations/global/networkPeerings/{{name}}' include_in_tgc_next: true +tgc_include_handwritten_tests: true timeouts: insert_minutes: 20 update_minutes: 20 diff --git a/mmv1/products/vpcaccess/Connector.yaml b/mmv1/products/vpcaccess/Connector.yaml index edaf7890a72a..e82613e61613 100644 --- a/mmv1/products/vpcaccess/Connector.yaml +++ b/mmv1/products/vpcaccess/Connector.yaml @@ -65,6 +65,9 @@ examples: network_name: 'default' test_vars_overrides: 'network_name': 'acctest.BootstrapSharedServiceNetworkingConnection(t, "vpc-access-connector")' +tgc_tests: + - name: TestAccVPCAccessConnector_vpcAccessConnectorThroughput_usingThroughputOrInstancesLimits + skip: 'For instances vs throughput, prefer instances during cai2hcl conversion as throughput is discouraged.' 
parameters: - name: 'region' type: String diff --git a/mmv1/products/workbench/Instance.yaml b/mmv1/products/workbench/Instance.yaml index b40817be340f..c25cd25e2ee4 100644 --- a/mmv1/products/workbench/Instance.yaml +++ b/mmv1/products/workbench/Instance.yaml @@ -28,6 +28,7 @@ update_mask: true import_format: - 'projects/{{project}}/locations/{{location}}/instances/{{name}}' include_in_tgc_next: true +tgc_include_handwritten_tests: true timeouts: insert_minutes: 40 update_minutes: 20 diff --git a/mmv1/provider/template_data.go b/mmv1/provider/template_data.go index 71937dd32400..d1203b545488 100644 --- a/mmv1/provider/template_data.go +++ b/mmv1/provider/template_data.go @@ -17,18 +17,22 @@ import ( "bytes" "fmt" "go/format" + "io/fs" "os" "path/filepath" "text/template" "github.com/GoogleCloudPlatform/magic-modules/mmv1/api" + "github.com/GoogleCloudPlatform/magic-modules/mmv1/api/metadata" "github.com/GoogleCloudPlatform/magic-modules/mmv1/google" "github.com/golang/glog" + "gopkg.in/yaml.v3" ) type TemplateData struct { OutputFolder string VersionName string + templateFS fs.FS // TODO rewrite: is this needed? // # Information about the local environment @@ -41,8 +45,8 @@ var BETA_VERSION = "beta" var ALPHA_VERSION = "alpha" var PRIVATE_VERSION = "private" -func NewTemplateData(outputFolder string, versionName string) *TemplateData { - td := TemplateData{OutputFolder: outputFolder, VersionName: versionName} +func NewTemplateData(outputFolder string, versionName string, templateFS fs.FS) *TemplateData { + td := TemplateData{OutputFolder: outputFolder, VersionName: versionName, templateFS: templateFS} return &td } @@ -73,11 +77,15 @@ func (td *TemplateData) GenerateFWResourceFile(filePath string, resource api.Res } func (td *TemplateData) GenerateMetadataFile(filePath string, resource api.Resource) { - templatePath := "templates/terraform/metadata.yaml.tmpl" - templates := []string{ - templatePath, + metadata := metadata.FromResource(resource) + bytes, err := yaml.Marshal(metadata) + if err != nil { + glog.Exitf("error marshalling yaml for %s: %v", filePath, err) + } + err = os.WriteFile(filePath, bytes, 0644) + if err != nil { + glog.Exit(err) } - td.GenerateFile(filePath, templatePath, resource, false, templates...) } func (td *TemplateData) GenerateDataSourceFile(filePath string, resource api.Resource) { @@ -319,11 +327,11 @@ func (td *TemplateData) GenerateFile(filePath, templatePath string, input any, g funcMap := template.FuncMap{ "TemplatePath": func() string { return templatePath }, } - for k, v := range google.TemplateFunctions { + for k, v := range google.TemplateFunctions(td.templateFS) { funcMap[k] = v } - tmpl, err := template.New(templateFileName).Funcs(funcMap).ParseFiles(templates...) + tmpl, err := template.New(templateFileName).Funcs(funcMap).ParseFS(td.templateFS, templates...)
if err != nil { glog.Exit(fmt.Sprintf("error parsing %s for filepath %s ", templateFileName, filePath), err) } diff --git a/mmv1/provider/terraform.go b/mmv1/provider/terraform.go index fe855de691e0..af3d58946fe9 100644 --- a/mmv1/provider/terraform.go +++ b/mmv1/provider/terraform.go @@ -49,9 +49,11 @@ type Terraform struct { Product *api.Product StartTime time.Time + + templateFS fs.FS } -func NewTerraform(product *api.Product, versionName string, startTime time.Time) Terraform { +func NewTerraform(product *api.Product, versionName string, startTime time.Time, templateFS fs.FS) Terraform { t := Terraform{ ResourceCount: 0, IAMResourceCount: 0, @@ -59,6 +61,7 @@ func NewTerraform(product *api.Product, versionName string, startTime time.Time) TargetVersionName: versionName, Version: *product.VersionObjOrClosest(versionName), StartTime: startTime, + templateFS: templateFS, } t.Product.SetPropertiesBasedOnVersion(&t.Version) @@ -98,7 +101,7 @@ func (t *Terraform) GenerateObjects(outputFolder, resourceToGenerate string, gen } func (t *Terraform) GenerateObject(object api.Resource, outputFolder, productPath string, generateCode, generateDocs bool) { - templateData := NewTemplateData(outputFolder, t.TargetVersionName) + templateData := NewTemplateData(outputFolder, t.TargetVersionName, t.templateFS) if !object.IsExcluded() { log.Printf("Generating %s resource", object.Name) @@ -302,7 +305,7 @@ func (t *Terraform) GenerateProduct(outputFolder string) { } targetFilePath := path.Join(targetFolder, "product.go") - templateData := NewTemplateData(outputFolder, t.TargetVersionName) + templateData := NewTemplateData(outputFolder, t.TargetVersionName, t.templateFS) templateData.GenerateProductFile(targetFilePath, *t.Product) } @@ -320,7 +323,7 @@ func (t *Terraform) GenerateOperation(outputFolder string) { log.Println(fmt.Errorf("error creating parent directory %v: %v", targetFolder, err)) } targetFilePath := path.Join(targetFolder, fmt.Sprintf("%s_operation.go", google.Underscore(t.Product.Name))) - templateData := NewTemplateData(outputFolder, t.TargetVersionName) + templateData := NewTemplateData(outputFolder, t.TargetVersionName, t.templateFS) templateData.GenerateOperationFile(targetFilePath, *asyncObjects[0]) } @@ -510,7 +513,10 @@ func (t Terraform) getCommonCopyFiles(versionName string, generateCode, generate func (t Terraform) getCopyFilesInFolder(folderPath, targetDir string) map[string]string { m := make(map[string]string, 0) - filepath.WalkDir(folderPath, func(path string, di fs.DirEntry, err error) error { + fs.WalkDir(t.templateFS, folderPath, func(path string, di fs.DirEntry, err error) error { + if err != nil { + return err + } if !di.IsDir() && !strings.HasSuffix(di.Name(), ".tmpl") && !strings.HasSuffix(di.Name(), ".erb") { // Exception files if di.Name() == "gha-branch-renaming.png" || di.Name() == "clock-timings-of-branch-making-and-usage.png" { return nil @@ -543,7 +549,7 @@ func (t Terraform) CopyFileList(outputFolder string, files map[string]string, ge log.Fatalf("%s was already modified during this run at %s", targetFile, info.ModTime().String()) } - sourceByte, err := os.ReadFile(source) + sourceByte, err := fs.ReadFile(t.templateFS, source) if err != nil { log.Fatalf("Cannot read source file %s while copying: %s", source, err) } @@ -577,7 +583,7 @@ func (t Terraform) CopyFileList(outputFolder string, files map[string]string, ge func (t Terraform) CompileCommonFiles(outputFolder string, products []*api.Product, overridePath string) { t.generateResourcesForVersion(products) 
files := t.getCommonCompileFiles(t.TargetVersionName) - templateData := NewTemplateData(outputFolder, t.TargetVersionName) + templateData := NewTemplateData(outputFolder, t.TargetVersionName, t.templateFS) t.CompileFileList(outputFolder, files, *templateData, products) } @@ -621,7 +627,10 @@ func (t Terraform) getCommonCompileFiles(versionName string) map[string]string { func (t Terraform) getCompileFilesInFolder(folderPath, targetDir string) map[string]string { m := make(map[string]string, 0) - filepath.WalkDir(folderPath, func(path string, di fs.DirEntry, err error) error { + fs.WalkDir(t.templateFS, folderPath, func(path string, di fs.DirEntry, err error) error { + if err != nil { + return err + } if !di.IsDir() && strings.HasSuffix(di.Name(), ".tmpl") { fname := strings.TrimPrefix(path, "third_party/terraform/") fname = strings.TrimSuffix(fname, ".tmpl") diff --git a/mmv1/provider/terraform_oics.go b/mmv1/provider/terraform_oics.go index 176798e2b24d..801d53375c2a 100644 --- a/mmv1/provider/terraform_oics.go +++ b/mmv1/provider/terraform_oics.go @@ -17,6 +17,7 @@ package provider import ( "fmt" + "io/fs" "log" "os" "path" @@ -34,14 +35,17 @@ type TerraformOiCS struct { Product *api.Product StartTime time.Time + + templateFS fs.FS } -func NewTerraformOiCS(product *api.Product, versionName string, startTime time.Time) TerraformOiCS { +func NewTerraformOiCS(product *api.Product, versionName string, startTime time.Time, templateFS fs.FS) TerraformOiCS { toics := TerraformOiCS{ Product: product, TargetVersionName: versionName, Version: *product.VersionObjOrClosest(versionName), StartTime: startTime, + templateFS: templateFS, } toics.Product.SetPropertiesBasedOnVersion(&toics.Version) @@ -67,7 +71,7 @@ func (toics TerraformOiCS) GenerateObjects(outputFolder, resourceToGenerate stri } func (toics TerraformOiCS) GenerateObject(object api.Resource, outputFolder, resourceToGenerate string, generateCode, generateDocs bool) { - templateData := NewTemplateData(outputFolder, toics.TargetVersionName) + templateData := NewTemplateData(outputFolder, toics.TargetVersionName, toics.templateFS) if !object.IsExcluded() { log.Printf("Generating %s resource", object.Name) @@ -85,7 +89,7 @@ func (toics TerraformOiCS) GenerateResourceLegacy(object api.Resource, templateD continue } - example.SetOiCSHCLText() + example.SetOiCSHCLText(toics.templateFS) targetFolder := path.Join(outputFolder, example.Name) @@ -138,7 +142,7 @@ func (toics TerraformOiCS) GenerateResource(object api.Resource, templateData Te continue } - step.SetOiCSHCLText() + step.SetOiCSHCLText(toics.templateFS) targetFolder := path.Join(outputFolder, step.Name) diff --git a/mmv1/provider/terraform_tgc.go b/mmv1/provider/terraform_tgc.go index 9e8f22e7fbc0..9ded8d9b4c6a 100644 --- a/mmv1/provider/terraform_tgc.go +++ b/mmv1/provider/terraform_tgc.go @@ -19,7 +19,7 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" + "io/fs" "log" "os" "path" @@ -49,14 +49,17 @@ type TerraformGoogleConversion struct { Product *api.Product StartTime time.Time + + templateFS fs.FS } -func NewTerraformGoogleConversion(product *api.Product, versionName string, startTime time.Time) TerraformGoogleConversion { +func NewTerraformGoogleConversion(product *api.Product, versionName string, startTime time.Time, templateFS fs.FS) TerraformGoogleConversion { t := TerraformGoogleConversion{ Product: product, TargetVersionName: versionName, Version: *product.VersionObjOrClosest(versionName), StartTime: startTime, + templateFS: templateFS, } 
t.Product.SetPropertiesBasedOnVersion(&t.Version) @@ -98,7 +101,7 @@ func (tgc TerraformGoogleConversion) GenerateObject(object api.Resource, outputF return } - templateData := NewTemplateData(outputFolder, tgc.TargetVersionName) + templateData := NewTemplateData(outputFolder, tgc.TargetVersionName, tgc.templateFS) if !object.IsExcluded() { tgc.GenerateResource(object, *templateData, outputFolder, generateCode, generateDocs) @@ -186,21 +189,21 @@ func (tgc TerraformGoogleConversion) CompileCommonFiles(outputFolder string, pro log.Printf("Compiling common files for tgc.") tgc.generateCaiIamResources(products) - tgc.NonDefinedTests = retrieveFullManifestOfNonDefinedTests() + tgc.NonDefinedTests = retrieveFullManifestOfNonDefinedTests(tgc.templateFS) - files := retrieveFullListOfTestFiles() + files := retrieveFullListOfTestFiles(tgc.templateFS) for _, file := range files { tgc.Tests = append(tgc.Tests, strings.Split(file, ".")[0]) } tgc.Tests = slices.Compact(tgc.Tests) testSource := make(map[string]string) - for target, source := range retrieveTestSourceCodeWithLocation(".tmpl") { + for target, source := range retrieveTestSourceCodeWithLocation(tgc.templateFS, ".tmpl") { target := strings.Replace(target, "go.tmpl", "go", 1) testSource[target] = source } - templateData := NewTemplateData(outputFolder, tgc.TargetVersionName) + templateData := NewTemplateData(outputFolder, tgc.TargetVersionName, tgc.templateFS) tgc.CompileFileList(outputFolder, testSource, *templateData, products) resourceConverters := map[string]string{ @@ -237,18 +240,18 @@ func (tgc TerraformGoogleConversion) CompileFileList(outputFolder string, files } } -func retrieveFullManifestOfNonDefinedTests() []string { +func retrieveFullManifestOfNonDefinedTests(fs fs.FS) []string { var tests []string fileMap := make(map[string]bool) - files := retrieveFullListOfTestFiles() + files := retrieveFullListOfTestFiles(fs) for _, file := range files { tests = append(tests, strings.Split(file, ".")[0]) fileMap[file] = true } tests = slices.Compact(tests) - nonDefinedTests := google.Diff(tests, retrieveListOfManuallyDefinedTests()) + nonDefinedTests := google.Diff(tests, retrieveListOfManuallyDefinedTests(fs)) nonDefinedTests = google.Reject(nonDefinedTests, func(file string) bool { return strings.HasSuffix(file, "_without_default_project") }) @@ -269,10 +272,10 @@ func retrieveFullManifestOfNonDefinedTests() []string { } // Gets all of the test files in the folder third_party/tgc/tests/data -func retrieveFullListOfTestFiles() []string { +func retrieveFullListOfTestFiles(fsys fs.FS) []string { var testFiles []string - files, err := ioutil.ReadDir("third_party/tgc/tests/data") + files, err := fs.ReadDir(fsys, "third_party/tgc/tests/data") if err != nil { log.Fatal(err) } @@ -285,9 +288,9 @@ func retrieveFullListOfTestFiles() []string { } // Gets all of files in the folder third_party/tgc/tests/data -func retrieveFullListOfTestTilesWithLocation() map[string]string { +func retrieveFullListOfTestTilesWithLocation(fs fs.FS) map[string]string { testFiles := make(map[string]string) - files := retrieveFullListOfTestFiles() + files := retrieveFullListOfTestFiles(fs) for _, file := range files { target := fmt.Sprintf("testdata/templates/%s", file) source := fmt.Sprintf("third_party/tgc/tests/data/%s", file) @@ -296,10 +299,10 @@ func retrieveFullListOfTestTilesWithLocation() map[string]string { return testFiles } -func retrieveTestSourceCodeWithLocation(suffix string) map[string]string { +func retrieveTestSourceCodeWithLocation(fsys fs.FS, suffix 
string) map[string]string { var fileNames []string path := "third_party/tgc/tests/source" - files, err := ioutil.ReadDir(path) + files, err := fs.ReadDir(fsys, path) if err != nil { log.Fatal(err) } @@ -321,15 +324,15 @@ func retrieveTestSourceCodeWithLocation(suffix string) map[string]string { return testSource } -func retrieveListOfManuallyDefinedTests() []string { - m1 := retrieveListOfManuallyDefinedTestsFromFile("third_party/tgc/tests/source/cli_test.go.tmpl") - m2 := retrieveListOfManuallyDefinedTestsFromFile("third_party/tgc/tests/source/read_test.go.tmpl") +func retrieveListOfManuallyDefinedTests(fs fs.FS) []string { + m1 := retrieveListOfManuallyDefinedTestsFromFile(fs, "third_party/tgc/tests/source/cli_test.go.tmpl") + m2 := retrieveListOfManuallyDefinedTestsFromFile(fs, "third_party/tgc/tests/source/read_test.go.tmpl") return google.Concat(m1, m2) } // Reads the content of the file and then finds all of the tests in the contents -func retrieveListOfManuallyDefinedTestsFromFile(file string) []string { - data, err := os.ReadFile(file) +func retrieveListOfManuallyDefinedTestsFromFile(fsys fs.FS, file string) []string { + data, err := fs.ReadFile(fsys, file) if err != nil { log.Fatalf("Cannot open the file: %v", file) } @@ -350,8 +353,8 @@ func (tgc TerraformGoogleConversion) CopyCommonFiles(outputFolder string, genera return } - tgc.CopyFileList(outputFolder, retrieveFullListOfTestTilesWithLocation()) - tgc.CopyFileList(outputFolder, retrieveTestSourceCodeWithLocation(".go")) + tgc.CopyFileList(outputFolder, retrieveFullListOfTestTilesWithLocation(tgc.templateFS)) + tgc.CopyFileList(outputFolder, retrieveTestSourceCodeWithLocation(tgc.templateFS, ".go")) resourceConverters := map[string]string{ "../caiasset/asset.go": "third_party/tgc/caiasset/asset.go", @@ -462,7 +465,7 @@ func (tgc TerraformGoogleConversion) CopyFileList(outputFolder string, files map log.Fatalf("%s was already modified during this run at %s", targetFile, info.ModTime().String()) } - sourceByte, err := os.ReadFile(source) + sourceByte, err := fs.ReadFile(tgc.templateFS, source) if err != nil { log.Fatalf("Cannot read source file %s while copying: %s", source, err) } diff --git a/mmv1/provider/terraform_tgc_cai2hcl.go b/mmv1/provider/terraform_tgc_cai2hcl.go index e08aa563cda6..968fa9581f08 100644 --- a/mmv1/provider/terraform_tgc_cai2hcl.go +++ b/mmv1/provider/terraform_tgc_cai2hcl.go @@ -15,6 +15,7 @@ package provider import ( "fmt" + "io/fs" "log" "os" "time" @@ -33,14 +34,17 @@ type CaiToTerraformConversion struct { Product *api.Product StartTime time.Time + + templateFS fs.FS } -func NewCaiToTerraformConversion(product *api.Product, versionName string, startTime time.Time) CaiToTerraformConversion { +func NewCaiToTerraformConversion(product *api.Product, versionName string, startTime time.Time, templateFS fs.FS) CaiToTerraformConversion { t := CaiToTerraformConversion{ Product: product, TargetVersionName: versionName, Version: *product.VersionObjOrClosest(versionName), StartTime: startTime, + templateFS: templateFS, } t.Product.SetPropertiesBasedOnVersion(&t.Version) diff --git a/mmv1/provider/terraform_tgc_next.go b/mmv1/provider/terraform_tgc_next.go index 37247817467f..5b9967e0614c 100644 --- a/mmv1/provider/terraform_tgc_next.go +++ b/mmv1/provider/terraform_tgc_next.go @@ -19,6 +19,7 @@ import ( "bytes" "errors" "fmt" + "io/fs" "log" "os" "path" @@ -54,6 +55,8 @@ type TerraformGoogleConversionNext struct { Product *api.Product StartTime time.Time + + templateFS fs.FS } type ResourceIdentifier struct { @@ 
-65,13 +68,14 @@ type ResourceIdentifier struct { IdentityParam string } -func NewTerraformGoogleConversionNext(product *api.Product, versionName string, startTime time.Time) TerraformGoogleConversionNext { +func NewTerraformGoogleConversionNext(product *api.Product, versionName string, startTime time.Time, templateFS fs.FS) TerraformGoogleConversionNext { t := TerraformGoogleConversionNext{ Product: product, TargetVersionName: versionName, Version: *product.VersionObjOrClosest(versionName), StartTime: startTime, ResourcesByCaiResourceType: make(map[string][]ResourceIdentifier), + templateFS: templateFS, } t.Product.SetPropertiesBasedOnVersion(&t.Version) @@ -102,7 +106,7 @@ func (tgc TerraformGoogleConversionNext) GenerateObject(object api.Resource, out return } - templateData := NewTemplateData(outputFolder, tgc.TargetVersionName) + templateData := NewTemplateData(outputFolder, tgc.TargetVersionName, tgc.templateFS) if !object.IsExcluded() { tgc.GenerateResource(object, *templateData, outputFolder, generateCode, generateDocs) @@ -176,7 +180,7 @@ func (tgc TerraformGoogleConversionNext) CompileCommonFiles(outputFolder string, "pkg/cai2hcl/converters/convert_resource.go": "templates/tgc_next/cai2hcl/convert_resource.go.tmpl", } - templateData := NewTemplateData(outputFolder, tgc.TargetVersionName) + templateData := NewTemplateData(outputFolder, tgc.TargetVersionName, tgc.templateFS) tgc.CompileFileList(outputFolder, resourceConverters, *templateData, products) } @@ -245,9 +249,10 @@ func (tgc TerraformGoogleConversionNext) CopyCommonFiles(outputFolder string, ge "pkg/version/version.go": "third_party/terraform/version/version.go", // services - "pkg/services/compute/image.go": "third_party/terraform/services/compute/image.go", - "pkg/services/compute/disk_type.go": "third_party/terraform/services/compute/disk_type.go", - "pkg/services/kms/kms_utils.go": "third_party/terraform/services/kms/kms_utils.go", + "pkg/services/compute/image.go": "third_party/terraform/services/compute/image.go", + "pkg/services/compute/disk_type.go": "third_party/terraform/services/compute/disk_type.go", + "pkg/services/kms/kms_utils.go": "third_party/terraform/services/kms/kms_utils.go", + "pkg/services/privateca/privateca_utils.go": "third_party/terraform/services/privateca/privateca_utils.go", } tgc.CopyFileList(outputFolder, resourceConverters) } @@ -279,7 +284,7 @@ func (tgc TerraformGoogleConversionNext) CopyFileList(outputFolder string, files log.Fatalf("%s was already modified during this run at %s", targetFile, info.ModTime().String()) } - sourceByte, err := os.ReadFile(source) + sourceByte, err := fs.ReadFile(tgc.templateFS, source) if err != nil { log.Fatalf("Cannot read source file %s while copying: %s", source, err) } @@ -352,11 +357,10 @@ func (tgc TerraformGoogleConversionNext) addTestsFromHandwrittenTests(object *ap if object.ProductMetadata == nil { return nil } - product := object.ProductMetadata - productName := google.Underscore(product.Name) - resourceFullName := fmt.Sprintf("%s_%s", productName, google.Underscore(object.Name)) + productName := strings.ToLower(tgc.Product.Name) + resourceFullName := tgc.ResourceGoFilename(*object) handwrittenTestFilePath := fmt.Sprintf("third_party/terraform/services/%s/resource_%s_test.go", productName, resourceFullName) - data, err := os.ReadFile(handwrittenTestFilePath) + data, err := fs.ReadFile(tgc.templateFS, handwrittenTestFilePath) for err != nil { if errors.Is(err, os.ErrNotExist) { if strings.HasSuffix(handwrittenTestFilePath, ".tmpl") { @@ -364,7 
+368,7 @@ func (tgc TerraformGoogleConversionNext) addTestsFromHandwrittenTests(object *ap return nil } handwrittenTestFilePath += ".tmpl" - data, err = os.ReadFile(handwrittenTestFilePath) + data, err = fs.ReadFile(tgc.templateFS, handwrittenTestFilePath) } else { return fmt.Errorf("error reading handwritten test file %s: %v", handwrittenTestFilePath, err) } @@ -396,6 +400,42 @@ func (tgc TerraformGoogleConversionNext) addTestsFromHandwrittenTests(object *ap return nil } +// Similar to FullResourceName, but override-aware to prevent things like ending in _test. +// Non-Go files should just use FullResourceName. +func (tgc *TerraformGoogleConversionNext) ResourceGoFilename(object api.Resource) string { + // early exit if no override is set + if object.FilenameOverride == "" { + return tgc.FullResourceName(object) + } + + resName := object.FilenameOverride + + var productName string + if tgc.Product.LegacyName != "" { + productName = tgc.Product.LegacyName + } else { + productName = google.Underscore(tgc.Product.Name) + } + + return fmt.Sprintf("%s_%s", productName, resName) +} + +func (tgc *TerraformGoogleConversionNext) FullResourceName(object api.Resource) string { + // early exit- resource-level legacy names override the product too + if object.LegacyName != "" { + return strings.Replace(object.LegacyName, "google_", "", 1) + } + + var productName string + if tgc.Product.LegacyName != "" { + productName = tgc.Product.LegacyName + } else { + productName = google.Underscore(tgc.Product.Name) + } + + return fmt.Sprintf("%s_%s", productName, google.Underscore(object.Name)) +} + // Generates the list of resources, and gets the count of resources. // The resource object has the format // diff --git a/mmv1/templates/terraform/constants/network_endpoints.go.tmpl b/mmv1/templates/terraform/constants/network_endpoints.go.tmpl index 0e27fc894073..c86acf706b3c 100644 --- a/mmv1/templates/terraform/constants/network_endpoints.go.tmpl +++ b/mmv1/templates/terraform/constants/network_endpoints.go.tmpl @@ -34,7 +34,9 @@ func NetworkEndpointsNetworkEndpointConvertToAny(endpoint NetworkEndpointsNetwor m := make(map[string]interface{}) m["ip_address"] = endpoint.IPAddress m["port"] = endpoint.Port - m["instance"] = endpoint.Instance + if endpoint.Instance != "" { + m["instance"] = endpoint.Instance + } return m } diff --git a/mmv1/templates/terraform/constants/network_services_lb_route_extension.go.tmpl b/mmv1/templates/terraform/constants/network_services_lb_route_extension.go.tmpl new file mode 100644 index 000000000000..00952028ab72 --- /dev/null +++ b/mmv1/templates/terraform/constants/network_services_lb_route_extension.go.tmpl @@ -0,0 +1,16 @@ +func ValidateSupportedEvent(i interface{}, k string) (warnings []string, errors []error) { + str, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %s to be string", k)) + return + } + validEvents := map[string]bool{ + "REQUEST_HEADERS": true, + "REQUEST_BODY": true, + "REQUEST_TRAILERS": true, + } + if !validEvents[str] { + errors = append(errors, fmt.Errorf("value %s in %s is invalid, must be one of: REQUEST_HEADERS, REQUEST_BODY, REQUEST_TRAILERS", str, k)) + } + return +} diff --git a/mmv1/templates/terraform/constants/workbench_instance.go.tmpl b/mmv1/templates/terraform/constants/workbench_instance.go.tmpl index a291928448bd..fdddf2411bfe 100644 --- a/mmv1/templates/terraform/constants/workbench_instance.go.tmpl +++ b/mmv1/templates/terraform/constants/workbench_instance.go.tmpl @@ -75,6 +75,7 @@ var 
WorkbenchInstanceProvidedMetadata = []string{ "enable-guest-attributes", "enable-oslogin", "proxy-registration-url", + "enable-jupyterlab4", } func WorkbenchInstanceMetadataDiffSuppress(k, old, new string, d *schema.ResourceData) bool { diff --git a/mmv1/templates/terraform/custom_expand/alloydb_instance_observability_config.go.tmpl b/mmv1/templates/terraform/custom_expand/alloydb_instance_observability_config.go.tmpl new file mode 100644 index 000000000000..2fda09d1b2b9 --- /dev/null +++ b/mmv1/templates/terraform/custom_expand/alloydb_instance_observability_config.go.tmpl @@ -0,0 +1,107 @@ +func expandAlloydbInstanceObservabilityConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil { + return nil, nil + } + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEnabled, err := expandAlloydbInstanceObservabilityConfigEnabled(original["enabled"], d, config) + if err != nil { + return nil, err + } else if transformedEnabled != nil { + transformed["enabled"] = transformedEnabled + } + + transformedPreserveComments, err := expandAlloydbInstanceObservabilityConfigPreserveComments(original["preserve_comments"], d, config) + if err != nil { + return nil, err + } else if transformedPreserveComments != nil { + transformed["preserveComments"] = transformedPreserveComments + } + + transformedTrackWaitEvents, err := expandAlloydbInstanceObservabilityConfigTrackWaitEvents(original["track_wait_events"], d, config) + if err != nil { + return nil, err + } else if transformedTrackWaitEvents != nil { + transformed["trackWaitEvents"] = transformedTrackWaitEvents + } + + transformedMaxQueryStringLength, err := expandAlloydbInstanceObservabilityConfigMaxQueryStringLength(original["max_query_string_length"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxQueryStringLength); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxQueryStringLength"] = transformedMaxQueryStringLength + } + + transformedRecordApplicationTags, err := expandAlloydbInstanceObservabilityConfigRecordApplicationTags(original["record_application_tags"], d, config) + if err != nil { + return nil, err + } else if transformedRecordApplicationTags != nil { + transformed["recordApplicationTags"] = transformedRecordApplicationTags + } + + transformedQueryPlansPerMinute, err := expandAlloydbInstanceObservabilityConfigQueryPlansPerMinute(original["query_plans_per_minute"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedQueryPlansPerMinute); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["queryPlansPerMinute"] = transformedQueryPlansPerMinute + } + + transformedTrackActiveQueries, err := expandAlloydbInstanceObservabilityConfigTrackActiveQueries(original["track_active_queries"], d, config) + if err != nil { + return nil, err + } else if transformedTrackActiveQueries != nil { + transformed["trackActiveQueries"] = transformedTrackActiveQueries + } + + transformedAssistiveExperiencesEnabled, err := expandAlloydbInstanceObservabilityConfigAssistiveExperiencesEnabled(original["assistive_experiences_enabled"], d, config) + if err != nil { + return nil, err + } else if transformedAssistiveExperiencesEnabled != nil { + transformed["assistiveExperiencesEnabled"] = transformedAssistiveExperiencesEnabled + } + 
log.Printf("vkanishk: expandAlloydbInstanceObservabilityConfig transformed %v", transformed) + return transformed, nil +} + + +func expandAlloydbInstanceObservabilityConfigEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbInstanceObservabilityConfigPreserveComments(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbInstanceObservabilityConfigTrackWaitEvents(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbInstanceObservabilityConfigTrackWaitEventTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbInstanceObservabilityConfigMaxQueryStringLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbInstanceObservabilityConfigRecordApplicationTags(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbInstanceObservabilityConfigQueryPlansPerMinute(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbInstanceObservabilityConfigTrackActiveQueries(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbInstanceObservabilityConfigAssistiveExperiencesEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/mmv1/templates/terraform/custom_expand/network_connectivity_mcdt_services_state_timeline_expand.go.tmpl b/mmv1/templates/terraform/custom_expand/network_connectivity_mcdt_services_state_timeline_expand.go.tmpl new file mode 100644 index 000000000000..a1639992460d --- /dev/null +++ b/mmv1/templates/terraform/custom_expand/network_connectivity_mcdt_services_state_timeline_expand.go.tmpl @@ -0,0 +1,65 @@ +{{/* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ -}} +func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil { + return make(map[string]interface{}), nil + } + + l, ok := v.([]interface{}) + if !ok { + return nil, fmt.Errorf("expected 'services' to be a list, got %T", v) + } + + req := make(map[string]interface{}) + for _, raw := range l { + if raw == nil { + continue + } + + original, ok := raw.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("expected a service item to be a map, got %T", raw) + } + + serviceName, ok := original["service_name"].(string) + if !ok || serviceName == "" { + // Skip items without a valid service name + continue + } + + var apiStates []interface{} + if states, ok := original["states"].([]interface{}); ok { + for _, stateRaw := range states { + state, ok := stateRaw.(map[string]interface{}) + if !ok { + continue + } + + apiState := make(map[string]interface{}) + if s, ok := state["state"].(string); ok { + apiState["state"] = s + } + if et, ok := state["effective_time"].(string); ok { + apiState["effectiveTime"] = et + } + apiStates = append(apiStates, apiState) + } + } + + req[serviceName] = map[string]interface{}{ + "states": apiStates, + } + } + + return req, nil +} diff --git a/mmv1/templates/terraform/custom_expand/resource_from_self_link_nullable.go.tmpl b/mmv1/templates/terraform/custom_expand/resource_from_self_link_nullable.go.tmpl new file mode 100644 index 000000000000..a86aeba96a5d --- /dev/null +++ b/mmv1/templates/terraform/custom_expand/resource_from_self_link_nullable.go.tmpl @@ -0,0 +1,7 @@ +func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if tpgresource.IsEmptyValue(reflect.ValueOf(v.(string))) { + return nil, nil + } + + return tpgresource.GetResourceNameFromSelfLink(v.(string)), nil +} diff --git a/mmv1/templates/terraform/custom_flatten/alloydb_instance_connectionpoolconfig_flatten.go.tmpl b/mmv1/templates/terraform/custom_flatten/alloydb_instance_connectionpoolconfig_flatten.go.tmpl new file mode 100644 index 000000000000..5376e2937ab8 --- /dev/null +++ b/mmv1/templates/terraform/custom_flatten/alloydb_instance_connectionpoolconfig_flatten.go.tmpl @@ -0,0 +1,64 @@ +{{/* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ -}} +func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return flattenAlloyDBInstanceEmptyConnectionPoolConfig(v, d, config) + } + transformed := make(map[string]interface{}) + transformed["enabled"] = + flattenAlloydbInstanceConnectionPoolConfigEnabled(original["enabled"], d, config) + transformed["pooler_count"] = + flattenAlloydbInstanceConnectionPoolConfigPoolerCount(original["poolerCount"], d, config) + transformed["flags"] = + flattenAlloydbInstanceConnectionPoolConfigFlags(original["flags"], d, config) + return []interface{}{transformed} +} + +func flattenAlloyDBInstanceEmptyConnectionPoolConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // The API returns an nil/empty value for connectionPoolConfig.enabled when + // it's set to false. So keep the user's value to avoid a permadiff. + return []interface{}{ + map[string]interface{}{ + "enabled": d.Get("connection_pool_config.0.enabled"), + }, + } +} + +func flattenAlloydbInstanceConnectionPoolConfigEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbInstanceConnectionPoolConfigPoolerCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenAlloydbInstanceConnectionPoolConfigFlags(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} \ No newline at end of file diff --git a/mmv1/templates/terraform/custom_flatten/network_connectivity_mcdt_services_state_timeline_flatten.go.tmpl b/mmv1/templates/terraform/custom_flatten/network_connectivity_mcdt_services_state_timeline_flatten.go.tmpl new file mode 100644 index 000000000000..3b696ad0c637 --- /dev/null +++ b/mmv1/templates/terraform/custom_flatten/network_connectivity_mcdt_services_state_timeline_flatten.go.tmpl @@ -0,0 +1,65 @@ +{{/* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ -}} +func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + + servicesMap, ok := v.(map[string]interface{}) + if !ok { + return nil + } + + transformed := make([]interface{}, 0, len(servicesMap)) + + // Sort the service names to ensure a consistent ordering. 
+ serviceNames := make([]string, 0, len(servicesMap)) + for name := range servicesMap { + serviceNames = append(serviceNames, name) + } + sort.Strings(serviceNames) + + for _, serviceName := range serviceNames { + stateTimelineRaw := servicesMap[serviceName] + stateTimeline, ok := stateTimelineRaw.(map[string]interface{}) + if !ok { + continue + } + + var flattenedStates []interface{} + if states, ok := stateTimeline["states"].([]interface{}); ok { + for _, stateRaw := range states { + state, ok := stateRaw.(map[string]interface{}) + if !ok { + continue + } + flattenedState := make(map[string]interface{}) + if s, ok := state["state"].(string); ok { + flattenedState["state"] = s + } + if et, ok := state["effectiveTime"].(string); ok { + flattenedState["effective_time"] = et + } + flattenedStates = append(flattenedStates, flattenedState) + } + } + + flattenedService := map[string]interface{}{ + "service_name": serviceName, + "states": flattenedStates, + } + transformed = append(transformed, flattenedService) + } + + return transformed +} diff --git a/mmv1/templates/terraform/custom_flatten/vertex_ai_index_endpoint_private_service_connect_config.go.tmpl b/mmv1/templates/terraform/custom_flatten/vertex_ai_index_endpoint_private_service_connect_config.go.tmpl index be921aead132..7e2a4b325343 100644 --- a/mmv1/templates/terraform/custom_flatten/vertex_ai_index_endpoint_private_service_connect_config.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/vertex_ai_index_endpoint_private_service_connect_config.go.tmpl @@ -28,6 +28,7 @@ func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.Reso flattenVertexAIIndexEndpointPrivateServiceConnectConfigEnablePrivateServiceConnect(original["enablePrivateServiceConnect"], d, config) transformed["project_allowlist"] = flattenVertexAIIndexEndpointPrivateServiceConnectConfigProjectAllowlist(original["projectAllowlist"], d, config) + transformed["psc_automation_configs"] = d.Get("private_service_connect_config.0.psc_automation_configs") return []interface{}{transformed} } diff --git a/mmv1/templates/terraform/custom_import/apigee_security_feedback.go.tmpl b/mmv1/templates/terraform/custom_import/apigee_security_feedback.go.tmpl new file mode 100644 index 000000000000..ec8c598161b9 --- /dev/null +++ b/mmv1/templates/terraform/custom_import/apigee_security_feedback.go.tmpl @@ -0,0 +1,32 @@ +config := meta.(*transport_tpg.Config) + +// current import_formats cannot import fields with forward slashes in their value +if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err +} + +nameParts := strings.Split(d.Get("name").(string), "/") +if len(nameParts) == 4 { + // `organizations/{{"{{"}}org_name{{"}}"}}/securityFeedback/{{"{{"}}feedback_id{{"}}"}}` + orgId := fmt.Sprintf("organizations/%s", nameParts[1]) + if err := d.Set("org_id", orgId); err != nil { + return nil, fmt.Errorf("Error setting org_id: %s", err) + } + if err := d.Set("feedback_id", nameParts[3]); err != nil { + return nil, fmt.Errorf("Error setting feedback_id: %s", err) + } +} else { + return nil, fmt.Errorf( + "Saw %s when the name is expected to have shape %s", + d.Get("name"), + "organizations/{{"{{"}}org_name{{"}}"}}/securityFeedback/{{"{{"}}name{{"}}"}}") +} + +// Replace import id for the resource id +id, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}org_id{{"}}"}}/securityFeedback/{{"{{"}}feedback_id{{"}}"}}") +if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) +} +d.SetId(id) + +return 
[]*schema.ResourceData{d}, nil diff --git a/mmv1/templates/terraform/custom_import/self_link_as_name_set_project_location_and_config_id.go.tmpl b/mmv1/templates/terraform/custom_import/self_link_as_name_set_project_location_and_config_id.go.tmpl new file mode 100644 index 000000000000..d855c1431470 --- /dev/null +++ b/mmv1/templates/terraform/custom_import/self_link_as_name_set_project_location_and_config_id.go.tmpl @@ -0,0 +1,40 @@ +{{/* + The license inside this block applies to this file + Copyright 2025 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ -}} + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + + stringParts := strings.Split(d.Get("name").(string), "/") + if len(stringParts) != 6 { + return nil, fmt.Errorf( + "Unexpected format of ID (%s), expected projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/notificationConfigs/{{"{{"}}config_id{{"}}"}}", + d.Get("name"), + ) + } + + if err := d.Set("project", stringParts[1]); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + + if err := d.Set("location", stringParts[3]); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + + if err := d.Set("config_id", stringParts[5]); err != nil { + return nil, fmt.Errorf("Error setting config_id: %s", err) + } + + return []*schema.ResourceData{d}, nil diff --git a/mmv1/templates/terraform/custom_update/biglake_iceberg_catalog_update.go.tmpl b/mmv1/templates/terraform/custom_update/biglake_iceberg_catalog_update.go.tmpl new file mode 100644 index 000000000000..dad11dbd0b49 --- /dev/null +++ b/mmv1/templates/terraform/custom_update/biglake_iceberg_catalog_update.go.tmpl @@ -0,0 +1,70 @@ +userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) +if err != nil { + return err +} + +billingProject := "" + +project, err := tpgresource.GetProject(d, config) +if err != nil { + return fmt.Errorf("Error fetching project for IcebergCatalog: %s", err) +} +billingProject = project + +obj := make(map[string]interface{}) +credentialModeProp, err := expandBiglakeIcebergIcebergCatalogCredentialMode(d.Get("credential_mode"), d, config) +if err != nil { + return err +} else if v, ok := d.GetOkExists("credential_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, credentialModeProp)) { + obj["credential-mode"] = credentialModeProp +} + +url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}BiglakeIcebergBasePath{{"}}"}}iceberg/v1/restcatalog/extensions/projects/{{"{{"}}project{{"}}"}}/catalogs/{{"{{"}}name{{"}}"}}") +if err != nil { + return err +} + +log.Printf("[DEBUG] Updating IcebergCatalog %q: %#v", d.Id(), obj) +headers := make(http.Header) +updateMask := []string{} + +// The custom logic is that server only respects property name not the json name for updateMask. 
+// This will apply to all future updateable fields if they have a kebab-case json name override. +// This does not apply to any field with a camelCase or snake_case name. +if d.HasChange("credential_mode") { + updateMask = append(updateMask, "credential_mode") +} +// updateMask is a URL parameter but not present in the schema, so ReplaceVars +// won't set it +url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) +if err != nil { + return err +} + +// err == nil indicates that the billing_project value was found +if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp +} + +// if updateMask is empty we are not updating anything so skip the post +if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating IcebergCatalog %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating IcebergCatalog %q: %#v", d.Id(), res) + } + +} + +return resourceBiglakeIcebergIcebergCatalogRead(d, meta) diff --git a/mmv1/templates/terraform/encoders/service_directory_metadata_to_annotations.go.tmpl b/mmv1/templates/terraform/encoders/service_directory_metadata_to_annotations.go.tmpl index 0930b918bb49..0d1df7cca688 100644 --- a/mmv1/templates/terraform/encoders/service_directory_metadata_to_annotations.go.tmpl +++ b/mmv1/templates/terraform/encoders/service_directory_metadata_to_annotations.go.tmpl @@ -1,5 +1,5 @@ if obj["metadata"] == nil { - return nil, nil + return obj, nil } obj["annotations"] = obj["metadata"].(map[string]string) diff --git a/mmv1/templates/terraform/encoders/spanner_instance_update.go.tmpl b/mmv1/templates/terraform/encoders/spanner_instance_update.go.tmpl index 7d721b8fdbdb..5354ec7d6eb4 100644 --- a/mmv1/templates/terraform/encoders/spanner_instance_update.go.tmpl +++ b/mmv1/templates/terraform/encoders/spanner_instance_update.go.tmpl @@ -52,6 +52,9 @@ if d.HasChange("autoscaling_config") { if d.HasChange("autoscaling_config.0.autoscaling_targets.0.storage_utilization_percent") { updateMask = append(updateMask, "autoscalingConfig.autoscalingTargets.storageUtilizationPercent") } + if d.HasChange("autoscaling_config.0.autoscaling_targets.0.total_cpu_utilization_percent") { + updateMask = append(updateMask, "autoscalingConfig.autoscalingTargets.totalCpuUtilizationPercent") + } if d.HasChange("autoscaling_config.0.asymmetric_autoscaling_options") { updateMask = append(updateMask, "autoscalingConfig.asymmetricAutoscalingOptions") } diff --git a/mmv1/templates/terraform/examples/apigee_api_product_with_attributes.tf.tmpl b/mmv1/templates/terraform/examples/apigee_api_product_with_attributes.tf.tmpl index 8c8d1b7b65fe..dd93d80e1623 100644 --- a/mmv1/templates/terraform/examples/apigee_api_product_with_attributes.tf.tmpl +++ b/mmv1/templates/terraform/examples/apigee_api_product_with_attributes.tf.tmpl @@ -32,6 +32,11 @@ resource "google_apigee_instance" "apigee_instance" { peering_cidr_range = "SLASH_22" } +resource "google_apigee_environment" "env_dev" { + name = "dev" + org_id = google_apigee_organization.apigee_org.id +} + resource "google_apigee_api_product" "full_api_product" { org_id = google_apigee_organization.apigee_org.id name = "{{index $.Vars "product_name"}}" @@ -46,7 +51,7 @@ resource 
"google_apigee_api_product" "full_api_product" { quota_time_unit = "day" quota_counter_scope = "PROXY" - environments = ["dev", "hom"] + environments = ["dev"] # Set them in reverse order to test set scopes = [ @@ -190,6 +195,7 @@ resource "google_apigee_api_product" "full_api_product" { } depends_on = [ - google_apigee_instance.apigee_instance + google_apigee_instance.apigee_instance, + google_apigee_environment.env_dev ] } diff --git a/mmv1/templates/terraform/examples/apigee_api_product_with_attributes_test.tf.tmpl b/mmv1/templates/terraform/examples/apigee_api_product_with_attributes_test.tf.tmpl index ac91919ad5eb..6ed6b4f523be 100644 --- a/mmv1/templates/terraform/examples/apigee_api_product_with_attributes_test.tf.tmpl +++ b/mmv1/templates/terraform/examples/apigee_api_product_with_attributes_test.tf.tmpl @@ -73,6 +73,11 @@ resource "google_apigee_instance" "apigee_instance" { peering_cidr_range = "SLASH_22" } +resource "google_apigee_environment" "env_dev" { + name = "dev" + org_id = google_apigee_organization.apigee_org.id +} + resource "google_apigee_api_product" "{{$.PrimaryResourceId}}" { org_id = google_apigee_organization.apigee_org.id name = "full-api-product" @@ -87,7 +92,7 @@ resource "google_apigee_api_product" "{{$.PrimaryResourceId}}" { quota_time_unit = "day" quota_counter_scope = "PROXY" - environments = ["dev", "hom"] + environments = ["dev"] scopes = [ "read:weather", "write:reports" @@ -229,6 +234,7 @@ resource "google_apigee_api_product" "{{$.PrimaryResourceId}}" { } depends_on = [ - google_apigee_instance.apigee_instance + google_apigee_instance.apigee_instance, + google_apigee_environment.env_dev ] } diff --git a/mmv1/templates/terraform/examples/apigee_api_product_with_legacy_operation.tf.tmpl b/mmv1/templates/terraform/examples/apigee_api_product_with_legacy_operation.tf.tmpl index ba85ea349890..e20e2ca4a352 100644 --- a/mmv1/templates/terraform/examples/apigee_api_product_with_legacy_operation.tf.tmpl +++ b/mmv1/templates/terraform/examples/apigee_api_product_with_legacy_operation.tf.tmpl @@ -32,6 +32,17 @@ resource "google_apigee_instance" "apigee_instance" { peering_cidr_range = "SLASH_22" } +resource "google_apigee_environment" "env_dev" { + name = "dev" + org_id = google_apigee_organization.apigee_org.id +} + +resource "google_apigee_api" "test_apigee_api" { + name = "hello-world" + org_id = google_apigee_organization.apigee_org.name + config_bundle = "apigee_api_bundle.zip" +} + resource "google_apigee_api_product" "full_api_product" { org_id = google_apigee_organization.apigee_org.id name = "{{index $.Vars "product_name"}}" @@ -46,7 +57,7 @@ resource "google_apigee_api_product" "full_api_product" { value = "private" } - environments = ["dev", "hom"] + environments = ["dev"] proxies = ["hello-world"] api_resources = [ "/", @@ -63,6 +74,8 @@ resource "google_apigee_api_product" "full_api_product" { quota_counter_scope = "PROXY" depends_on = [ - google_apigee_instance.apigee_instance + google_apigee_instance.apigee_instance, + google_apigee_environment.env_dev, + google_apigee_api.test_apigee_api ] } diff --git a/mmv1/templates/terraform/examples/apigee_api_product_with_legacy_operation_test.tf.tmpl b/mmv1/templates/terraform/examples/apigee_api_product_with_legacy_operation_test.tf.tmpl index 4948c768becb..11de032edfcb 100644 --- a/mmv1/templates/terraform/examples/apigee_api_product_with_legacy_operation_test.tf.tmpl +++ b/mmv1/templates/terraform/examples/apigee_api_product_with_legacy_operation_test.tf.tmpl @@ -73,6 +73,17 @@ resource 
"google_apigee_instance" "apigee_instance" { peering_cidr_range = "SLASH_22" } +resource "google_apigee_environment" "env_dev" { + name = "dev" + org_id = google_apigee_organization.apigee_org.id +} + +resource "google_apigee_api" "test_apigee_api" { + name = "hello-world" + org_id = google_apigee_organization.apigee_org.name + config_bundle = "./test-fixtures/apigee_api_bundle.zip" +} + resource "google_apigee_api_product" "{{$.PrimaryResourceId}}" { org_id = google_apigee_organization.apigee_org.id name = "legacy-operation-api-product" @@ -87,7 +98,7 @@ resource "google_apigee_api_product" "{{$.PrimaryResourceId}}" { value = "private" } - environments = ["dev", "hom"] + environments = ["dev"] proxies = ["hello-world"] api_resources = [ "/", @@ -104,6 +115,8 @@ resource "google_apigee_api_product" "{{$.PrimaryResourceId}}" { quota_counter_scope = "PROXY" depends_on = [ - google_apigee_instance.apigee_instance + google_apigee_instance.apigee_instance, + google_apigee_environment.env_dev, + google_apigee_api.test_apigee_api ] } diff --git a/mmv1/templates/terraform/examples/apigee_security_feedback_basic.tf.tmpl b/mmv1/templates/terraform/examples/apigee_security_feedback_basic.tf.tmpl new file mode 100644 index 000000000000..9583085f533c --- /dev/null +++ b/mmv1/templates/terraform/examples/apigee_security_feedback_basic.tf.tmpl @@ -0,0 +1,55 @@ +data "google_client_config" "current" {} + +resource "google_compute_network" "apigee_network" { + name = "apigee-network" +} + +resource "google_compute_global_address" "apigee_range" { + name = "apigee-range" + purpose = "VPC_PEERING" + address_type = "INTERNAL" + prefix_length = 16 + network = google_compute_network.apigee_network.id +} + +resource "google_service_networking_connection" "apigee_vpc_connection" { + network = google_compute_network.apigee_network.id + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.apigee_range.name] +} + +resource "google_apigee_organization" "apigee_org" { + analytics_region = "us-central1" + project_id = data.google_client_config.current.project + authorized_network = google_compute_network.apigee_network.id + depends_on = [google_service_networking_connection.apigee_vpc_connection] +} + +resource "google_apigee_addons_config" "apigee_org_security_addons_config" { + org = google_apigee_organization.apigee_org.name + addons_config { + api_security_config { + enabled = true + } + } +} + +resource "google_apigee_security_feedback" "security_feedback" { + feedback_id = "{{index $.Vars "security_feedback_id"}}" + org_id = google_apigee_organization.apigee_org.id + display_name = "terraform test display name" + feedback_type = "EXCLUDED_DETECTION" + reason = "INTERNAL_SYSTEM" + comment = "terraform test comment" + feedback_contexts { + attribute = "ATTRIBUTE_ENVIRONMENTS" + values = [google_apigee_environment.apigee_environment.name] + } + feedback_contexts { + attribute = "ATTRIBUTE_IP_ADDRESS_RANGES" + values = ["10.0.0.0", "172.16.0.0/12"] + } + depends_on = [ + google_apigee_addons_config.apigee_org_security_addons_config + ] +} diff --git a/mmv1/templates/terraform/examples/apigee_security_feedback_basic_test.tf.tmpl b/mmv1/templates/terraform/examples/apigee_security_feedback_basic_test.tf.tmpl new file mode 100644 index 000000000000..30c11738e285 --- /dev/null +++ b/mmv1/templates/terraform/examples/apigee_security_feedback_basic_test.tf.tmpl @@ -0,0 +1,103 @@ +resource "google_project" "project" { + project_id = "tf-test%{random_suffix}" + name = 
"tf-test%{random_suffix}" + org_id = "{{index $.TestEnvVars "org_id"}}" + billing_account = "{{index $.TestEnvVars "billing_account"}}" + deletion_policy = "DELETE" +} + +resource "time_sleep" "wait_60_seconds" { + create_duration = "60s" + depends_on = [google_project.project] +} + +resource "google_project_service" "apigee" { + project = google_project.project.project_id + service = "apigee.googleapis.com" + depends_on = [time_sleep.wait_60_seconds] +} + +resource "google_project_service" "servicenetworking" { + project = google_project.project.project_id + service = "servicenetworking.googleapis.com" + depends_on = [google_project_service.apigee] +} +resource "time_sleep" "wait_120_seconds" { + create_duration = "120s" + depends_on = [google_project_service.servicenetworking] +} +resource "google_project_service" "compute" { + project = google_project.project.project_id + service = "compute.googleapis.com" + depends_on = [google_project_service.servicenetworking] +} + +resource "google_compute_network" "apigee_network" { + name = "apigee-network" + project = google_project.project.project_id + depends_on = [google_project_service.compute, time_sleep.wait_120_seconds] +} + +resource "google_compute_global_address" "apigee_range" { + name = "apigee-range" + purpose = "VPC_PEERING" + address_type = "INTERNAL" + prefix_length = 16 + network = google_compute_network.apigee_network.id + project = google_project.project.project_id +} + +resource "google_service_networking_connection" "apigee_vpc_connection" { + network = google_compute_network.apigee_network.id + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.apigee_range.name] + depends_on = [google_project_service.servicenetworking] +} + +resource "google_apigee_organization" "apigee_org" { + analytics_region = "us-central1" + project_id = google_project.project.project_id + authorized_network = google_compute_network.apigee_network.id + depends_on = [ + google_service_networking_connection.apigee_vpc_connection, + google_project_service.apigee, + ] +} + +resource "google_apigee_environment" "apigee_environment" { + org_id = google_apigee_organization.apigee_org.id + name = "tf-test-env-%{random_suffix}" + description = "Apigee Environment" + display_name = "environment-1" +} + +resource "google_apigee_addons_config" "apigee_org_security_addons_config" { + org = google_apigee_organization.apigee_org.name + addons_config { + api_security_config { + enabled = true + } + } +} + +resource "google_apigee_security_feedback" "{{$.PrimaryResourceId}}" { + org_id = google_apigee_organization.apigee_org.id + feedback_id = "tf-test-feedback-id" + display_name = "Exclude internal test systems" + feedback_type = "EXCLUDED_DETECTION" + reason = "INTERNAL_SYSTEM" + comment = "Excluding IP ranges and environments used by internal QA." 
+ + feedback_contexts { + attribute = "ATTRIBUTE_ENVIRONMENTS" + values = [google_apigee_environment.apigee_environment.name] + } + feedback_contexts { + attribute = "ATTRIBUTE_IP_ADDRESS_RANGES" + values = ["10.0.0.0", "172.16.0.0/12"] + } + + depends_on = [ + google_apigee_addons_config.apigee_org_security_addons_config + ] +} diff --git a/mmv1/templates/terraform/examples/apphub_boundary_basic.tf.tmpl b/mmv1/templates/terraform/examples/apphub_boundary_basic.tf.tmpl new file mode 100644 index 000000000000..1ec58f0cb3f2 --- /dev/null +++ b/mmv1/templates/terraform/examples/apphub_boundary_basic.tf.tmpl @@ -0,0 +1,4 @@ +resource "google_apphub_boundary" "{{$.PrimaryResourceId}}" { + location = "global" + crm_node = "projects/{{index $.TestEnvVars "crm_node_project_number"}}" +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/backup_dr_backup_plan_for_disk_resource.tf.tmpl b/mmv1/templates/terraform/examples/backup_dr_backup_plan_for_disk_resource.tf.tmpl index f4185401e77d..04e33482af5f 100644 --- a/mmv1/templates/terraform/examples/backup_dr_backup_plan_for_disk_resource.tf.tmpl +++ b/mmv1/templates/terraform/examples/backup_dr_backup_plan_for_disk_resource.tf.tmpl @@ -11,6 +11,7 @@ resource "google_backup_dr_backup_plan" "{{$.PrimaryResourceId}}" { backup_plan_id = "{{index $.Vars "backup_plan_id"}}" resource_type = "compute.googleapis.com/Disk" backup_vault = google_backup_dr_backup_vault.my_backup_vault.id + max_custom_on_demand_retention_days = 30 backup_rules { rule_id = "rule-1" diff --git a/mmv1/templates/terraform/examples/backup_dr_backup_plan_simple.tf.tmpl b/mmv1/templates/terraform/examples/backup_dr_backup_plan_simple.tf.tmpl index d62bac5b70a9..8c150eaa56f7 100644 --- a/mmv1/templates/terraform/examples/backup_dr_backup_plan_simple.tf.tmpl +++ b/mmv1/templates/terraform/examples/backup_dr_backup_plan_simple.tf.tmpl @@ -9,6 +9,7 @@ resource "google_backup_dr_backup_plan" "{{$.PrimaryResourceId}}" { backup_plan_id = "{{index $.Vars "backup_plan_id"}}" resource_type = "compute.googleapis.com/Instance" backup_vault = google_backup_dr_backup_vault.my_backup_vault.id + max_custom_on_demand_retention_days = 30 backup_rules { rule_id = "rule-1" diff --git a/mmv1/templates/terraform/examples/base_configs/iam_test_file.go.tmpl b/mmv1/templates/terraform/examples/base_configs/iam_test_file.go.tmpl index 1dae3fea8d2d..75b3e08047ac 100644 --- a/mmv1/templates/terraform/examples/base_configs/iam_test_file.go.tmpl +++ b/mmv1/templates/terraform/examples/base_configs/iam_test_file.go.tmpl @@ -11,6 +11,7 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" "{{ $.ImportPath }}/acctest" "{{ $.ImportPath }}/envvar" @@ -50,7 +51,7 @@ func TestAcc{{ $.ResourceName }}IamBindingGenerated(t *testing.T) { {{- if not $.IamPolicy.ExcludeImportTest }} { ResourceName: "{{ $.IamTerraformName }}_binding.foo", - ImportStateId: fmt.Sprintf("{{ $.IamImportFormat }} {{ $.IamPolicy.AllowedIamRole }}", {{ if ne $.IamImportQualifiersForTest "" }}{{ $.IamImportQualifiersForTest }}, {{ end }}{{ $example.PrimaryResourceName }}), + ImportStateIdFunc: generate{{ $.ResourceName }}IAMBindingStateID("{{ $.IamTerraformName }}_binding.foo"), ImportState: true, ImportStateVerify: true, }, @@ -62,7 +63,7 @@ func TestAcc{{ $.ResourceName }}IamBindingGenerated(t *testing.T) { {{- if not $.IamPolicy.ExcludeImportTest }} { ResourceName: "{{ $.IamTerraformName }}_binding.foo", - ImportStateId: fmt.Sprintf("{{ 
$.IamImportFormat }} {{ $.IamPolicy.AllowedIamRole }}", {{ if ne $.IamImportQualifiersForTest "" }}{{ $.IamImportQualifiersForTest }}, {{ end }}{{ $example.PrimaryResourceName }}), + ImportStateIdFunc: generate{{ $.ResourceName }}IAMBindingStateID("{{ $.IamTerraformName }}_binding.foo"), ImportState: true, ImportStateVerify: true, }, @@ -97,7 +98,7 @@ func TestAcc{{ $.ResourceName }}IamMemberGenerated(t *testing.T) { {{- if not $.IamPolicy.ExcludeImportTest }} { ResourceName: "{{ $.IamTerraformName }}_member.foo", - ImportStateId: fmt.Sprintf("{{ $.IamImportFormat }} {{ $.IamPolicy.AllowedIamRole }} user:admin@hashicorptest.com", {{ if ne $.IamImportQualifiersForTest "" }}{{ $.IamImportQualifiersForTest }}, {{ end }}{{ $example.PrimaryResourceName }}), + ImportStateIdFunc: generate{{ $.ResourceName }}IAMMemberStateID("{{ $.IamTerraformName }}_member.foo"), ImportState: true, ImportStateVerify: true, }, @@ -138,7 +139,7 @@ func TestAcc{{ $.ResourceName }}IamPolicyGenerated(t *testing.T) { {{- if not $.IamPolicy.ExcludeImportTest }} { ResourceName: "{{ $.IamTerraformName }}_policy.foo", - ImportStateId: fmt.Sprintf("{{ $.IamImportFormat }}", {{ if ne $.IamImportQualifiersForTest "" }}{{ $.IamImportQualifiersForTest }}, {{ end }}{{ $example.PrimaryResourceName }}), + ImportStateIdFunc: generate{{ $.ResourceName }}IAMPolicyStateID("{{ $.IamTerraformName }}_policy.foo"), ImportState: true, ImportStateVerify: true, }, @@ -149,7 +150,7 @@ func TestAcc{{ $.ResourceName }}IamPolicyGenerated(t *testing.T) { {{- if not $.IamPolicy.ExcludeImportTest }} { ResourceName: "{{ $.IamTerraformName }}_policy.foo", - ImportStateId: fmt.Sprintf("{{ $.IamImportFormat }}", {{ if ne $.IamImportQualifiersForTest "" }}{{ $.IamImportQualifiersForTest }}, {{ end }}{{ $example.PrimaryResourceName }}), + ImportStateIdFunc: generate{{ $.ResourceName }}IAMPolicyStateID("{{ $.IamTerraformName }}_policy.foo"), ImportState: true, ImportStateVerify: true, }, @@ -183,7 +184,7 @@ func TestAcc{{ $.ResourceName }}IamBindingGenerated_withCondition(t *testing.T) {{- if not $.IamPolicy.ExcludeImportTest }} { ResourceName: "{{ $.IamTerraformName }}_binding.foo", - ImportStateId: fmt.Sprintf("{{ $.IamImportFormat }} {{ $.IamPolicy.AllowedIamRole }} %s", {{ if ne $.IamImportQualifiersForTest "" }}{{ $.IamImportQualifiersForTest }}, {{ end }}{{ $example.PrimaryResourceName }}, context["condition_title"]), + ImportStateIdFunc: generate{{ $.ResourceName }}IAMBindingStateID("{{ $.IamTerraformName }}_binding.foo"), ImportState: true, ImportStateVerify: true, }, @@ -219,19 +220,19 @@ func TestAcc{{ $.ResourceName }}IamBindingGenerated_withAndWithoutCondition(t *t {{- if not $.IamPolicy.ExcludeImportTest }} { ResourceName: "{{ $.IamTerraformName }}_binding.foo", - ImportStateId: fmt.Sprintf("{{ $.IamImportFormat }} {{ $.IamPolicy.AllowedIamRole }}", {{ if ne $.IamImportQualifiersForTest "" }}{{ $.IamImportQualifiersForTest }}, {{ end }}{{ $example.PrimaryResourceName }}), + ImportStateIdFunc: generate{{ $.ResourceName }}IAMBindingStateID("{{ $.IamTerraformName }}_binding.foo"), ImportState: true, ImportStateVerify: true, }, { ResourceName: "{{ $.IamTerraformName }}_binding.foo2", - ImportStateId: fmt.Sprintf("{{ $.IamImportFormat }} {{ $.IamPolicy.AllowedIamRole }} %s", {{ if ne $.IamImportQualifiersForTest "" }}{{ $.IamImportQualifiersForTest }}, {{ end }}{{ $example.PrimaryResourceName }}, context["condition_title"]), + ImportStateIdFunc: generate{{ $.ResourceName }}IAMBindingStateID("{{ $.IamTerraformName }}_binding.foo2"), ImportState: 
true, ImportStateVerify: true, }, { ResourceName: "{{ $.IamTerraformName }}_binding.foo3", - ImportStateId: fmt.Sprintf("{{ $.IamImportFormat }} {{ $.IamPolicy.AllowedIamRole }} %s", {{ if ne $.IamImportQualifiersForTest "" }}{{ $.IamImportQualifiersForTest }}, {{ end }}{{ $example.PrimaryResourceName }}, context["condition_title_no_desc"]), + ImportStateIdFunc: generate{{ $.ResourceName }}IAMBindingStateID("{{ $.IamTerraformName }}_binding.foo3"), ImportState: true, ImportStateVerify: true, }, @@ -266,7 +267,7 @@ func TestAcc{{ $.ResourceName }}IamMemberGenerated_withCondition(t *testing.T) { {{- if not $.IamPolicy.ExcludeImportTest }} { ResourceName: "{{ $.IamTerraformName }}_member.foo", - ImportStateId: fmt.Sprintf("{{ $.IamImportFormat }} {{ $.IamPolicy.AllowedIamRole }} user:admin@hashicorptest.com %s", {{ if ne $.IamImportQualifiersForTest "" }}{{ $.IamImportQualifiersForTest }}, {{ end }}{{ $example.PrimaryResourceName }}, context["condition_title"]), + ImportStateIdFunc: generate{{ $.ResourceName }}IAMMemberStateID("{{ $.IamTerraformName }}_member.foo"), ImportState: true, ImportStateVerify: true, }, @@ -302,19 +303,19 @@ func TestAcc{{ $.ResourceName }}IamMemberGenerated_withAndWithoutCondition(t *te {{- if not $.IamPolicy.ExcludeImportTest }} { ResourceName: "{{ $.IamTerraformName }}_member.foo", - ImportStateId: fmt.Sprintf("{{ $.IamImportFormat }} {{ $.IamPolicy.AllowedIamRole }} user:admin@hashicorptest.com", {{ if ne $.IamImportQualifiersForTest "" }}{{ $.IamImportQualifiersForTest }}, {{ end }}{{ $example.PrimaryResourceName }}), + ImportStateIdFunc: generate{{ $.ResourceName }}IAMMemberStateID("{{ $.IamTerraformName }}_member.foo"), ImportState: true, ImportStateVerify: true, }, { ResourceName: "{{ $.IamTerraformName }}_member.foo2", - ImportStateId: fmt.Sprintf("{{ $.IamImportFormat }} {{ $.IamPolicy.AllowedIamRole }} user:admin@hashicorptest.com %s", {{ if ne $.IamImportQualifiersForTest "" }}{{ $.IamImportQualifiersForTest }}, {{ end }}{{ $example.PrimaryResourceName }}, context["condition_title"]), + ImportStateIdFunc: generate{{ $.ResourceName }}IAMMemberStateID("{{ $.IamTerraformName }}_member.foo2"), ImportState: true, ImportStateVerify: true, }, { ResourceName: "{{ $.IamTerraformName }}_member.foo3", - ImportStateId: fmt.Sprintf("{{ $.IamImportFormat }} {{ $.IamPolicy.AllowedIamRole }} user:admin@hashicorptest.com %s", {{ if ne $.IamImportQualifiersForTest "" }}{{ $.IamImportQualifiersForTest }}, {{ end }}{{ $example.PrimaryResourceName }}, context["condition_title_no_desc"]), + ImportStateIdFunc: generate{{ $.ResourceName }}IAMMemberStateID("{{ $.IamTerraformName }}_member.foo3"), ImportState: true, ImportStateVerify: true, }, @@ -369,7 +370,7 @@ func TestAcc{{ $.ResourceName }}IamPolicyGenerated_withCondition(t *testing.T) { {{- if not $.IamPolicy.ExcludeImportTest }} { ResourceName: "{{ $.IamTerraformName }}_policy.foo", - ImportStateId: fmt.Sprintf("{{ $.IamImportFormat }}", {{ if ne $.IamImportQualifiersForTest "" }}{{ $.IamImportQualifiersForTest }}, {{ end }}{{ $example.PrimaryResourceName }}), + ImportStateIdFunc: generate{{ $.ResourceName }}IAMPolicyStateID("{{ $.IamTerraformName }}_policy.foo"), ImportState: true, ImportStateVerify: true, }, @@ -647,3 +648,73 @@ resource "{{ $.IamTerraformName }}_policy" "foo" { `, context) } {{- end }}{{/* if $.IamPolicy.IamConditionsRequestType */}} + +{{- if $.IamPolicy.CustomImportStateIDFuncs }} +{{ customTemplate $ $.IamPolicy.CustomImportStateIDFuncs true }} +{{- else }} +func generate{{ $.ResourceName 
}}IAMPolicyStateID(iamResourceAddr string) func (*terraform.State) (string, error) { + return func (state *terraform.State) (string, error) { + var rawState map[string]string + for _, m := range state.Modules { + if len(m.Resources) > 0 { + if v, ok := m.Resources[iamResourceAddr]; ok { + rawState = v.Primary.Attributes + } + } + } + fmt.Printf("raw state %s\n", rawState) + {{- range $p := $.IamImportParams }} + {{- if contains $.IamImportFormatTemplate (printf "{{%%%s}}" $p) }} + {{ $p }} := rawState["{{ underscore $p }}"] + {{- else }} + {{ $p }} := tpgresource.GetResourceNameFromSelfLink(rawState["{{ underscore $p }}"]) + {{- end }} + {{- end }} + return acctest.BuildIAMImportId(fmt.Sprintf("{{ $.IamImportFormat }}", {{ join $.IamImportParams ", " }}), "", "", rawState["condition.0.title"]), nil + } +} + +func generate{{ $.ResourceName }}IAMBindingStateID(iamResourceAddr string) func (*terraform.State) (string, error) { + return func (state *terraform.State) (string, error) { + var rawState map[string]string + for _, m := range state.Modules { + if len(m.Resources) > 0 { + if v, ok := m.Resources[iamResourceAddr]; ok { + rawState = v.Primary.Attributes + } + } + } + fmt.Printf("raw state %s\n", rawState) + {{- range $p := $.IamImportParams }} + {{- if contains $.IamImportFormatTemplate (printf "{{%%%s}}" $p) }} + {{ $p }} := rawState["{{ underscore $p }}"] + {{- else }} + {{ $p }} := tpgresource.GetResourceNameFromSelfLink(rawState["{{ underscore $p }}"]) + {{- end }} + {{- end }} + return acctest.BuildIAMImportId(fmt.Sprintf("{{ $.IamImportFormat }}", {{ join $.IamImportParams ", " }}), rawState["role"], "", rawState["condition.0.title"]), nil + } +} + +func generate{{ $.ResourceName }}IAMMemberStateID(iamResourceAddr string) func (*terraform.State) (string, error) { + return func (state *terraform.State) (string, error) { + var rawState map[string]string + for _, m := range state.Modules { + if len(m.Resources) > 0 { + if v, ok := m.Resources[iamResourceAddr]; ok { + rawState = v.Primary.Attributes + } + } + } + fmt.Printf("raw state %s\n", rawState) + {{- range $p := $.IamImportParams }} + {{- if contains $.IamImportFormatTemplate (printf "{{%%%s}}" $p) }} + {{ $p }} := rawState["{{ underscore $p }}"] + {{- else }} + {{ $p }} := tpgresource.GetResourceNameFromSelfLink(rawState["{{ underscore $p }}"]) + {{- end }} + {{- end }} + return acctest.BuildIAMImportId(fmt.Sprintf("{{ $.IamImportFormat }}", {{ join $.IamImportParams ", " }}), rawState["role"], rawState["member"], rawState["condition.0.title"]), nil + } +} +{{- end }} diff --git a/mmv1/templates/terraform/examples/biglake_iceberg_catalog.tf.tmpl b/mmv1/templates/terraform/examples/biglake_iceberg_catalog.tf.tmpl new file mode 100644 index 000000000000..f19c52514db3 --- /dev/null +++ b/mmv1/templates/terraform/examples/biglake_iceberg_catalog.tf.tmpl @@ -0,0 +1,14 @@ +resource "google_storage_bucket" "bucket_for_{{$.PrimaryResourceId}}" { + name = "{{index $.Vars "name"}}" + location = "us-central1" + force_destroy = true + uniform_bucket_level_access = true +} + +resource "google_biglake_iceberg_catalog" "{{$.PrimaryResourceId}}" { + name = "{{index $.Vars "name"}}" + catalog_type = "CATALOG_TYPE_GCS_BUCKET" + depends_on = [ + google_storage_bucket.bucket_for_{{$.PrimaryResourceId}} + ] +} diff --git a/mmv1/templates/terraform/examples/cloudrunv2_service_zip_deploy.tf.tmpl b/mmv1/templates/terraform/examples/cloudrunv2_service_zip_deploy.tf.tmpl new file mode 100644 index 000000000000..225a90214d1a --- /dev/null +++ 
b/mmv1/templates/terraform/examples/cloudrunv2_service_zip_deploy.tf.tmpl @@ -0,0 +1,40 @@ +resource "google_storage_bucket" "sourcebucket" { + provider = google-beta + name = "${data.google_project.project.project_id}-tf-test-gcf-source%{random_suffix}" # Every bucket name must be globally unique + location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "source_tar" { + provider = google-beta + name = "function-source.zip" + bucket = google_storage_bucket.sourcebucket.name + source = "./test-fixtures/cr-zip-nodejs-hello.tar.gz" +} + +resource "google_cloud_run_v2_service" "{{$.PrimaryResourceId}}" { + provider = google-beta + name = "{{index $.Vars "cloud_run_service_name"}}" + location = "us-central1" + deletion_protection = false + + template { + containers { + image = "scratch" + base_image_uri = "us-central1-docker.pkg.dev/serverless-runtimes/google-24-full/runtimes/nodejs24" + command = ["node"] + args = ["index.js"] + source_code { + cloud_storage_source { + bucket = google_storage_bucket.sourcebucket.name + object = google_storage_bucket_object.source_tar.name + generation = google_storage_bucket_object.source_tar.generation + } + } + } + } +} + +data "google_project" "project" { + provider = google-beta +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/cloudrunv2_worker_pool_custom_audiences.tf.tmpl b/mmv1/templates/terraform/examples/cloudrunv2_worker_pool_custom_audiences.tf.tmpl deleted file mode 100644 index f81cd898e482..000000000000 --- a/mmv1/templates/terraform/examples/cloudrunv2_worker_pool_custom_audiences.tf.tmpl +++ /dev/null @@ -1,13 +0,0 @@ -resource "google_cloud_run_v2_worker_pool" "{{$.PrimaryResourceId}}" { - name = "{{index $.Vars "cloud_run_worker_pool_name"}}" - location = "us-central1" - deletion_protection = false - launch_stage = "BETA" - - custom_audiences = ["aud1"] - template { - containers { - image = "us-docker.pkg.dev/cloudrun/container/worker-pool" - } - } -} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/colab_runtime_template_full.tf.tmpl b/mmv1/templates/terraform/examples/colab_runtime_template_full.tf.tmpl index ffe8e7289448..4371f6fbde11 100644 --- a/mmv1/templates/terraform/examples/colab_runtime_template_full.tf.tmpl +++ b/mmv1/templates/terraform/examples/colab_runtime_template_full.tf.tmpl @@ -53,4 +53,17 @@ resource "google_colab_runtime_template" "{{$.PrimaryResourceId}}" { encryption_spec { kms_key_name = "{{index $.Vars "key_name"}}" } + + software_config { + env { + name = "TEST" + value = 1 + } + + post_startup_script_config { + post_startup_script = "echo 'hello world'" + post_startup_script_url = "gs://colab-enterprise-pss-secure/secure_pss.sh" + post_startup_script_behavior = "RUN_ONCE" + } + } } diff --git a/mmv1/templates/terraform/examples/compute_cross_site_network_basic.tf.tmpl b/mmv1/templates/terraform/examples/compute_cross_site_network_basic.tf.tmpl index 6860582a71c4..0aa6b8c36b14 100644 --- a/mmv1/templates/terraform/examples/compute_cross_site_network_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/compute_cross_site_network_basic.tf.tmpl @@ -1,9 +1,7 @@ data "google_project" "project" { - provider = google-beta } resource "google_compute_cross_site_network" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "name"}}" description = "{{index $.Vars "description"}}" - provider = google-beta } \ No newline at end of file diff --git 
a/mmv1/templates/terraform/examples/compute_interconnect_attachment_custom_ranges.tf.tmpl b/mmv1/templates/terraform/examples/compute_interconnect_attachment_custom_ranges.tf.tmpl index 135350dde7b0..91b2180d8aa8 100644 --- a/mmv1/templates/terraform/examples/compute_interconnect_attachment_custom_ranges.tf.tmpl +++ b/mmv1/templates/terraform/examples/compute_interconnect_attachment_custom_ranges.tf.tmpl @@ -10,7 +10,6 @@ resource "google_compute_interconnect_attachment" "{{$.PrimaryResourceId}}" { candidate_customer_router_ip_address = "192.169.0.2/29" candidate_cloud_router_ipv6_address = "748d:2f23:6651:9455:828b:ca81:6fe0:fed1/125" candidate_customer_router_ipv6_address = "748d:2f23:6651:9455:828b:ca81:6fe0:fed2/125" - provider = google-beta } resource "google_compute_router" "foobar" { @@ -19,11 +18,9 @@ resource "google_compute_router" "foobar" { bgp { asn = 16550 } - provider = google-beta } resource "google_compute_network" "foobar" { name = "{{index $.Vars "network_name"}}" auto_create_subnetworks = false - provider = google-beta } diff --git a/mmv1/templates/terraform/examples/compute_region_health_source_basic.tf.tmpl b/mmv1/templates/terraform/examples/compute_region_health_source_basic.tf.tmpl new file mode 100644 index 000000000000..b55cc97fb7d5 --- /dev/null +++ b/mmv1/templates/terraform/examples/compute_region_health_source_basic.tf.tmpl @@ -0,0 +1,32 @@ +resource "google_compute_region_health_aggregation_policy" "hap" { + provider = google-beta + name = "{{index $.Vars "name"}}-hap" + description = "health aggregation policy for health source" + region = "us-central1" +} + +resource "google_compute_health_check" "default" { + provider = google-beta + name = "{{index $.Vars "name"}}-hc" + http_health_check { + port = 80 + } +} + +resource "google_compute_region_backend_service" "default" { + provider = google-beta + name = "{{index $.Vars "name"}}-bs" + region = "us-central1" + health_checks = [google_compute_health_check.default.id] + load_balancing_scheme = "INTERNAL" +} + +resource "google_compute_region_health_source" "{{$.PrimaryResourceId}}" { + provider = google-beta + name = "{{index $.Vars "name"}}" + description = "{{index $.Vars "description"}}" + region = "us-central1" + source_type = "BACKEND_SERVICE" + sources = [google_compute_region_backend_service.default.id] + health_aggregation_policy = google_compute_region_health_aggregation_policy.hap.id +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/compute_wire_group_basic.tf.tmpl b/mmv1/templates/terraform/examples/compute_wire_group_basic.tf.tmpl index 82a68779eecc..33ade70243f2 100644 --- a/mmv1/templates/terraform/examples/compute_wire_group_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/compute_wire_group_basic.tf.tmpl @@ -1,18 +1,15 @@ data "google_project" "project" { -provider = google-beta } resource "google_compute_cross_site_network" "example-cross-site-network" { name = "{{index $.Vars "cross_site_network"}}" description = "Example cross site network" - provider = google-beta } resource "google_compute_wire_group" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "name"}}" description = "{{index $.Vars "description"}}" cross_site_network = "{{index $.Vars "cross_site_network"}}" - provider = google-beta depends_on = [ google_compute_cross_site_network.example-cross-site-network ] @@ -21,8 +18,5 @@ resource "google_compute_wire_group" "{{$.PrimaryResourceId}}" { fault_response = "NONE" bandwidth_allocation = "ALLOCATE_PER_WIRE" } - wire_group_properties { - type = 
"WIRE" - } admin_enabled = true } \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/compute_wire_group_basic_beta.tf.tmpl b/mmv1/templates/terraform/examples/compute_wire_group_basic_beta.tf.tmpl new file mode 100644 index 000000000000..6b2e00872c75 --- /dev/null +++ b/mmv1/templates/terraform/examples/compute_wire_group_basic_beta.tf.tmpl @@ -0,0 +1,28 @@ +data "google_project" "project" { +provider = "google-beta" +} + +resource "google_compute_cross_site_network" "example-cross-site-network" { + provider = "google-beta" + name = "{{index $.Vars "cross_site_network"}}" + description = "Example cross site network" +} + +resource "google_compute_wire_group" "{{$.PrimaryResourceId}}" { + provider = "google-beta" + name = "{{index $.Vars "name"}}" + description = "{{index $.Vars "description"}}" + cross_site_network = "{{index $.Vars "cross_site_network"}}" + depends_on = [ + google_compute_cross_site_network.example-cross-site-network + ] + wire_properties { + bandwidth_unmetered = 10 + fault_response = "NONE" + bandwidth_allocation = "ALLOCATE_PER_WIRE" + } + wire_group_properties { + type = "WIRE" + } + admin_enabled = true +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/dataplex_datascan_onetime_discovery.tf.tmpl b/mmv1/templates/terraform/examples/dataplex_datascan_onetime_discovery.tf.tmpl new file mode 100644 index 000000000000..041f1186f8e2 --- /dev/null +++ b/mmv1/templates/terraform/examples/dataplex_datascan_onetime_discovery.tf.tmpl @@ -0,0 +1,26 @@ +resource "google_dataplex_datascan" "{{$.PrimaryResourceId}}" { + location = "us-central1" + data_scan_id = "{{index $.Vars "datascan_name"}}" + + data { + resource = "//storage.googleapis.com/projects/${google_storage_bucket.tf_test_bucket.project}/buckets/${google_storage_bucket.tf_test_bucket.name}" + } + + execution_spec { + trigger { + one_time { + ttl_after_scan_completion = "120s" + } + } + } + + data_discovery_spec {} + + project = "{{index $.TestEnvVars "project_name"}}" +} + +resource "google_storage_bucket" "tf_test_bucket" { + name = "tf-test-bucket-name-%{random_suffix}" + location = "{{index $.TestEnvVars "location"}}" + uniform_bucket_level_access = true +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/dataplex_datascan_onetime_documentation.tf.tmpl b/mmv1/templates/terraform/examples/dataplex_datascan_onetime_documentation.tf.tmpl new file mode 100644 index 000000000000..1ce9553777bd --- /dev/null +++ b/mmv1/templates/terraform/examples/dataplex_datascan_onetime_documentation.tf.tmpl @@ -0,0 +1,82 @@ +resource "google_bigquery_dataset" "tf_dataplex_test_dataset" { + dataset_id = "tf_dataplex_test_dataset_id_%{random_suffix}" + default_table_expiration_ms = 3600000 +} + +resource "google_bigquery_table" "tf_dataplex_test_table" { + dataset_id = google_bigquery_dataset.tf_dataplex_test_dataset.dataset_id + table_id = "tf_dataplex_test_table_id_%{random_suffix}" + deletion_protection = false + schema = < /var/www/html/index.html +
+      Name: $NAME
+      IP: $IP
+      Metadata: $METADATA
+      </pre>
+ EOF + EOF1 + } + + lifecycle { + create_before_destroy = true + } +} + +# health check +resource "google_compute_region_health_check" "default" { + name = "{{index $.Vars "hc_name"}}" + region = "us-west1" + + http_health_check { + port_specification = "USE_SERVING_PORT" + } +} + +# MIG +resource "google_compute_region_instance_group_manager" "mig" { + name = "{{index $.Vars "mig_name"}}" + region = "us-west1" + + base_instance_name = "vm" + target_size = 2 + + version { + instance_template = google_compute_instance_template.instance_template.id + name = "primary" + } +} + +# allow all access from IAP and health check ranges +resource "google_compute_firewall" "fw_iap" { + name = "{{index $.Vars "fw_allow_iap_hc_name"}}" + direction = "INGRESS" + network = google_compute_network.ilb_network.id + source_ranges = ["130.211.0.0/22", "35.191.0.0/16", "35.235.240.0/20"] + + allow { + protocol = "tcp" + } +} + +# allow http from proxy subnet to backends +resource "google_compute_firewall" "fw_ilb_to_backends" { + name = "{{index $.Vars "fw_allow_ilb_to_backends_name"}}" + direction = "INGRESS" + network = google_compute_network.ilb_network.id + source_ranges = ["10.0.0.0/24"] + target_tags = ["http-server"] + + allow { + protocol = "tcp" + ports = ["80", "443", "8080"] + } + + depends_on = [ + google_compute_firewall.fw_iap + ] +} + +resource "google_network_services_lb_route_extension" "{{$.PrimaryResourceId}}" { + name = "{{index $.Vars "lb_route_extension_name"}}" + description = "my route extension" + location = "us-west1" + load_balancing_scheme = "INTERNAL_MANAGED" + forwarding_rules = [google_compute_forwarding_rule.default.self_link] + + extension_chains { + name = "chain1" + + match_condition { + cel_expression = "request.path.startsWith('/extensions')" + } + + extensions { + name = "ext11" + authority = "ext11.com" + service = google_compute_region_backend_service.callouts_backend.self_link + timeout = "0.1s" + fail_open = false + + supported_events = ["REQUEST_HEADERS"] + observability_mode = true + } + } + + labels = { + foo = "bar" + } +} + +# test instance +resource "google_compute_instance" "vm_test" { + name = "{{index $.Vars "vm_test_name"}}" + zone = "us-west1-b" + machine_type = "e2-small" + + network_interface { + network = google_compute_network.ilb_network.id + subnetwork = google_compute_subnetwork.ilb_subnet.id + } + + boot_disk { + initialize_params { + image = "debian-cloud/debian-11" + } + } +} + +# Route Extension Backend Instance +resource "google_compute_instance" "callouts_instance" { + name = "{{index $.Vars "callouts_instance_name"}}" + zone = "us-west1-a" + machine_type = "e2-small" + + labels = { + "container-vm" = "cos-stable-109-17800-147-54" + } + + tags = ["allow-ssh","load-balanced-backend"] + + network_interface { + network = google_compute_network.ilb_network.id + subnetwork = google_compute_subnetwork.ilb_subnet.id + + access_config { + # add external ip to fetch packages + } + } + + boot_disk { + auto_delete = true + + initialize_params { + type = "pd-standard" + size = 10 + image = "https://www.googleapis.com/compute/v1/projects/cos-cloud/global/images/cos-stable-109-17800-147-54" + } + } + + # Initialize an Envoy's Ext Proc gRPC API based on a docker container + metadata = { + startup-script = <<-EOF1 + #! 
/bin/bash + apt-get update + apt-get install apache2 -y + a2ensite default-ssl + a2enmod ssl + echo "Page served from second backend service" | tee /var/www/html/index.html + systemctl restart apache2' + EOF1 + } + + lifecycle { + create_before_destroy = true + } + + deletion_protection = false + + depends_on = [ + google_compute_instance.vm_test + ] +} + +// callouts instance group +resource "google_compute_instance_group" "callouts_instance_group" { + name = "{{index $.Vars "callouts_instance_group"}}" + description = "Terraform test instance group" + zone = "us-west1-a" + + instances = [ + google_compute_instance.callouts_instance.id, + ] + + named_port { + name = "http" + port = "80" + } + + named_port { + name = "grpc" + port = "443" + } +} + +# callout health check +resource "google_compute_region_health_check" "callouts_health_check" { + name = "{{index $.Vars "callouts_hc_name"}}" + region = "us-west1" + + http_health_check { + port = 80 + } + + depends_on = [ + google_compute_region_health_check.default + ] +} + +# callout backend service +resource "google_compute_region_backend_service" "callouts_backend" { + name = "{{index $.Vars "callouts_backend_name"}}" + region = "us-west1" + protocol = "HTTP2" + load_balancing_scheme = "INTERNAL_MANAGED" + timeout_sec = 10 + port_name = "grpc" + health_checks = [google_compute_region_health_check.callouts_health_check.id] + + backend { + group = google_compute_instance_group.callouts_instance_group.id + balancing_mode = "UTILIZATION" + capacity_scaler = 1.0 + } + + depends_on = [ + google_compute_region_backend_service.default + ] +} diff --git a/mmv1/templates/terraform/examples/network_services_multicast_group_consumer_activation_basic.tf.tmpl b/mmv1/templates/terraform/examples/network_services_multicast_group_consumer_activation_basic.tf.tmpl new file mode 100644 index 000000000000..3777fddded38 --- /dev/null +++ b/mmv1/templates/terraform/examples/network_services_multicast_group_consumer_activation_basic.tf.tmpl @@ -0,0 +1,56 @@ +resource "google_compute_network" "network" { + name = "{{index $.Vars "network_name"}}" + auto_create_subnetworks = false +} + +resource "google_network_services_multicast_domain" "multicast_domain" { + multicast_domain_id = "{{index $.Vars "domain_name"}}" + location = "global" + admin_network = google_compute_network.network.id + connection_config { connection_type="SAME_VPC"} + depends_on = [google_compute_network.network] +} + +resource "google_network_services_multicast_domain_activation" "multicast_domain_activation" { + multicast_domain_activation_id = "{{index $.Vars "domain_activation_name"}}" + location = "us-central1-b" + multicast_domain = google_network_services_multicast_domain.multicast_domain.id +} + +resource "google_network_services_multicast_consumer_association" "consumer_association" { + multicast_consumer_association_id = "{{index $.Vars "consumer_association_name"}}" + location = "us-central1-b" + network = google_compute_network.network.id + multicast_domain_activation = google_network_services_multicast_domain_activation.multicast_domain_activation.id + depends_on = [google_compute_network.network] +} + + +resource "google_network_connectivity_internal_range" "internal_range" { + name = "{{index $.Vars "internal_range_name"}}" + network = google_compute_network.network.self_link + usage = "FOR_VPC" + peering = "FOR_SELF" + ip_cidr_range = "224.2.0.2/32" +} + +resource "google_network_services_multicast_group_range" "group_range" { + multicast_group_range_id = "{{index $.Vars 
"group_range_name"}}" + location = "global" + reserved_internal_range = google_network_connectivity_internal_range.internal_range.id + multicast_domain = google_network_services_multicast_domain.multicast_domain.id +} + +resource "google_network_services_multicast_group_range_activation" "group_range_activation" { + multicast_group_range_activation_id = "{{index $.Vars "group_range_activation_name"}}" + location = "us-central1-b" + multicast_group_range = google_network_services_multicast_group_range.group_range.id + multicast_domain_activation = google_network_services_multicast_domain_activation.multicast_domain_activation.id +} + +resource "google_network_services_multicast_group_consumer_activation" {{$.PrimaryResourceId}} { + multicast_group_consumer_activation_id = "{{index $.Vars "group_consumer_activation_name"}}" + location = "us-central1-b" + multicast_group_range_activation = google_network_services_multicast_group_range_activation.group_range_activation.id + multicast_consumer_association = google_network_services_multicast_consumer_association.consumer_association.id +} diff --git a/mmv1/templates/terraform/examples/network_services_multicast_group_producer_activation_basic.tf.tmpl b/mmv1/templates/terraform/examples/network_services_multicast_group_producer_activation_basic.tf.tmpl new file mode 100644 index 000000000000..dfd734505060 --- /dev/null +++ b/mmv1/templates/terraform/examples/network_services_multicast_group_producer_activation_basic.tf.tmpl @@ -0,0 +1,56 @@ +resource "google_compute_network" "network" { + name = "{{index $.Vars "network_name"}}" + auto_create_subnetworks = false +} + +resource "google_network_services_multicast_domain" "multicast_domain" { + multicast_domain_id = "{{index $.Vars "domain_name"}}" + location = "global" + admin_network = google_compute_network.network.id + connection_config { connection_type="SAME_VPC"} + depends_on = [google_compute_network.network] +} + +resource "google_network_services_multicast_domain_activation" "multicast_domain_activation" { + multicast_domain_activation_id = "{{index $.Vars "domain_activation_name"}}" + location = "us-central1-b" + multicast_domain = google_network_services_multicast_domain.multicast_domain.id +} + +resource "google_network_services_multicast_producer_association" "producer_association" { + multicast_producer_association_id = "{{index $.Vars "producer_association_name"}}" + location = "us-central1-b" + network = google_compute_network.network.id + multicast_domain_activation = google_network_services_multicast_domain_activation.multicast_domain_activation.id + depends_on = [google_compute_network.network] +} + + +resource "google_network_connectivity_internal_range" "internal_range" { + name = "{{index $.Vars "internal_range_name"}}" + network = google_compute_network.network.self_link + usage = "FOR_VPC" + peering = "FOR_SELF" + ip_cidr_range = "224.2.0.2/32" +} + +resource "google_network_services_multicast_group_range" "group_range" { + multicast_group_range_id = "{{index $.Vars "group_range_name"}}" + location = "global" + reserved_internal_range = google_network_connectivity_internal_range.internal_range.id + multicast_domain = google_network_services_multicast_domain.multicast_domain.id +} + +resource "google_network_services_multicast_group_range_activation" "group_range_activation" { + multicast_group_range_activation_id = "{{index $.Vars "group_range_activation_name"}}" + location = "us-central1-b" + multicast_group_range = 
google_network_services_multicast_group_range.group_range.id + multicast_domain_activation = google_network_services_multicast_domain_activation.multicast_domain_activation.id +} + +resource "google_network_services_multicast_group_producer_activation" {{$.PrimaryResourceId}} { + multicast_group_producer_activation_id = "{{index $.Vars "group_producer_activation_name"}}" + location = "us-central1-b" + multicast_group_range_activation = google_network_services_multicast_group_range_activation.group_range_activation.id + multicast_producer_association = google_network_services_multicast_producer_association.producer_association.id +} diff --git a/mmv1/templates/terraform/examples/organization_security_policy_association_basic.tf.tmpl b/mmv1/templates/terraform/examples/organization_security_policy_association_basic.tf.tmpl index b35d91e16fdc..b423c1fd4a8f 100644 --- a/mmv1/templates/terraform/examples/organization_security_policy_association_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/organization_security_policy_association_basic.tf.tmpl @@ -1,19 +1,16 @@ resource "google_folder" "security_policy_target" { - provider = google-beta display_name = "tf-test-secpol-%{random_suffix}" parent = "organizations/{{index $.TestEnvVars "org_id"}}" deletion_protection = false } resource "google_compute_organization_security_policy" "policy" { - provider = google-beta - display_name = "tf-test%{random_suffix}" + short_name = "tf-test%{random_suffix}" parent = google_folder.security_policy_target.name - type = "FIREWALL" + type = "CLOUD_ARMOR" } resource "google_compute_organization_security_policy_association" "{{$.PrimaryResourceId}}" { - provider = google-beta name = "tf-test%{random_suffix}" attachment_id = google_compute_organization_security_policy.{{$.PrimaryResourceId}}.parent policy_id = google_compute_organization_security_policy.{{$.PrimaryResourceId}}.id diff --git a/mmv1/templates/terraform/examples/redis_cluster_ha_with_labels.tf.tmpl b/mmv1/templates/terraform/examples/redis_cluster_ha_with_labels.tf.tmpl new file mode 100644 index 000000000000..bc02b1ff2378 --- /dev/null +++ b/mmv1/templates/terraform/examples/redis_cluster_ha_with_labels.tf.tmpl @@ -0,0 +1,61 @@ +resource "google_redis_cluster" "{{$.PrimaryResourceId}}" { + name = "{{index $.Vars "cluster_name"}}" + shard_count = 3 + labels = { + my_key = "my_val" + other_key = "other_val" + } + psc_configs { + network = google_compute_network.consumer_net.id + } + region = "us-central1" + replica_count = 1 + node_type = "REDIS_SHARED_CORE_NANO" + transit_encryption_mode = "TRANSIT_ENCRYPTION_MODE_DISABLED" + authorization_mode = "AUTH_MODE_DISABLED" + redis_configs = { + maxmemory-policy = "volatile-ttl" + } + deletion_protection_enabled = {{index $.Vars "deletion_protection_enabled"}} + + zone_distribution_config { + mode = "MULTI_ZONE" + } + maintenance_policy { + weekly_maintenance_window { + day = "MONDAY" + start_time { + hours = 1 + minutes = 0 + seconds = 0 + nanos = 0 + } + } + } + depends_on = [ + google_network_connectivity_service_connection_policy.default + ] +} + +resource "google_network_connectivity_service_connection_policy" "default" { + name = "{{index $.Vars "policy_name"}}" + location = "us-central1" + service_class = "gcp-memorystore-redis" + description = "my basic service connection policy" + network = google_compute_network.consumer_net.id + psc_config { + subnetworks = [google_compute_subnetwork.consumer_subnet.id] + } +} + +resource "google_compute_subnetwork" "consumer_subnet" { + name = "{{index 
$.Vars "subnet_name"}}" + ip_cidr_range = "10.0.0.248/29" + region = "us-central1" + network = google_compute_network.consumer_net.id +} + +resource "google_compute_network" "consumer_net" { + name = "{{index $.Vars "network_name"}}" + auto_create_subnetworks = false +} diff --git a/mmv1/templates/terraform/examples/scc_v2_project_notification_config_basic.tf.tmpl b/mmv1/templates/terraform/examples/scc_v2_project_notification_config_basic.tf.tmpl index 97a92ed39c6a..c9faf84731e6 100644 --- a/mmv1/templates/terraform/examples/scc_v2_project_notification_config_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/scc_v2_project_notification_config_basic.tf.tmpl @@ -5,7 +5,6 @@ resource "google_pubsub_topic" "scc_v2_project_notification" { resource "google_scc_v2_project_notification_config" "{{$.PrimaryResourceId}}" { config_id = "{{index $.Vars "config_id"}}" project = "{{index $.TestEnvVars "project"}}" - location = "global" description = "My custom Cloud Security Command Center Finding Notification Configuration" pubsub_topic = google_pubsub_topic.scc_v2_project_notification.id diff --git a/mmv1/templates/terraform/examples/snapshot_basic_2.tf.tmpl b/mmv1/templates/terraform/examples/snapshot_basic_2.tf.tmpl new file mode 100644 index 000000000000..8b891dea1934 --- /dev/null +++ b/mmv1/templates/terraform/examples/snapshot_basic_2.tf.tmpl @@ -0,0 +1,26 @@ +resource "google_compute_snapshot" "{{$.PrimaryResourceId}}" { + provider = google-beta + name = "{{index $.Vars "snapshot_name"}}" + source_disk = google_compute_disk.persistent.id + zone = "us-central1-a" + labels = { + my_label = "value" + } + storage_locations = ["us-central1"] + guest_flush = true +} + +data "google_compute_image" "debian" { + provider = google-beta + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "persistent" { + provider = google-beta + name = "{{index $.Vars "disk_name"}}" + image = data.google_compute_image.debian.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} diff --git a/mmv1/templates/terraform/examples/storage_insights_dataset_config_excludes.tf.tmpl b/mmv1/templates/terraform/examples/storage_insights_dataset_config_excludes.tf.tmpl index 18777c4b5a21..29585cb0d9e5 100644 --- a/mmv1/templates/terraform/examples/storage_insights_dataset_config_excludes.tf.tmpl +++ b/mmv1/templates/terraform/examples/storage_insights_dataset_config_excludes.tf.tmpl @@ -2,6 +2,7 @@ resource "google_storage_insights_dataset_config" "{{$.PrimaryResourceId}}" { location = "us-central1" dataset_config_id = "{{$.Vars.dataset_config_id}}" retention_period_days = 1 + activity_data_retention_period_days = 2 organization_scope = true identity { type = "IDENTITY_TYPE_PER_PROJECT" diff --git a/mmv1/templates/terraform/flatten_property_method.go.tmpl b/mmv1/templates/terraform/flatten_property_method.go.tmpl index cd7140f9ca4d..0031cbaf35a1 100644 --- a/mmv1/templates/terraform/flatten_property_method.go.tmpl +++ b/mmv1/templates/terraform/flatten_property_method.go.tmpl @@ -19,12 +19,12 @@ {{- else -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { {{- if or (and $.IgnoreRead (not $.ResourceMetadata.IsTgcCompiler)) $.ClientSide }} - return d.Get("{{ $.TerraformLineage }}") + return d.Get("{{ join $.Lineage ".0." 
}}") {{- else if $.IsA "NestedObject" }} if v == nil { return nil } - {{- if not $.AllowEmptyObject }} + {{- if and (not $.AllowEmptyObject) (not $.ResourceMetadata.IsTgcCompiler) }} original := v.(map[string]interface{}) if len(original) == 0 { return nil @@ -109,7 +109,7 @@ func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.Reso } transformed := make(map[string]interface{}) - if l, ok := d.GetOkExists("{{ $.TerraformLineage }}"); ok { + if l, ok := d.GetOkExists("{{ join $.Lineage ".0." }}"); ok { for k := range l.(map[string]interface{}) { transformed[k] = v.(map[string]interface{})[k] } @@ -165,6 +165,15 @@ func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.Reso return false } return v + {{- else if and ($.ResourceMetadata.IsTgcCompiler) ($.IsA "String") ($.Required) }} + if v == nil { + return "unknown" + } + transformed := v.(string) + if transformed == "" { + return "unknown" + } + return v {{- else }} return v {{- end }} diff --git a/mmv1/templates/terraform/iam/iap_web_appengine_state_id_funcs.go.tmpl b/mmv1/templates/terraform/iam/iap_web_appengine_state_id_funcs.go.tmpl new file mode 100644 index 000000000000..4a075dfe3ce0 --- /dev/null +++ b/mmv1/templates/terraform/iam/iap_web_appengine_state_id_funcs.go.tmpl @@ -0,0 +1,58 @@ +func generateIapWebTypeAppEngineIAMPolicyStateID(iamResourceAddr string) func(*terraform.State) (string, error) { + return func(state *terraform.State) (string, error) { + var rawState map[string]string + for _, m := range state.Modules { + if len(m.Resources) > 0 { + if v, ok := m.Resources[iamResourceAddr]; ok { + rawState = v.Primary.Attributes + } + } + } + fmt.Printf("raw state %s\n", rawState) + project := tpgresource.GetResourceNameFromSelfLink(rawState["project"]) + appId := appIdShortName(rawState["app_id"]) + return acctest.BuildIAMImportId(fmt.Sprintf("projects/%s/iap_web/appengine-%s", project, appId), "", "", rawState["condition.0.title"]), nil + } +} + +func generateIapWebTypeAppEngineIAMBindingStateID(iamResourceAddr string) func(*terraform.State) (string, error) { + return func(state *terraform.State) (string, error) { + var rawState map[string]string + for _, m := range state.Modules { + if len(m.Resources) > 0 { + if v, ok := m.Resources[iamResourceAddr]; ok { + rawState = v.Primary.Attributes + } + } + } + fmt.Printf("raw state %s\n", rawState) + project := tpgresource.GetResourceNameFromSelfLink(rawState["project"]) + appId := appIdShortName(rawState["app_id"]) + return acctest.BuildIAMImportId(fmt.Sprintf("projects/%s/iap_web/appengine-%s", project, appId), rawState["role"], "", rawState["condition.0.title"]), nil + } +} + +func generateIapWebTypeAppEngineIAMMemberStateID(iamResourceAddr string) func(*terraform.State) (string, error) { + return func(state *terraform.State) (string, error) { + var rawState map[string]string + for _, m := range state.Modules { + if len(m.Resources) > 0 { + if v, ok := m.Resources[iamResourceAddr]; ok { + rawState = v.Primary.Attributes + } + } + } + fmt.Printf("raw state %s\n", rawState) + project := tpgresource.GetResourceNameFromSelfLink(rawState["project"]) + appId := appIdShortName(rawState["app_id"]) + return acctest.BuildIAMImportId(fmt.Sprintf("projects/%s/iap_web/appengine-%s", project, appId), rawState["role"], rawState["member"], rawState["condition.0.title"]), nil + } +} + +func appIdShortName(appId string) string { + appIdParts := strings.SplitN(appId, "appengine-", 2) + if len(appIdParts) == 2 { + return appIdParts[1] + } + return appId +} 
diff --git a/mmv1/templates/terraform/iam/sourcerepo_state_id_funcs.go.tmpl b/mmv1/templates/terraform/iam/sourcerepo_state_id_funcs.go.tmpl new file mode 100644 index 000000000000..5e63fc8e0baa --- /dev/null +++ b/mmv1/templates/terraform/iam/sourcerepo_state_id_funcs.go.tmpl @@ -0,0 +1,58 @@ +func generateSourceRepoRepositoryIAMPolicyStateID(iamResourceAddr string) func(*terraform.State) (string, error) { + return func(state *terraform.State) (string, error) { + var rawState map[string]string + for _, m := range state.Modules { + if len(m.Resources) > 0 { + if v, ok := m.Resources[iamResourceAddr]; ok { + rawState = v.Primary.Attributes + } + } + } + fmt.Printf("raw state %s\n", rawState) + project := tpgresource.GetResourceNameFromSelfLink(rawState["project"]) + repository := repositoryShortName(rawState["repository"]) + return acctest.BuildIAMImportId(fmt.Sprintf("projects/%s/repos/%s", project, repository), "", "", rawState["condition.0.title"]), nil + } +} + +func generateSourceRepoRepositoryIAMBindingStateID(iamResourceAddr string) func(*terraform.State) (string, error) { + return func(state *terraform.State) (string, error) { + var rawState map[string]string + for _, m := range state.Modules { + if len(m.Resources) > 0 { + if v, ok := m.Resources[iamResourceAddr]; ok { + rawState = v.Primary.Attributes + } + } + } + fmt.Printf("raw state %s\n", rawState) + project := tpgresource.GetResourceNameFromSelfLink(rawState["project"]) + repository := repositoryShortName(rawState["repository"]) + return acctest.BuildIAMImportId(fmt.Sprintf("projects/%s/repos/%s", project, repository), rawState["role"], "", rawState["condition.0.title"]), nil + } +} + +func generateSourceRepoRepositoryIAMMemberStateID(iamResourceAddr string) func(*terraform.State) (string, error) { + return func(state *terraform.State) (string, error) { + var rawState map[string]string + for _, m := range state.Modules { + if len(m.Resources) > 0 { + if v, ok := m.Resources[iamResourceAddr]; ok { + rawState = v.Primary.Attributes + } + } + } + fmt.Printf("raw state %s\n", rawState) + project := tpgresource.GetResourceNameFromSelfLink(rawState["project"]) + repository := repositoryShortName(rawState["repository"]) + return acctest.BuildIAMImportId(fmt.Sprintf("projects/%s/repos/%s", project, repository), rawState["role"], rawState["member"], rawState["condition.0.title"]), nil + } +} + +func repositoryShortName(repository string) string { + repositoryParts := strings.SplitN(repository, "/repos/", 2) + if len(repositoryParts) == 2 { + return repositoryParts[1] + } + return repository +} diff --git a/mmv1/templates/terraform/metadata.yaml.tmpl b/mmv1/templates/terraform/metadata.yaml.tmpl deleted file mode 100644 index 7fd7cd41dfac..000000000000 --- a/mmv1/templates/terraform/metadata.yaml.tmpl +++ /dev/null @@ -1,36 +0,0 @@ -resource: '{{ $.TerraformName }}' -generation_type: 'mmv1' -source_file: '{{ $.SourceYamlFile }}' -api_service_name: '{{ $.ProductMetadata.ServiceName }}' -api_version: '{{ or $.ProductMetadata.ServiceVersion $.ServiceVersion }}' -api_resource_type_kind: '{{ or $.ApiResourceTypeKind $.Name }}' -{{- if $.CAIFormatOverride }} -cai_asset_name_format: '{{ $.CAIFormatOverride }}' -{{- end }} -{{- if gt (len $.ApiVariantPatterns) 0 }} -api_variant_patterns: - {{- range $v := $.ApiVariantPatterns }} - - '{{ $v }}' - {{- end }} -{{- end }} -{{- if $.AutogenStatus }} -autogen_status: true -{{- end }} -fields: -{{- range $p := $.LeafProperties }} - {{- if $p.ProviderOnly }} - - field: '{{ $p.MetadataLineage }}' - 
provider_only: true - {{- else}} - - api_field: '{{ $p.MetadataApiLineage }}' - {{- if ne $p.MetadataLineage $p.MetadataDefaultLineage }} - field: '{{ $p.MetadataLineage }}' - {{- end}} - {{- if $p.IsJsonField }} - json: true - {{- end }} - {{- end}} -{{- end }} -{{- if $.HasSelfLink }} - - api_field: 'selfLink' -{{- end }} diff --git a/mmv1/templates/terraform/nested_property_documentation.html.markdown.tmpl b/mmv1/templates/terraform/nested_property_documentation.html.markdown.tmpl index 236c0bcb8fee..400611e658f3 100644 --- a/mmv1/templates/terraform/nested_property_documentation.html.markdown.tmpl +++ b/mmv1/templates/terraform/nested_property_documentation.html.markdown.tmpl @@ -4,7 +4,7 @@ {{- trimTemplate "nested_property_documentation.html.markdown.tmpl" $np -}} {{- end -}} {{- else if and $.NestedProperties (not $.WriteOnlyProperties) }} -The `{{ underscore $.Name }}` block {{ if $.Output }}contains{{ else }}supports{{ end }}: +The `{{ underscore $.Name }}` block {{ if $.Output }}contains{{ else }}supports{{ end }}: {{ "" }} {{- if $.IsA "Map" }} * `{{ underscore $.KeyName }}` - (Required) The identifier for this object. Format specified above. diff --git a/mmv1/templates/terraform/nested_property_write_only_documentation.html.markdown.tmpl b/mmv1/templates/terraform/nested_property_write_only_documentation.html.markdown.tmpl index 91a279a03dfa..a02f6ebedf58 100644 --- a/mmv1/templates/terraform/nested_property_write_only_documentation.html.markdown.tmpl +++ b/mmv1/templates/terraform/nested_property_write_only_documentation.html.markdown.tmpl @@ -1,5 +1,5 @@ {{- if $.WriteOnlyProperties }} -The `{{ underscore $.Name }}` block {{ if $.Output }}contains{{ else }}supports{{ end }}: +The `{{ underscore $.Name }}` block {{ if $.Output }}contains{{ else }}supports{{ end }}: {{ "" }} {{- if $.NestedProperties }} {{- range $np := $.NestedProperties }} diff --git a/mmv1/templates/terraform/pre_create/dataplex_entry.go.tmpl b/mmv1/templates/terraform/pre_create/dataplex_entry.go.tmpl new file mode 100644 index 000000000000..66e67a1fb61a --- /dev/null +++ b/mmv1/templates/terraform/pre_create/dataplex_entry.go.tmpl @@ -0,0 +1,6 @@ +if v, ok := d.GetOkExists("entry_group_id"); ok && strings.HasPrefix(v.(string), "@") { + url, err = transport_tpg.AddQueryParams(url, map[string]string{"allow_missing": "false", + "updateMask": "aspects"}) +} else { + url, err = transport_tpg.AddQueryParams(url, map[string]string{"allow_missing": "true"}) +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_delete/dataplex_entry.go.tmpl b/mmv1/templates/terraform/pre_delete/dataplex_entry.go.tmpl new file mode 100644 index 000000000000..2cceaf0c47bb --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/dataplex_entry.go.tmpl @@ -0,0 +1,5 @@ +if v, ok := d.GetOkExists("entry_group_id"); ok && strings.HasPrefix(v.(string), "@") { + // Ingestion based resources need to be removed from terraform state but cannot be deleted in Dataplex. 
+ d.SetId("") + return nil +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_delete/detach_disk.tmpl b/mmv1/templates/terraform/pre_delete/detach_disk.tmpl index 84a9a6eebf86..fdf262f5da92 100644 --- a/mmv1/templates/terraform/pre_delete/detach_disk.tmpl +++ b/mmv1/templates/terraform/pre_delete/detach_disk.tmpl @@ -112,7 +112,8 @@ if v, ok := readRes["users"].([]interface{}); ok { if disks, ok := instanceRes["disks"].([]interface{}); ok { for _, diskInterface := range disks { disk := diskInterface.(map[string]interface{}) - if tpgresource.CompareSelfLinkOrResourceName("", disk["source"].(string), self, nil) { + // source is nil for scratch disks + if source := disk["source"]; source != nil && tpgresource.CompareSelfLinkOrResourceName("", source.(string), self, nil) { detachCalls = append(detachCalls, detachArgs{ project: instanceProject, zone: tpgresource.GetResourceNameFromSelfLink(instanceRes["zone"].(string)), diff --git a/mmv1/templates/terraform/pre_update/dataplex_entry.go.tmpl b/mmv1/templates/terraform/pre_update/dataplex_entry.go.tmpl index ff2b3ff90915..364d6a70872e 100644 --- a/mmv1/templates/terraform/pre_update/dataplex_entry.go.tmpl +++ b/mmv1/templates/terraform/pre_update/dataplex_entry.go.tmpl @@ -37,3 +37,5 @@ if d.HasChange("aspects") { return err } } + +url, err = transport_tpg.AddQueryParams(url, map[string]string{"allow_missing": "false"}) \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_update/network_endpoints.go.tmpl b/mmv1/templates/terraform/pre_update/network_endpoints.go.tmpl index 72ed12375f8a..52683ca98446 100644 --- a/mmv1/templates/terraform/pre_update/network_endpoints.go.tmpl +++ b/mmv1/templates/terraform/pre_update/network_endpoints.go.tmpl @@ -57,6 +57,10 @@ if err != nil { return err } +if len(lastPage) == 0 { + return resourceComputeNetworkEndpointsRead(d, meta) +} + obj = map[string]interface{}{ "networkEndpoints": lastPage, -} \ No newline at end of file +} diff --git a/mmv1/templates/terraform/pre_update/storage_insights_dataset_config.go.tmpl b/mmv1/templates/terraform/pre_update/storage_insights_dataset_config.go.tmpl index 456ef222d86d..7c60200eb99b 100644 --- a/mmv1/templates/terraform/pre_update/storage_insights_dataset_config.go.tmpl +++ b/mmv1/templates/terraform/pre_update/storage_insights_dataset_config.go.tmpl @@ -8,6 +8,10 @@ if d.HasChange("retention_period_days") { updateMask = append(updateMask, "retentionPeriodDays") } +if d.HasChange("activity_data_retention_period_days") { + updateMask = append(updateMask, "activityDataRetentionPeriodDays") +} + if d.HasChange("description") { updateMask = append(updateMask, "description") } diff --git a/mmv1/templates/terraform/property_documentation.html.markdown.tmpl b/mmv1/templates/terraform/property_documentation.html.markdown.tmpl index 67828315b33f..266ba6812c43 100644 --- a/mmv1/templates/terraform/property_documentation.html.markdown.tmpl +++ b/mmv1/templates/terraform/property_documentation.html.markdown.tmpl @@ -24,7 +24,7 @@ ~> **Note:** One of `{{ slice (underscore $.Name) 0 (sub (len (underscore $.Name)) 3) }}` or `{{ underscore $.Name }}` can only be set. {{- end }} {{- if and (not $.FlattenObject) $.NestedProperties }} - Structure is [documented below](#nested_{{ $.LineageAsSnakeCase}}). + Structure is [documented below](#nested_{{ join $.Lineage "_" }}). 
{{- end }} {{- if $.DeprecationMessage }} diff --git a/mmv1/templates/tgc_next/custom_flatten/cloudtasks_queue_max_retry_duration.go.tmpl b/mmv1/templates/tgc_next/custom_flatten/cloudtasks_queue_max_retry_duration.go.tmpl new file mode 100644 index 000000000000..6a766b4afe07 --- /dev/null +++ b/mmv1/templates/tgc_next/custom_flatten/cloudtasks_queue_max_retry_duration.go.tmpl @@ -0,0 +1,6 @@ +func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || v == "" { + return "0s" + } + return v +} \ No newline at end of file diff --git a/mmv1/templates/tgc_next/custom_flatten/monitoring_alert_policy_cross_series_reducer.go.tmpl b/mmv1/templates/tgc_next/custom_flatten/monitoring_alert_policy_cross_series_reducer.go.tmpl new file mode 100644 index 000000000000..29ba3cd61b28 --- /dev/null +++ b/mmv1/templates/tgc_next/custom_flatten/monitoring_alert_policy_cross_series_reducer.go.tmpl @@ -0,0 +1,6 @@ +func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || v == "" { + return "REDUCE_NONE" + } + return v +} diff --git a/mmv1/templates/tgc_next/custom_flatten/secret_version_secret_data.go.tmpl b/mmv1/templates/tgc_next/custom_flatten/secret_version_secret_data.go.tmpl index 3a07e0fe1e8d..867c0eda2e04 100644 --- a/mmv1/templates/tgc_next/custom_flatten/secret_version_secret_data.go.tmpl +++ b/mmv1/templates/tgc_next/custom_flatten/secret_version_secret_data.go.tmpl @@ -2,7 +2,9 @@ func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.Reso if v == nil { // payload is missing in CAI asset, but it is required in Terraform provider. transformed := map[string]interface{} { - "secret_data": "hidden", + "payload": map[string]interface{} { + "secretData": "hidden", + }, } return []interface{}{transformed} } diff --git a/mmv1/templates/tgc_next/decoders/alloydb_cluster.go.tmpl b/mmv1/templates/tgc_next/decoders/alloydb_cluster.go.tmpl deleted file mode 100644 index b7391ceaf51a..000000000000 --- a/mmv1/templates/tgc_next/decoders/alloydb_cluster.go.tmpl +++ /dev/null @@ -1,8 +0,0 @@ -// password is missing in CAI asset, but password is required in Terraform -if res["initialUser"] == nil && res["clusterType"] == "PRIMARY" { - res["initialUser"] = map[string]interface{}{ - "password": "hidden", - } -} - -return res, hclData, nil \ No newline at end of file diff --git a/mmv1/templates/tgc_next/decoders/compute_image.go.tmpl b/mmv1/templates/tgc_next/decoders/compute_image.go.tmpl deleted file mode 100644 index 5c2548cb77cb..000000000000 --- a/mmv1/templates/tgc_next/decoders/compute_image.go.tmpl +++ /dev/null @@ -1,6 +0,0 @@ -// These fields are missing in CAI assets, but requried in Terraform -if rawDisk, ok := res["rawDisk"].(map[string]interface{}); ok { - rawDisk["source"] = "unknown" -} - -return res, hclData, nil \ No newline at end of file diff --git a/mmv1/templates/tgc_next/decoders/compute_route.tmpl b/mmv1/templates/tgc_next/decoders/compute_route.tmpl new file mode 100644 index 000000000000..78ca9c9260bb --- /dev/null +++ b/mmv1/templates/tgc_next/decoders/compute_route.tmpl @@ -0,0 +1,17 @@ +if v, ok := res["nextHopInstance"]; ok { + val, err := tpgresource.ParseZonalFieldValue("instances", v.(string), "project", "next_hop_instance_zone", d, meta.(*transport_tpg.Config), true) + if err != nil { + return nil, nil, err + } + hclData["next_hop_instance_zone"] = val.Zone + res["nextHopInstance"] 
= val.RelativeLink() +} + +// next_hop_ip is Computed + Optional and conflicts with next_hop_ilb +if _, ok := res["nextHopIp"]; ok { + if _, ok := res["nextHopIlb"]; ok { + delete(res, "nextHopIp") + } +} + +return res, hclData, nil diff --git a/mmv1/templates/tgc_next/decoders/iam_workload_identity_pool_provider.go.tmpl b/mmv1/templates/tgc_next/decoders/iam_workload_identity_pool_provider.go.tmpl index 069d423c451d..d47fd785979c 100644 --- a/mmv1/templates/tgc_next/decoders/iam_workload_identity_pool_provider.go.tmpl +++ b/mmv1/templates/tgc_next/decoders/iam_workload_identity_pool_provider.go.tmpl @@ -1,11 +1,8 @@ // These fields are missing in CAI assets, but requried in Terraform -if saml, ok := res["saml"].(map[string]interface{}); ok { - saml["idpMetadataXml"] = "unknown" -} - if x509, ok := res["x509"].(map[string]interface{}); ok { if trustStore, ok := x509["trustStore"].(map[string]interface{}); ok { if trustAnchors, ok := trustStore["trustAnchors"].([]interface{}); ok { + // trust_anchors is missing in CAI assets, but required in Terraform for _, trustAnchorRaw := range trustAnchors { if trustAnchor, ok := trustAnchorRaw.(map[string]interface{}); ok { trustAnchor["pemCertificate"] = "hidden" diff --git a/mmv1/templates/tgc_next/decoders/privateca_capool.go.tmpl b/mmv1/templates/tgc_next/decoders/privateca_capool.go.tmpl new file mode 100644 index 000000000000..d16a557e9e9e --- /dev/null +++ b/mmv1/templates/tgc_next/decoders/privateca_capool.go.tmpl @@ -0,0 +1,20 @@ + if ip, ok := res["issuancePolicy"]; ok && ip != nil { + if ipMap, ok := ip.(map[string]interface{}); ok { + if bv, ok := ipMap["baselineValues"]; ok && bv != nil { + if bvMap, ok := bv.(map[string]interface{}); ok { + if ae, ok := bvMap["additionalExtensions"]; ok && ae != nil { + if aeList, ok := ae.([]interface{}); ok { + for _, item := range aeList { + if itemMap, ok := item.(map[string]interface{}); ok { + if _, ok := itemMap["critical"]; !ok { + itemMap["critical"] = false + } + } + } + } + } + } + } + } + } + return res, hclData, nil diff --git a/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt b/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt index 4660649d59ce..7481be1fc8bf 100644 --- a/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt +++ b/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt @@ -91,6 +91,11 @@ var ServicesListBeta = mapOf( "displayName" to "Biglake", "path" to "./google-beta/services/biglake" ), + "biglakeiceberg" to mapOf( + "name" to "biglakeiceberg", + "displayName" to "BiglakeIceberg", + "path" to "./google-beta/services/biglakeiceberg" + ), "bigquery" to mapOf( "name" to "bigquery", "displayName" to "Bigquery", diff --git a/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt b/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt index 81279eb12db0..40e8a74fbc21 100644 --- a/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt +++ b/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt @@ -91,6 +91,11 @@ var ServicesListGa = mapOf( "displayName" to "Biglake", "path" to "./google/services/biglake" ), + "biglakeiceberg" to mapOf( + "name" to "biglakeiceberg", + "displayName" to "BiglakeIceberg", + "path" to "./google/services/biglakeiceberg" + ), "bigquery" to mapOf( "name" to "bigquery", "displayName" to "Bigquery", diff --git a/mmv1/third_party/terraform/acctest/resource_test_utils.go 
b/mmv1/third_party/terraform/acctest/resource_test_utils.go index 76d14ad32359..3fb077176c18 100644 --- a/mmv1/third_party/terraform/acctest/resource_test_utils.go +++ b/mmv1/third_party/terraform/acctest/resource_test_utils.go @@ -111,6 +111,21 @@ func TestCheckAttributeValuesEqual(i *string, j *string) resource.TestCheckFunc } } +// BuildIAMImportId builds an IAM import ID from the resource name and the optional role, member, and condition title, appending each non-empty value separated by a space. +func BuildIAMImportId(name, role, member, condition string) string { + ret := name + if role != "" { + ret += " " + role + } + if member != "" { + ret += " " + member + } + if condition != "" { + ret += " " + condition + } + return ret +} + // testStringValue returns string values from string pointers, handling nil pointers. func testStringValue(sPtr *string) string { if sPtr == nil { diff --git a/mmv1/third_party/terraform/go.mod b/mmv1/third_party/terraform/go.mod index c76d94ff0608..cd9bde1d5906 100644 --- a/mmv1/third_party/terraform/go.mod +++ b/mmv1/third_party/terraform/go.mod @@ -29,16 +29,16 @@ require ( github.com/hashicorp/terraform-plugin-testing v1.13.3 github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/hashstructure v1.1.0 - github.com/sirupsen/logrus v1.8.1 - github.com/stretchr/testify v1.10.0 + github.com/sirupsen/logrus v1.8.3 + github.com/stretchr/testify v1.11.1 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 - golang.org/x/net v0.46.0 - golang.org/x/oauth2 v0.33.0 - google.golang.org/api v0.256.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 - google.golang.org/grpc v1.76.0 - google.golang.org/protobuf v1.36.10 + golang.org/x/net v0.48.0 + golang.org/x/oauth2 v0.34.0 + google.golang.org/api v0.258.0 + google.golang.org/genproto/googleapis/rpc v0.0.0-20251213004720-97cd9d5aeac2 + google.golang.org/grpc v1.77.0 + google.golang.org/protobuf v1.36.11 gopkg.in/yaml.v2 v2.4.0 ) @@ -56,13 +56,13 @@ require ( github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cloudflare/circl v1.6.1 // indirect - github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect - github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect + github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f // indirect + github.com/envoyproxy/go-control-plane/envoy v1.35.0 // indirect github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect github.com/fatih/color v1.16.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/gammazero/deque v0.0.0-20180920172122-f6adf94963e4 // indirect - github.com/go-jose/go-jose/v4 v4.1.2 // indirect + github.com/go-jose/go-jose/v4 v4.1.3 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/glog v1.2.5 // indirect @@ -95,29 +95,28 @@ require ( github.com/oklog/run v1.1.0 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect + github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/zclconf/go-cty v1.17.0 // indirect - github.com/zeebo/errs v1.4.0 // indirect - go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/auto/sdk 
v1.2.1 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect - go.opentelemetry.io/otel v1.37.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - go.opentelemetry.io/otel/sdk v1.37.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect - go.opentelemetry.io/otel/trace v1.37.0 // indirect - golang.org/x/crypto v0.43.0 // indirect - golang.org/x/mod v0.28.0 // indirect - golang.org/x/sync v0.18.0 // indirect - golang.org/x/sys v0.37.0 // indirect - golang.org/x/text v0.30.0 // indirect + go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/sdk v1.38.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect + golang.org/x/crypto v0.46.0 // indirect + golang.org/x/mod v0.30.0 // indirect + golang.org/x/sync v0.19.0 // indirect + golang.org/x/sys v0.39.0 // indirect + golang.org/x/text v0.32.0 // indirect golang.org/x/time v0.14.0 // indirect - golang.org/x/tools v0.37.0 // indirect + golang.org/x/tools v0.39.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250804133106-a7a43d27e69b // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/mmv1/third_party/terraform/go.sum b/mmv1/third_party/terraform/go.sum index a3f01a903571..2aee79af2fd8 100644 --- a/mmv1/third_party/terraform/go.sum +++ b/mmv1/third_party/terraform/go.sum @@ -46,8 +46,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls= -github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f h1:Y8xYupdHxryycyPlc9Y+bSQAYZnetRJ70VMVKm5CKI0= +github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f/go.mod h1:HlzOvOjVBOfTGSRXRyY0OiCS/3J1akRGQQpRO/7zyF4= github.com/creachadair/staticfile v0.1.2/go.mod h1:a3qySzCIXEprDGxk6tSxSI+dBBdLzqeBOMhZ+o2d3pM= github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= @@ -62,10 +62,10 @@ github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FM github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M= -github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA= 
-github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A= -github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw= +github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329 h1:K+fnvUM0VZ7ZFJf0n4L/BRlnsb9pL/GuDG6FqaH+PwM= +github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329/go.mod h1:Alz8LEClvR7xKsrq3qzoc4N0guvVNSS8KmSChGYr9hs= +github.com/envoyproxy/go-control-plane/envoy v1.35.0 h1:ixjkELDE+ru6idPxcHLj8LBVc2bFP7iBytj353BoHUo= +github.com/envoyproxy/go-control-plane/envoy v1.35.0/go.mod h1:09qwbGVuSWWAyN5t/b3iyVfz5+z8QWGrzkoqm/8SbEs= github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= @@ -86,8 +86,8 @@ github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UN github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU= github.com/go-git/go-git/v5 v5.14.0 h1:/MD3lCrGjCen5WfEAzKg00MJJffKhC8gzS80ycmCi60= github.com/go-git/go-git/v5 v5.14.0/go.mod h1:Z5Xhoia5PcWA3NF8vRLURn9E5FRhSl7dGj9ItW3Wk5k= -github.com/go-jose/go-jose/v4 v4.1.2 h1:TK/7NqRQZfgAh+Td8AlsrvtPoUyiHh0LqVvokh+1vHI= -github.com/go-jose/go-jose/v4 v4.1.2/go.mod h1:22cg9HWM1pOlnRiY+9cQYJ9XHmya1bYW8OeDM6Ku6Oo= +github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= +github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -246,19 +246,20 @@ github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7 github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.3 h1:DBBfY8eMYazKEJHb3JKpSPfpgd2mBCoNFlQx6C5fftU= +github.com/sirupsen/logrus v1.8.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8= github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY= -github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= -github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= +github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= +github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= @@ -275,24 +276,22 @@ github.com/zclconf/go-cty v1.17.0 h1:seZvECve6XX4tmnvRzWtJNHdscMtYEx5R7bnnVyd/d0 github.com/zclconf/go-cty v1.17.0/go.mod h1:wqFzcImaLTI6A5HfsRwB0nj5n0MRZFwmey8YoFPPs3U= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= -github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= -github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= -go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= -go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= -go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= -go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= -go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= -go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= -go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= -go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= -go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= -go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/metric v1.38.0 
h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= @@ -302,8 +301,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= -golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= +golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= +golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= @@ -313,8 +312,8 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U= -golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -325,24 +324,23 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= -golang.org/x/net v0.46.0/go.mod 
h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= -golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= +golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= -golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -352,21 +350,22 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= -golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod 
h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q= -golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss= +golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= +golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= -golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -378,16 +377,16 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE= -golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= -google.golang.org/api v0.256.0 h1:u6Khm8+F9sxbCTYNoBHg6/Hwv0N/i+V94MvkOSor6oI= -google.golang.org/api v0.256.0/go.mod h1:KIgPhksXADEKJlnEoRa9qAII4rXcy40vfI8HRqcU964= +google.golang.org/api v0.258.0 h1:IKo1j5FBlN74fe5isA2PVozN3Y5pwNKriEgAXPOkDAc= +google.golang.org/api v0.258.0/go.mod h1:qhOMTQEZ6lUps63ZNq9jhODswwjkjYYguA7fA3TBFww= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= @@ -397,21 +396,21 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto 
v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4= google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s= -google.golang.org/genproto/googleapis/api v0.0.0-20250804133106-a7a43d27e69b h1:ULiyYQ0FdsJhwwZUwbaXpZF5yUE3h+RA+gxvBu37ucc= -google.golang.org/genproto/googleapis/api v0.0.0-20250804133106-a7a43d27e69b/go.mod h1:oDOGiMSXHL4sDTJvFvIB9nRQCGdLP1o/iVaqQK8zB+M= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= +google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251213004720-97cd9d5aeac2 h1:2I6GHUeJ/4shcDpoUlLs/2WPnhg7yJwvXtqcMJt9liA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251213004720-97cd9d5aeac2/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A= -google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c= +google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= +google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= -google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -421,6 +420,7 @@ gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRN gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.tmpl b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.tmpl index 17f2199b16a2..f498c41fcf3b 100644 --- a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.tmpl +++ b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.tmpl @@ -42,6 +42,7 @@ var handwrittenDatasources = map[string]*schema.Resource{ "google_artifact_registry_tag": artifactregistry.DataSourceArtifactRegistryTag(), "google_artifact_registry_tags": artifactregistry.DataSourceArtifactRegistryTags(), "google_artifact_registry_version": artifactregistry.DataSourceArtifactRegistryVersion(), + "google_artifact_registry_versions": artifactregistry.DataSourceArtifactRegistryVersions(), "google_apphub_discovered_workload": apphub.DataSourceApphubDiscoveredWorkload(), "google_app_engine_default_service_account": appengine.DataSourceGoogleAppEngineDefaultServiceAccount(), "google_apphub_application": apphub.DataSourceGoogleApphubApplication(), @@ -79,6 +80,7 @@ var handwrittenDatasources = map[string]*schema.Resource{ "google_cloud_identity_group_memberships": cloudidentity.DataSourceGoogleCloudIdentityGroupMemberships(), "google_cloud_identity_group_transitive_memberships": cloudidentity.DataSourceGoogleCloudIdentityGroupTransitiveMemberships(), "google_cloud_identity_group_lookup": cloudidentity.DataSourceGoogleCloudIdentityGroupLookup(), + "google_cloud_identity_policies": cloudidentity.DataSourceGoogleCloudIdentityPolicies(), "google_cloud_identity_policy": cloudidentity.DataSourceGoogleCloudIdentityPolicy(), "google_cloud_quotas_quota_info": cloudquotas.DataSourceGoogleCloudQuotasQuotaInfo(), "google_cloud_quotas_quota_infos": cloudquotas.DataSourceGoogleCloudQuotasQuotaInfos(), @@ -127,6 +129,7 @@ var handwrittenDatasources = map[string]*schema.Resource{ "google_compute_region_instance_group_manager": compute.DataSourceGoogleComputeRegionInstanceGroupManager(), "google_compute_region_instance_template": compute.DataSourceGoogleComputeRegionInstanceTemplate(), "google_compute_region_network_endpoint_group": compute.DataSourceGoogleComputeRegionNetworkEndpointGroup(), + "google_compute_region_security_policy": compute.DataSourceGoogleComputeRegionSecurityPolicy(), "google_compute_region_ssl_certificate": compute.DataSourceGoogleRegionComputeSslCertificate(), "google_compute_region_ssl_policy": compute.DataSourceGoogleRegionComputeSslPolicy(), "google_compute_reservation": compute.DataSourceGoogleComputeReservation(), @@ -140,6 +143,7 @@ var handwrittenDatasources = map[string]*schema.Resource{ "google_compute_snapshot": compute.DataSourceGoogleComputeSnapshot(), "google_compute_ssl_certificate": compute.DataSourceGoogleComputeSslCertificate(), "google_compute_ssl_policy": compute.DataSourceGoogleComputeSslPolicy(), + "google_compute_storage_pool": compute.DataSourceGoogleComputeStoragePool(), "google_compute_storage_pool_types": compute.DataSourceGoogleComputeStoragePoolTypes(), "google_compute_subnetwork": compute.DataSourceGoogleComputeSubnetwork(), "google_compute_subnetworks": compute.DataSourceGoogleComputeSubnetworks(), diff --git a/mmv1/third_party/terraform/services/accesscontextmanager/resource_access_context_manager_gcp_user_access_binding_test.go 
b/mmv1/third_party/terraform/services/accesscontextmanager/resource_access_context_manager_gcp_user_access_binding_test.go index 1c7aee210b8f..c663467fba85 100644 --- a/mmv1/third_party/terraform/services/accesscontextmanager/resource_access_context_manager_gcp_user_access_binding_test.go +++ b/mmv1/third_party/terraform/services/accesscontextmanager/resource_access_context_manager_gcp_user_access_binding_test.go @@ -20,6 +20,10 @@ import ( func testAccAccessContextManagerGcpUserAccessBinding_basicTest(t *testing.T) { t.Parallel() + // Skip in VCR until the test issue is resolved + // https://github.com/hashicorp/terraform-provider-google/issues/24833 + acctest.SkipIfVcr(t) + context := map[string]interface{}{ "org_id": envvar.GetTestOrgFromEnv(t), "org_domain": envvar.GetTestOrgDomainFromEnv(t), diff --git a/mmv1/third_party/terraform/services/alloydb/resource_alloydb_instance_test.go b/mmv1/third_party/terraform/services/alloydb/resource_alloydb_instance_test.go index 450593138658..d21675805288 100644 --- a/mmv1/third_party/terraform/services/alloydb/resource_alloydb_instance_test.go +++ b/mmv1/third_party/terraform/services/alloydb/resource_alloydb_instance_test.go @@ -74,6 +74,27 @@ data "google_compute_network" "default" { `, context) } +func testAccAlloydbInstance_deleteInstance(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_alloydb_cluster" "default" { + cluster_id = "tf-test-alloydb-cluster%{random_suffix}" + location = "us-central1" + network_config { + network = data.google_compute_network.default.id + } + initial_user { + password = "tf-test-alloydb-cluster%{random_suffix}" + } + + deletion_protection = false +} + +data "google_compute_network" "default" { + name = "%{network_name}" +} +`, context) +} + func testAccAlloydbInstance_update(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_alloydb_instance" "default" { @@ -666,7 +687,7 @@ resource "google_alloydb_instance" "default" { client_connection_config { require_connectors = %{require_connectors} - } + } } resource "google_alloydb_cluster" "default" { @@ -703,7 +724,7 @@ resource "google_alloydb_instance" "default" { ssl_config { ssl_mode = "%{ssl_mode}" } - } + } } resource "google_alloydb_cluster" "default" { @@ -837,7 +858,7 @@ resource "google_alloydb_instance" "default" { enable_public_ip = %{enable_public_ip} enable_outbound_public_ip = %{enable_outbound_public_ip} %{authorized_external_networks} - } + } } resource "google_alloydb_cluster" "default" { @@ -877,7 +898,7 @@ resource "google_alloydb_instance" "default" { authorized_external_networks { cidr_range = "%{cidr_range}" } - } + } } resource "google_alloydb_cluster" "default" { @@ -1217,3 +1238,472 @@ data "google_compute_global_address" "private_ip_alloc" { } `, context) } + +// This test passes if an instance is able to do the following: +// - Be created with only managed connection pooling enabled +// - Be created with managed connection pooling enabled with some flags set +// - Be updated to disable managed connection pooling +// - Be updated to only enable managed connection pooling +// - Be updated to enable managed connection pooling and its flags +// - Be update to only update a few flags +func TestAccAlloydbInstance_connectionPoolConfig(t *testing.T) { + t.Parallel() + + suffix := acctest.RandString(t, 10) + networkName := acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-connection-pool-config") + + context := map[string]interface{}{ + "random_suffix": suffix, + 
"network_name": networkName, + "enabled": true, + "pool_mode": "transaction", + "max_pool_size": 1000, + "min_pool_size": 50, + "max_client_connections": 250, + "client_connection_idle_timeout": 60, + "server_connection_idle_timeout": 60, + "query_wait_timeout": 30, + "max_prepared_statements": 10, + "ignore_startup_parameters": "timezone,lc_monetary,icu_validation_level", + "server_lifetime": 600, + "stats_users": "foo,bar", + } + context2 := map[string]interface{}{ + "random_suffix": suffix, + "network_name": networkName, + "enabled": false, + } + context3 := map[string]interface{}{ + "random_suffix": suffix, + "network_name": networkName, + "enabled": true, + } + context4 := map[string]interface{}{ + "random_suffix": suffix, + "network_name": networkName, + "enabled": true, + "pool_mode": "session", + "min_pool_size": 100, + "query_wait_timeout": 120, + "ignore_startup_parameters": "timezone,icu_validation_level,client_encoding,datestyle,intervalstyle", + "stats_users": "bar,baz,qux", + } + context5 := map[string]interface{}{ + "random_suffix": suffix, + "network_name": networkName, + "enabled": true, + "pool_mode": "session", + "max_pool_size": 1000, + "min_pool_size": 100, + "max_client_connections": 250, + "client_connection_idle_timeout": 60, + "server_connection_idle_timeout": 60, + "query_wait_timeout": 120, + "max_prepared_statements": 10, + "ignore_startup_parameters": "timezone,icu_validation_level,client_encoding,datestyle,intervalstyle", + "server_lifetime": 600, + "stats_users": "bar,baz,qux", + } + context6 := map[string]interface{}{ + "random_suffix": suffix, + "network_name": networkName, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckAlloydbInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAlloydbInstance_connectionPoolConfigNoFlags(context3), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.enabled", "true"), + ), + }, + { + ResourceName: "google_alloydb_instance.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster", "instance_id", "reconciling", "update_time"}, + }, + { + Config: testAccAlloydbInstance_deleteInstance(context6), + Destroy: true, + }, + { + Config: testAccAlloydbInstance_connectionPoolConfigWithAllFlags(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.enabled", "true"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.pool_mode", "transaction"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.max_pool_size", "1000"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.min_pool_size", "50"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.max_client_connections", "250"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.client_connection_idle_timeout", "60"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.server_connection_idle_timeout", "60"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", 
"connection_pool_config.0.flags.query_wait_timeout", "30"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.max_prepared_statements", "10"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.ignore_startup_parameters", "timezone,lc_monetary,icu_validation_level"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.server_lifetime", "600"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.stats_users", "foo,bar"), + ), + }, + { + ResourceName: "google_alloydb_instance.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster", "instance_id", "reconciling", "update_time"}, + }, + { + Config: testAccAlloydbInstance_connectionPoolConfigNoFlags(context2), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.enabled", "false"), + ), + }, + { + ResourceName: "google_alloydb_instance.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster", "instance_id", "reconciling", "update_time"}, + }, + { + Config: testAccAlloydbInstance_connectionPoolConfigNoFlags(context3), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.enabled", "true"), + ), + }, + { + ResourceName: "google_alloydb_instance.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster", "instance_id", "reconciling", "update_time"}, + }, + { + Config: testAccAlloydbInstance_connectionPoolConfigWithAllFlags(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.enabled", "true"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.pool_mode", "transaction"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.max_pool_size", "1000"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.min_pool_size", "50"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.max_client_connections", "250"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.client_connection_idle_timeout", "60"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.server_connection_idle_timeout", "60"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.query_wait_timeout", "30"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.max_prepared_statements", "10"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.ignore_startup_parameters", "timezone,lc_monetary,icu_validation_level"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.server_lifetime", "600"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.stats_users", "foo,bar"), + ), + }, + { + ResourceName: "google_alloydb_instance.default", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"cluster", "instance_id", "reconciling", "update_time"}, + }, + { + Config: testAccAlloydbInstance_connectionPoolConfigWithAllFlags(context5), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.enabled", "true"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.pool_mode", "session"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.max_pool_size", "1000"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.min_pool_size", "100"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.max_client_connections", "250"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.client_connection_idle_timeout", "60"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.server_connection_idle_timeout", "60"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.query_wait_timeout", "120"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.max_prepared_statements", "10"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.ignore_startup_parameters", "timezone,icu_validation_level,client_encoding,datestyle,intervalstyle"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.server_lifetime", "600"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.stats_users", "bar,baz,qux"), + ), + }, + { + ResourceName: "google_alloydb_instance.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster", "instance_id", "reconciling", "update_time"}, + }, + { + Config: testAccAlloydbInstance_connectionPoolConfigNoFlags(context2), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.enabled", "false"), + ), + }, + { + ResourceName: "google_alloydb_instance.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster", "instance_id", "reconciling", "update_time"}, + }, + { + ResourceName: "google_alloydb_instance.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster", "instance_id", "reconciling", "update_time"}, + }, + { + Config: testAccAlloydbInstance_connectionPoolConfigWithSomeFlags(context4), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.enabled", "true"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.pool_mode", "session"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.min_pool_size", "100"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.query_wait_timeout", "120"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.ignore_startup_parameters", "timezone,icu_validation_level,client_encoding,datestyle,intervalstyle"), + 
resource.TestCheckResourceAttr("google_alloydb_instance.default", "connection_pool_config.0.flags.stats_users", "bar,baz,qux"), + ), + }, + { + ResourceName: "google_alloydb_instance.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster", "instance_id", "reconciling", "update_time"}, + }, + }, + }) +} + +func testAccAlloydbInstance_connectionPoolConfigNoFlags(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_alloydb_instance" "default" { + cluster = google_alloydb_cluster.default.name + instance_id = "tf-test-alloydb-instance%{random_suffix}" + instance_type = "PRIMARY" + connection_pool_config { + enabled = %{enabled} + } +} + +resource "google_alloydb_cluster" "default" { + cluster_id = "tf-test-alloydb-cluster%{random_suffix}" + location = "us-central1" + network_config { + network = data.google_compute_network.default.id + } + + deletion_protection = false +} + +data "google_project" "project" {} + +data "google_compute_network" "default" { + name = "%{network_name}" +} +`, context) +} + +func testAccAlloydbInstance_connectionPoolConfigWithAllFlags(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_alloydb_instance" "default" { + cluster = google_alloydb_cluster.default.name + instance_id = "tf-test-alloydb-instance%{random_suffix}" + instance_type = "PRIMARY" + connection_pool_config { + enabled = %{enabled} + flags = { + "pool_mode" = "%{pool_mode}" + "max_pool_size" = %{max_pool_size} + "min_pool_size" = %{min_pool_size} + "max_client_connections" = %{max_client_connections} + "client_connection_idle_timeout" = %{client_connection_idle_timeout} + "server_connection_idle_timeout" = %{server_connection_idle_timeout} + "query_wait_timeout" = %{query_wait_timeout} + "max_prepared_statements" = %{max_prepared_statements} + "ignore_startup_parameters" = "%{ignore_startup_parameters}" + "server_lifetime" = %{server_lifetime} + "stats_users" = "%{stats_users}" + } + } +} + +resource "google_alloydb_cluster" "default" { + cluster_id = "tf-test-alloydb-cluster%{random_suffix}" + location = "us-central1" + network_config { + network = data.google_compute_network.default.id + } + + deletion_protection = false +} + +data "google_project" "project" {} + +data "google_compute_network" "default" { + name = "%{network_name}" +} +`, context) +} + +func testAccAlloydbInstance_connectionPoolConfigWithSomeFlags(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_alloydb_instance" "default" { + cluster = google_alloydb_cluster.default.name + instance_id = "tf-test-alloydb-instance%{random_suffix}" + instance_type = "PRIMARY" + connection_pool_config { + enabled = %{enabled} + flags = { + "pool_mode" = "%{pool_mode}" + "min_pool_size" = %{min_pool_size} + "query_wait_timeout" = %{query_wait_timeout} + "ignore_startup_parameters" = "%{ignore_startup_parameters}" + "stats_users" = "%{stats_users}" + } + } +} + +resource "google_alloydb_cluster" "default" { + cluster_id = "tf-test-alloydb-cluster%{random_suffix}" + location = "us-central1" + network_config { + network = data.google_compute_network.default.id + } + + deletion_protection = false +} + +data "google_project" "project" {} + +data "google_compute_network" "default" { + name = "%{network_name}" +} +`, context) +} + +func TestAccAlloydbInstance_ObservabilityConfig_Update(t *testing.T) { + t.Parallel() + random_suffix := acctest.RandString(t, 10) + networkName := 
acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydb-1") + + // 1. Initial State: Everything Enabled + contextEnableAll := map[string]interface{}{ + "random_suffix": random_suffix, + "network_name": networkName, + "enabled": true, + "preserve_comments": true, + "track_wait_events": true, + "max_query_string_length": 1024, + "record_application_tags": true, + "query_plans_per_minute": 10, + "track_active_queries": true, + "assistive_experiences_enabled": false, + } + + contextDisable := map[string]interface{}{ + "random_suffix": random_suffix, + "network_name": networkName, + } + + // 3. Re-Enable Main Toggle, but Disable Sub-features (Test Case 2) + contextEnabledButSubFeaturesDisabled := map[string]interface{}{ + "random_suffix": random_suffix, + "network_name": networkName, + "enabled": true, + "preserve_comments": false, + "track_wait_events": false, + "max_query_string_length": 2048, + "record_application_tags": false, + "query_plans_per_minute": 5, + "track_active_queries": false, + "assistive_experiences_enabled": false, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckAlloydbInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAlloydbInstance_ObservabilityConfig(contextEnableAll), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_alloydb_instance.default", "observability_config.0.enabled", "true"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "observability_config.0.max_query_string_length", "1024"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "observability_config.0.track_wait_events", "true"), + ), + }, + { + Config: testAccAlloydbInstance_ObservabilityConfig_Disabled(contextDisable), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_alloydb_instance.default", "observability_config.0.enabled", "false"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "observability_config.0.max_query_string_length", "10240"), // Disabled default value + resource.TestCheckResourceAttr("google_alloydb_instance.default", "observability_config.0.query_plans_per_minute", "20"), // default value + ), + }, + // Step 3: Mark enabled = true and turn all the other booleans to false + { + Config: testAccAlloydbInstance_ObservabilityConfig(contextEnabledButSubFeaturesDisabled), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_alloydb_instance.default", "observability_config.0.enabled", "true"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "observability_config.0.preserve_comments", "false"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "observability_config.0.track_wait_events", "false"), + resource.TestCheckResourceAttr("google_alloydb_instance.default", "observability_config.0.max_query_string_length", "2048"), + ), + }, + }, + }) +} + +func testAccAlloydbInstance_ObservabilityConfig_Disabled(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_alloydb_instance" "default" { + cluster = google_alloydb_cluster.default.name + instance_id = "tf-test-alloydb-instance%{random_suffix}" + instance_type = "PRIMARY" + machine_config { + cpu_count = 2 + } + observability_config { + enabled = false + } +} +resource "google_alloydb_cluster" "default" { + cluster_id = "tf-test-alloydb-cluster%{random_suffix}" + 
location = "us-central1" + network_config { + network = data.google_compute_network.default.id + } + initial_user { + password = "tf-test-alloydb-cluster%{random_suffix}" + } + deletion_protection = false +} +data "google_compute_network" "default" { + name = "%{network_name}" +} +`, context) +} +func testAccAlloydbInstance_ObservabilityConfig(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_alloydb_instance" "default" { + cluster = google_alloydb_cluster.default.name + instance_id = "tf-test-alloydb-instance%{random_suffix}" + instance_type = "PRIMARY" + machine_config { + cpu_count = 2 + } + observability_config { + enabled = %{enabled} + preserve_comments = %{preserve_comments} + track_wait_events = %{track_wait_events} + max_query_string_length = %{max_query_string_length} + record_application_tags = %{record_application_tags} + query_plans_per_minute = %{query_plans_per_minute} + track_active_queries = %{track_active_queries} + assistive_experiences_enabled = %{assistive_experiences_enabled} + } +} +resource "google_alloydb_cluster" "default" { + cluster_id = "tf-test-alloydb-cluster%{random_suffix}" + location = "us-central1" + network_config { + network = data.google_compute_network.default.id + } + initial_user { + password = "tf-test-alloydb-cluster%{random_suffix}" + } + deletion_protection = false +} +data "google_compute_network" "default" { + name = "%{network_name}" +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/apigee/resource_apigee_api_product_update_test.go b/mmv1/third_party/terraform/services/apigee/resource_apigee_api_product_update_test.go index fb1cc182568d..e5d3748c3560 100644 --- a/mmv1/third_party/terraform/services/apigee/resource_apigee_api_product_update_test.go +++ b/mmv1/third_party/terraform/services/apigee/resource_apigee_api_product_update_test.go @@ -118,6 +118,10 @@ resource "google_apigee_instance" "apigee_instance" { org_id = google_apigee_organization.apigee_org.id peering_cidr_range = "SLASH_22" } +resource "google_apigee_environment" "env_dev" { + name = "dev" + org_id = google_apigee_organization.apigee_org.id +} resource "google_apigee_api_product" "apigee_api_product" { org_id = google_apigee_organization.apigee_org.id name = "tf-test%{random_suffix}" @@ -132,7 +136,7 @@ resource "google_apigee_api_product" "apigee_api_product" { quota_time_unit = "day" quota_counter_scope = "PROXY" - environments = ["dev", "hom"] + environments = ["dev"] scopes = [ "read:weather", "write:reports" @@ -274,7 +278,8 @@ resource "google_apigee_api_product" "apigee_api_product" { } depends_on = [ - google_apigee_instance.apigee_instance + google_apigee_instance.apigee_instance, + google_apigee_environment.env_dev ] } `, context) @@ -356,6 +361,14 @@ resource "google_apigee_developer" "apigee_developer" { google_apigee_instance.apigee_instance ] } +resource "google_apigee_environment" "env_dev" { + name = "dev" + org_id = google_apigee_organization.apigee_org.id +} +resource "google_apigee_environment" "env_hom" { + name = "hom" + org_id = google_apigee_organization.apigee_org.id +} resource "google_apigee_api_product" "apigee_api_product" { org_id = google_apigee_organization.apigee_org.id name = "tf-test%{random_suffix}" @@ -370,7 +383,7 @@ resource "google_apigee_api_product" "apigee_api_product" { quota_time_unit = "day" quota_counter_scope = "PROXY" - environments = ["dev"] + environments = ["dev", "hom"] scopes = [ "read:weather" ] @@ -511,7 +524,9 @@ resource "google_apigee_api_product" 
"apigee_api_product" { } depends_on = [ - google_apigee_instance.apigee_instance + google_apigee_instance.apigee_instance, + google_apigee_environment.env_dev, + google_apigee_environment.env_hom ] } `, context) diff --git a/mmv1/third_party/terraform/services/apphub/resource_apphub_boundary_test.go b/mmv1/third_party/terraform/services/apphub/resource_apphub_boundary_test.go new file mode 100644 index 000000000000..0ef2f04e60f8 --- /dev/null +++ b/mmv1/third_party/terraform/services/apphub/resource_apphub_boundary_test.go @@ -0,0 +1,85 @@ +package apphub_test + +import ( + "fmt" + "log" + "strconv" + "strings" + "testing" + "time" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "google.golang.org/api/googleapi" +) + +var ( + _ = fmt.Sprintf + _ = log.Print + _ = strconv.Atoi + _ = strings.Trim + _ = time.Now + _ = resource.TestMain + _ = terraform.NewState + _ = envvar.TestEnvVar + _ = tpgresource.SetLabels + _ = transport_tpg.Config{} + _ = googleapi.Error{} +) + +func TestAccApphubBoundary_apphubBoundary_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "crm_node_project_number": envvar.GetTestProjectNumberFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccApphubBoundary_apphubBoundaryBasicExample(context), + }, + { + ResourceName: "google_apphub_boundary.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location"}, + }, + { + Config: testAccApphubBoundary_apphubBoundaryNoCrmNode(context), + }, + { + ResourceName: "google_apphub_boundary.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location"}, + }, + }, + }) +} + +func testAccApphubBoundary_apphubBoundaryBasicExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_apphub_boundary" "example" { + location = "global" + crm_node = "projects/%{crm_node_project_number}" +} +`, context) +} + +func testAccApphubBoundary_apphubBoundaryNoCrmNode(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_apphub_boundary" "example" { + location = "global" +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/artifactregistry/data_source_artifact_registry_versions.go b/mmv1/third_party/terraform/services/artifactregistry/data_source_artifact_registry_versions.go new file mode 100644 index 000000000000..0d69d2e69f36 --- /dev/null +++ b/mmv1/third_party/terraform/services/artifactregistry/data_source_artifact_registry_versions.go @@ -0,0 +1,236 @@ +package artifactregistry + +import ( + "fmt" + "net/http" + "net/url" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceArtifactRegistryVersions() *schema.Resource { + return &schema.Resource{ + Read: dataSourceArtifactRegistryVersionsRead, + 
Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + }, + "repository_id": { + Type: schema.TypeString, + Required: true, + }, + "package_name": { + Type: schema.TypeString, + Required: true, + }, + "filter": { + Type: schema.TypeString, + Optional: true, + }, + "view": { + Type: schema.TypeString, + Optional: true, + Default: "BASIC", + ValidateFunc: validateViewArtifactRegistryVersions, + }, + "project": { + Type: schema.TypeString, + Optional: true, + }, + "versions": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "related_tags": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "version": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + }, + "annotations": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + } +} + +func dataSourceArtifactRegistryVersionsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + basePath, err := tpgresource.ReplaceVars(d, config, "{{ArtifactRegistryBasePath}}") + if err != nil { + return fmt.Errorf("Error setting Artifact Registry base path: %s", err) + } + + resourcePath, err := tpgresource.ReplaceVars(d, config, fmt.Sprintf("projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}/packages/{{package_name}}/versions")) + if err != nil { + return fmt.Errorf("Error setting resource path: %s", err) + } + + view := d.Get("view").(string) + + urlRequest := basePath + resourcePath + + u, err := url.Parse(urlRequest) + if err != nil { + return fmt.Errorf("Error parsing URL: %s", err) + } + + q := u.Query() + q.Set("view", view) + + filter := "" + if v, ok := d.GetOk("filter"); ok { + filter = v.(string) + q.Set("filter", filter) + } + + u.RawQuery = q.Encode() + urlRequest = u.String() + + headers := make(http.Header) + versions := make([]map[string]interface{}, 0) + pageToken := "" + + for { + u, err := url.Parse(urlRequest) + if err != nil { + return fmt.Errorf("Error parsing URL: %s", err) + } + + q := u.Query() + if pageToken != "" { + q.Set("pageToken", pageToken) + } + u.RawQuery = q.Encode() + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + RawURL: u.String(), + UserAgent: userAgent, + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error listing Artifact Registry versions: %s", err) + } + + if items, ok := res["versions"].([]interface{}); ok { + for _, item := range items { + version := item.(map[string]interface{}) + + var relatedTags []map[string]interface{} + if rawTags, ok := version["relatedTags"].([]interface{}); ok { + for _, rawTag := range rawTags { + if tagMap, ok := rawTag.(map[string]interface{}); ok { + entry := map[string]interface{}{ + "name": tagMap["name"], + "version": tagMap["version"], + } + relatedTags = append(relatedTags, 
entry) + } + } + } + + annotations := make(map[string]string) + if anno, ok := version["annotations"].(map[string]interface{}); ok { + for k, v := range anno { + if val, ok := v.(string); ok { + annotations[k] = val + } + } + } + + getString := func(m map[string]interface{}, key string) string { + if v, ok := m[key].(string); ok { + return v + } + return "" + } + + versions = append(versions, map[string]interface{}{ + "name": getString(version, "name"), + "description": getString(version, "description"), + "related_tags": relatedTags, + "create_time": getString(version, "createTime"), + "update_time": getString(version, "updateTime"), + "annotations": annotations, + }) + } + } + + if nextToken, ok := res["nextPageToken"].(string); ok && nextToken != "" { + pageToken = nextToken + } else { + break + } + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + + if err := d.Set("versions", versions); err != nil { + return fmt.Errorf("Error setting Artifact Registry versions: %s", err) + } + + d.SetId(resourcePath) + + return nil +} + +func validateViewArtifactRegistryVersions(val interface{}, key string) ([]string, []error) { + v := val.(string) + var errs []error + + if v != "BASIC" && v != "FULL" { + errs = append(errs, fmt.Errorf("%q must be either 'BASIC' or 'FULL', got %q", key, v)) + } + + return nil, errs +} diff --git a/mmv1/third_party/terraform/services/artifactregistry/data_source_artifact_registry_versions_test.go b/mmv1/third_party/terraform/services/artifactregistry/data_source_artifact_registry_versions_test.go new file mode 100644 index 000000000000..7a6ccebcc117 --- /dev/null +++ b/mmv1/third_party/terraform/services/artifactregistry/data_source_artifact_registry_versions_test.go @@ -0,0 +1,45 @@ +package artifactregistry_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccDataSourceArtifactRegistryVersions_basic(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceArtifactRegistryVersionsConfig, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.google_artifact_registry_versions.this", "project"), + resource.TestCheckResourceAttrSet("data.google_artifact_registry_versions.this", "location"), + resource.TestCheckResourceAttrSet("data.google_artifact_registry_versions.this", "repository_id"), + resource.TestCheckResourceAttrSet("data.google_artifact_registry_versions.this", "package_name"), + resource.TestCheckResourceAttrSet("data.google_artifact_registry_versions.this", "versions.0.name"), + resource.TestCheckResourceAttrSet("data.google_artifact_registry_versions.this", "versions.0.create_time"), + resource.TestCheckResourceAttrSet("data.google_artifact_registry_versions.this", "versions.0.update_time"), + ), + }, + }, + }) +} + +// Test the data source against the public AR repos +// https://console.cloud.google.com/artifacts/docker/cloudrun/us/container +// https://console.cloud.google.com/artifacts/docker/go-containerregistry/us/gcr.io +const testAccDataSourceArtifactRegistryVersionsConfig = ` +data "google_artifact_registry_versions" "this" { + project = "go-containerregistry" + location = "us" + repository_id = "gcr.io" + package_name = "gcrane" + filter = 
"name=\"projects/go-containerregistry/locations/us/repositories/gcr.io/packages/gcrane/versions/*:b*\"" + view = "FULL" +} +` diff --git a/mmv1/third_party/terraform/services/backupdr/data_source_backup_dr_backup_plan_test.go b/mmv1/third_party/terraform/services/backupdr/data_source_backup_dr_backup_plan_test.go index 71489048da69..95625e748722 100644 --- a/mmv1/third_party/terraform/services/backupdr/data_source_backup_dr_backup_plan_test.go +++ b/mmv1/third_party/terraform/services/backupdr/data_source_backup_dr_backup_plan_test.go @@ -77,6 +77,7 @@ resource "google_backup_dr_backup_plan" "test" { lifecycle { ignore_changes = [backup_vault] } + max_custom_on_demand_retention_days = 30 backup_rules { rule_id = "rule-1" backup_retention_days = 5 diff --git a/mmv1/third_party/terraform/services/backupdr/resource_backup_dr_backup_plan_test.go b/mmv1/third_party/terraform/services/backupdr/resource_backup_dr_backup_plan_test.go index 2b01509366a9..d77e10f19456 100644 --- a/mmv1/third_party/terraform/services/backupdr/resource_backup_dr_backup_plan_test.go +++ b/mmv1/third_party/terraform/services/backupdr/resource_backup_dr_backup_plan_test.go @@ -101,6 +101,7 @@ resource "google_backup_dr_backup_plan" "bp" { backup_plan_id = "tf-test-bp-test-%{random_suffix}" resource_type = "compute.googleapis.com/Instance" backup_vault = google_backup_dr_backup_vault.my-backup-vault.name + max_custom_on_demand_retention_days = 30 backup_rules { rule_id = "rule-1" @@ -183,6 +184,7 @@ resource "google_backup_dr_backup_plan" "bp" { backup_plan_id = "tf-test-bp-test-%{random_suffix}" resource_type = "compute.googleapis.com/Instance" backup_vault = google_backup_dr_backup_vault.my-backup-vault.name + max_custom_on_demand_retention_days = 45 backup_rules { rule_id = "rule-1" diff --git a/mmv1/third_party/terraform/services/biglakeiceberg/resource_biglake_iceberg_catalog_test.go b/mmv1/third_party/terraform/services/biglakeiceberg/resource_biglake_iceberg_catalog_test.go new file mode 100644 index 000000000000..5332f14d702a --- /dev/null +++ b/mmv1/third_party/terraform/services/biglakeiceberg/resource_biglake_iceberg_catalog_test.go @@ -0,0 +1,111 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package biglakeiceberg_test + +import ( + "fmt" + "log" + "strconv" + "strings" + "testing" + "time" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "google.golang.org/api/googleapi" +) + +var ( + _ = fmt.Sprintf + _ = log.Print + _ = strconv.Atoi + _ = strings.Trim + _ = time.Now + _ = resource.TestMain + _ = terraform.NewState + _ = envvar.TestEnvVar + _ = tpgresource.SetLabels + _ = transport_tpg.Config{} + _ = googleapi.Error{} +) + +func TestAccBiglakeIcebergIcebergCatalog_biglakeIcebergCatalog_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBiglakeIcebergIcebergCatalogDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBiglakeIcebergIcebergCatalog_biglakeIcebergCatalogExample(context), + }, + { + ResourceName: "google_biglake_iceberg_catalog.my_iceberg_catalog", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name"}, + }, + { + Config: testAccBiglakeIcebergIcebergCatalog_biglakeIcebergCatalog_update(context), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction("google_biglake_iceberg_catalog.my_iceberg_catalog", plancheck.ResourceActionUpdate), + }, + }, + }, + { + ResourceName: "google_biglake_iceberg_catalog.my_iceberg_catalog", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name"}, + }, + }, + }) +} + +func testAccBiglakeIcebergIcebergCatalog_biglakeIcebergCatalog_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_storage_bucket" "bucket_for_my_iceberg_catalog" { + name = "tf_test_my_iceberg_catalog%{random_suffix}" + location = "us-central1" + force_destroy = true + uniform_bucket_level_access = true +} + +resource "google_biglake_iceberg_catalog" "my_iceberg_catalog" { + name = "tf_test_my_iceberg_catalog%{random_suffix}" + catalog_type = "CATALOG_TYPE_GCS_BUCKET" + credential_mode = "CREDENTIAL_MODE_VENDED_CREDENTIALS" + depends_on = [ + google_storage_bucket.bucket_for_my_iceberg_catalog + ] +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/bigquerydatatransfer/resource_bigquery_data_transfer_config_test.go b/mmv1/third_party/terraform/services/bigquerydatatransfer/resource_bigquery_data_transfer_config_test.go index 8efa8520ea3d..b586865c1384 100644 --- a/mmv1/third_party/terraform/services/bigquerydatatransfer/resource_bigquery_data_transfer_config_test.go +++ b/mmv1/third_party/terraform/services/bigquerydatatransfer/resource_bigquery_data_transfer_config_test.go @@ -2,16 +2,15 @@ package bigquerydatatransfer_test import ( "fmt" - "strings" - "testing" - "time" - "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" 
"github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/services/bigquerydatatransfer" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "strings" + "testing" + "time" ) func TestBigqueryDataTransferConfig_resourceBigqueryDTCParamsCustomDiffFuncForceNewWhenGoogleCloudStorage(t *testing.T) { @@ -641,6 +640,19 @@ resource "google_pubsub_topic" "my_topic" { name = "tf-test-my-topic-%s" } +resource "google_bigquery_table" "my_table" { + deletion_protection = false + + dataset_id = google_bigquery_dataset.my_dataset.dataset_id + table_id = "my_table" + schema = < 0 { + cfg.Cert = expandRegistryCertificateConfig(certRaw[0]) + } + } + if val, ok := ls["key"]; ok { + keyRaw := val.([]interface{}) + if len(keyRaw) > 0 { + cfg.Key = expandRegistryCertificateConfig(keyRaw[0]) + } + } + return cfg +} + func expandSoleTenantConfig(v interface{}) *container.SoleTenantConfig { if v == nil { return nil @@ -2746,9 +2969,89 @@ func flattenContainerdConfig(c *container.ContainerdConfig) []map[string]interfa if c.WritableCgroups != nil { r["writable_cgroups"] = flattenWritableCgroups(c.WritableCgroups) } + if c.RegistryHosts != nil { + r["registry_hosts"] = flattenRegistryHosts(c.RegistryHosts) + } return append(result, r) } +func flattenRegistryHosts(registryHosts []*container.RegistryHostConfig) []map[string]interface{} { + items := []map[string]interface{}{} + if len(registryHosts) == 0 { + return items + } + + for _, host := range registryHosts { + item := make(map[string]interface{}) + item["server"] = host.Server + item["hosts"] = flattenHostInRegistryHosts(host.Hosts) + items = append(items, item) + } + return items +} + +func flattenHostInRegistryHosts(hosts []*container.HostConfig) []map[string]interface{} { + items := make([]map[string]interface{}, 0, len(hosts)) + if len(hosts) == 0 { + return items + } + for _, h := range hosts { + item := make(map[string]interface{}) + item["host"] = h.Host + item["capabilities"] = h.Capabilities + item["override_path"] = h.OverridePath + item["dial_timeout"] = h.DialTimeout + + if h.Header != nil { + tmp := make([]interface{}, len(h.Header)) + for i, val := range h.Header { + tmp[i] = map[string]interface{}{ + "key": val.Key, + "value": val.Value, + } + } + item["header"] = tmp + } + + if h.Ca != nil { + tmp := make([]interface{}, len(h.Ca)) + for i, val := range h.Ca { + if val != nil && val.GcpSecretManagerSecretUri != "" { + tmp[i] = map[string]interface{}{ + "gcp_secret_manager_secret_uri": val.GcpSecretManagerSecretUri, + } + } + } + item["ca"] = tmp + } + + if h.Client != nil { + tmp := make([]interface{}, len(h.Client)) + for i, val := range h.Client { + currentClient := map[string]interface{}{} + if val != nil && val.Cert != nil && val.Cert.GcpSecretManagerSecretUri != "" { + currentClient["cert"] = []interface{}{ + map[string]interface{}{ + "gcp_secret_manager_secret_uri": val.Cert.GcpSecretManagerSecretUri, + }, + } + } + if val != nil && val.Key != nil && val.Key.GcpSecretManagerSecretUri != "" { + currentClient["key"] = []interface{}{ + map[string]interface{}{ + "gcp_secret_manager_secret_uri": val.Key.GcpSecretManagerSecretUri, + }, + } + } + tmp[i] = currentClient + } + item["client"] = tmp + } + items = append(items, item) + } + return items +} + func flattenPrivateRegistryAccessConfig(c *container.PrivateRegistryAccessConfig) []map[string]interface{} { result := 
[]map[string]interface{}{} if c == nil { diff --git a/mmv1/third_party/terraform/services/container/resource_container_cluster_meta.yaml.tmpl b/mmv1/third_party/terraform/services/container/resource_container_cluster_meta.yaml.tmpl index aa00c4abb071..7b909c8137f8 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_cluster_meta.yaml.tmpl +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster_meta.yaml.tmpl @@ -193,13 +193,15 @@ fields: - field: 'maintenance_policy.daily_maintenance_window.start_time' api_field: 'maintenancePolicy.window.dailyMaintenanceWindow.startTime' - field: 'maintenance_policy.maintenance_exclusion.end_time' - api_field: 'maintenance_policy.window.maintenance_exclusion.end_time' + api_field: 'maintenancePolicy.window.maintenanceExclusions.value.endTime' - field: 'maintenance_policy.maintenance_exclusion.exclusion_name' - api_field: 'maintenance_policy.window.maintenance_exclusion.exclusion_name' + api_field: 'maintenancePolicy.window.maintenanceExclusions.key' + - field: 'maintenance_policy.maintenance_exclusion.exclusion_options.end_time_behavior' + api_field: 'maintenancePolicy.window.maintenanceExclusions.value.maintenanceExclusionOptions.endTimeBehavior' - field: 'maintenance_policy.maintenance_exclusion.exclusion_options.scope' - api_field: 'maintenance_policy.window.maintenance_exclusion.exclusion_options.scope' + api_field: 'maintenancePolicy.window.maintenanceExclusions.value.maintenanceExclusionOptions.scope' - field: 'maintenance_policy.maintenance_exclusion.start_time' - api_field: 'maintenance_policy.window.maintenance_exclusion.start_time' + api_field: 'maintenancePolicy.window.maintenanceExclusions.value.startTime' - field: 'maintenance_policy.recurring_window.end_time' api_field: 'maintenancePolicy.window.recurringWindow.window.endTime' - field: 'maintenance_policy.recurring_window.recurrence' @@ -250,6 +252,16 @@ fields: - api_field: 'nodeConfig.containerdConfig.privateRegistryAccessConfig.certificateAuthorityDomainConfig.gcpSecretManagerCertificateConfig.secretUri' - api_field: 'nodeConfig.containerdConfig.privateRegistryAccessConfig.enabled' - api_field: 'nodeConfig.containerdConfig.writableCgroups.enabled' + - api_field: 'nodeConfig.containerdConfig.registryHosts.server' + - api_field: 'nodeConfig.containerdConfig.registryHosts.hosts.host' + - api_field: 'nodeConfig.containerdConfig.registryHosts.hosts.capabilities' + - api_field: 'nodeConfig.containerdConfig.registryHosts.hosts.overridePath' + - api_field: 'nodeConfig.containerdConfig.registryHosts.hosts.header.key' + - api_field: 'nodeConfig.containerdConfig.registryHosts.hosts.header.value' + - api_field: 'nodeConfig.containerdConfig.registryHosts.hosts.ca.gcpSecretManagerSecretUri' + - api_field: 'nodeConfig.containerdConfig.registryHosts.hosts.client.cert.gcpSecretManagerSecretUri' + - api_field: 'nodeConfig.containerdConfig.registryHosts.hosts.client.key.gcpSecretManagerSecretUri' + - api_field: 'nodeConfig.containerdConfig.registryHosts.hosts.dialTimeout' - api_field: 'nodeConfig.diskSizeGb' - api_field: 'nodeConfig.diskType' - field: 'node_config.effective_taints.effect' @@ -324,6 +336,7 @@ fields: api_field: 'nodeConfig.linuxNodeConfig.hugepages.hugepageSize1g' - field: 'node_config.linux_node_config.hugepages_config.hugepage_size_2m' api_field: 'nodeConfig.linuxNodeConfig.hugepages.hugepageSize2m' + - api_field: 'nodeConfig.linuxNodeConfig.nodeKernelModuleLoading.policy' - api_field: 'nodeConfig.linuxNodeConfig.sysctls' - api_field: 
'nodeConfig.linuxNodeConfig.transparentHugepageDefrag' - api_field: 'nodeConfig.linuxNodeConfig.transparentHugepageEnabled' @@ -450,6 +463,8 @@ fields: api_field: 'nodePools.config.containerdConfig.privateRegistryAccessConfig.certificateAuthorityDomainConfig.gcpSecretManagerCertificateConfig.secretUri' - field: 'node_pool.node_config.containerd_config.private_registry_access_config.enabled' api_field: 'nodePools.config.containerdConfig.privateRegistryAccessConfig.enabled' + - field: 'node_pool.node_config.containerd_config.writable_cgroups.enabled' + api_field: 'nodePools.config.containerdConfig.writableCgroups.enabled' - field: 'node_pool.node_config.disk_size_gb' api_field: 'nodePools.config.diskSizeGb' - field: 'node_pool.node_config.disk_type' @@ -574,6 +589,8 @@ fields: api_field: 'nodePools.config.linuxNodeConfig.hugepages.hugepageSize1g' - field: 'node_pool.node_config.linux_node_config.hugepages_config.hugepage_size_2m' api_field: 'nodePools.config.linuxNodeConfig.hugepages.hugepageSize2m' + - field: 'node_pool.node_config.linux_node_config.node_kernel_module_loading.policy' + api_field: 'nodePools.config.linuxNodeConfig.nodeKernelModuleLoading.policy' - field: 'node_pool.node_config.linux_node_config.sysctls' api_field: 'nodePools.config.linuxNodeConfig.sysctls' - field: 'node_pool.node_config.linux_node_config.transparent_hugepage_defrag' @@ -662,6 +679,10 @@ fields: api_field: 'nodePools.placementPolicy.type' - field: 'node_pool.queued_provisioning.enabled' api_field: 'nodePools.queuedProvisioning.enabled' +{{- if ne $.TargetVersionName "ga" }} + - field: 'node_pool.upgrade_settings.blue_green_settings.autoscaled_rollout_policy.wait_for_drain_duration' + api_field: 'nodePools.upgradeSettings.blueGreenSettings.autoscaledRolloutPolicy.waitForDrainDuration' +{{- end }} - field: 'node_pool.upgrade_settings.blue_green_settings.node_pool_soak_duration' api_field: 'nodePools.upgradeSettings.blueGreenSettings.nodePoolSoakDuration' - field: 'node_pool.upgrade_settings.blue_green_settings.standard_rollout_policy.batch_node_count' @@ -679,6 +700,7 @@ fields: - field: 'node_pool.version' api_field: 'nodePools.version' - api_field: 'nodePoolAutoConfig.linuxNodeConfig.cgroupMode' + - api_field: 'nodePoolAutoConfig.linuxNodeConfig.nodeKernelModuleLoading.policy' - api_field: 'nodePoolAutoConfig.networkTags.tags' - api_field: 'nodePoolAutoConfig.nodeKubeletConfig.insecureKubeletReadonlyPortEnabled' - field: 'node_pool_auto_config.resource_manager_tags' @@ -687,6 +709,16 @@ fields: - api_field: 'nodePoolDefaults.nodeConfigDefaults.containerdConfig.privateRegistryAccessConfig.certificateAuthorityDomainConfig.gcpSecretManagerCertificateConfig.secretUri' - api_field: 'nodePoolDefaults.nodeConfigDefaults.containerdConfig.privateRegistryAccessConfig.enabled' - api_field: 'nodePoolDefaults.nodeConfigDefaults.containerdConfig.writableCgroups.enabled' + - api_field: 'nodePoolDefaults.nodeConfigDefaults.containerdConfig.registryHosts.server' + - api_field: 'nodePoolDefaults.nodeConfigDefaults.containerdConfig.registryHosts.hosts.host' + - api_field: 'nodePoolDefaults.nodeConfigDefaults.containerdConfig.registryHosts.hosts.capabilities' + - api_field: 'nodePoolDefaults.nodeConfigDefaults.containerdConfig.registryHosts.hosts.overridePath' + - api_field: 'nodePoolDefaults.nodeConfigDefaults.containerdConfig.registryHosts.hosts.header.key' + - api_field: 'nodePoolDefaults.nodeConfigDefaults.containerdConfig.registryHosts.hosts.header.value' + - api_field: 'nodePoolDefaults.nodeConfigDefaults.containerdConfig.registryHosts.hosts.ca.gcpSecretManagerSecretUri' + - api_field: 'nodePoolDefaults.nodeConfigDefaults.containerdConfig.registryHosts.hosts.client.cert.gcpSecretManagerSecretUri' + - api_field: 'nodePoolDefaults.nodeConfigDefaults.containerdConfig.registryHosts.hosts.client.key.gcpSecretManagerSecretUri' + - api_field: 'nodePoolDefaults.nodeConfigDefaults.containerdConfig.registryHosts.hosts.dialTimeout' - api_field: 'nodePoolDefaults.nodeConfigDefaults.gcfsConfig.enabled' - field: 'node_pool_defaults.node_config_defaults.insecure_kubelet_readonly_port_enabled' api_field: 'nodePoolDefaults.nodeConfigDefaults.nodeKubeletConfig.insecureKubeletReadonlyPortEnabled' @@ -733,6 +765,11 @@ fields: - api_field: 'secretManagerConfig.enabled' - api_field: 'secretManagerConfig.rotationConfig.enabled' - api_field: 'secretManagerConfig.rotationConfig.rotationInterval' +{{- if ne $.TargetVersionName "ga" }} + - api_field: 'secretSyncConfig.enabled' + - api_field: 'secretSyncConfig.rotationConfig.enabled' + - api_field: 'secretSyncConfig.rotationConfig.rotationInterval' +{{- end }} - api_field: 'securityPostureConfig.mode' - api_field: 'securityPostureConfig.vulnerabilityMode' - api_field: 'selfLink' diff --git a/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.tmpl b/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.tmpl index a84b374bdcc5..ba863a44712b 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.tmpl +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.tmpl @@ -177,6 +177,9 @@ func TestAccContainerCluster_resourceManagerTags(t *testing.T) { func TestAccContainerCluster_networkingModeRoutes(t *testing.T) { t.Parallel() + // separate shared network name because test requests a specific CIDR + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster-moderoutes") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster-moderoutes", networkName) firstClusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) secondClusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) acctest.VcrTest(t, resource.TestCase{ @@ -185,7 +188,7 @@ func TestAccContainerCluster_networkingModeRoutes(t *testing.T) { CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerCluster_networkingModeRoutes(firstClusterName, secondClusterName), + Config: testAccContainerCluster_networkingModeRoutes(firstClusterName, secondClusterName, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_cluster.primary", "networking_mode", "ROUTES"), resource.TestCheckResourceAttr("google_container_cluster.secondary", "networking_mode", "ROUTES"), ), @@ -774,6 +777,10 @@ func TestAccContainerCluster_inTransitEncryptionConfig(t *testing.T) { func TestAccContainerCluster_networkPerformanceConfig(t *testing.T) { t.Parallel() + // Skip in VCR until the test issue is resolved + // https://github.com/hashicorp/terraform-provider-google/issues/24850 + acctest.SkipIfVcr(t) + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) @@ -1768,6 +1775,10 @@ func TestAccContainerCluster_regionalWithNodeLocations(t *testing.T) { func 
TestAccContainerCluster_withTpu(t *testing.T) { t.Parallel() + // Skip in VCR until the test issue is resolved + // https://github.com/hashicorp/terraform-provider-google/issues/21968 + acctest.SkipIfVcr(t) + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) containerNetName := fmt.Sprintf("tf-test-container-net-%s", acctest.RandString(t, 10)) @@ -2139,25 +2150,10 @@ func TestAccContainerCluster_withNodeConfigLinuxNodeConfig(t *testing.T) { ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, }, // Lastly, update the setting in-place. V1 since UNSPECIFIED is default + // From version 1.35+, cgroup mode v1 will be blocked. { Config: testAccContainerCluster_withNodeConfigLinuxNodeConfig(clusterName, networkName, subnetworkName, "CGROUP_MODE_V1", false, ""), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "google_container_cluster.with_linux_node_config", - "node_config.0.linux_node_config.0.cgroup_mode", "CGROUP_MODE_V1", - ), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - acctest.ExpectNoDelete(), - }, - }, - }, - { - ResourceName: "google_container_cluster.with_linux_node_config", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + ExpectError: regexp.MustCompile("Node pools with cgroupv1 is not supported"), }, // Update linux config transparent hugepage { @@ -6094,6 +6090,10 @@ func TestAccContainerCluster_withGatewayApiConfig(t *testing.T) { func TestAccContainerCluster_withTPUConfig(t *testing.T) { t.Parallel() + // Skip in VCR until the test issue is resolved + // https://github.com/hashicorp/terraform-provider-google/issues/21993 + acctest.SkipIfVcr(t) + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) containerNetName := fmt.Sprintf("tf-test-container-net-%s", acctest.RandString(t, 10)) acctest.VcrTest(t, resource.TestCase{ @@ -7442,13 +7442,15 @@ resource "google_container_cluster" "primary" { `, projectID, name, networkName, subnetworkName) } -func testAccContainerCluster_networkingModeRoutes(firstName, secondName string) string { +func testAccContainerCluster_networkingModeRoutes(firstName, secondName, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "primary" { name = "%s" location = "us-central1-a" initial_node_count = 1 networking_mode = "ROUTES" + network = "%s" + subnetwork = "%s" deletion_protection = false } @@ -7456,10 +7458,12 @@ resource "google_container_cluster" "secondary" { name = "%s" location = "us-central1-a" initial_node_count = 1 + network = "%s" + subnetwork = "%s" cluster_ipv4_cidr = "10.96.0.0/14" deletion_protection = false } -`, firstName, secondName) +`, firstName, networkName, subnetworkName, secondName, networkName, subnetworkName) } func testAccContainerCluster_misc(name, networkName, subnetworkName string) string { @@ -14349,6 +14353,328 @@ resource "google_container_cluster" "primary" { `, secretID, clusterName, customDomain, networkName, subnetworkName) } +func TestAccContainerCluster_registryHosts(t *testing.T) { + // This test also checks containerd_config and its updates + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + nodePoolName := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + secretID := fmt.Sprintf("tf-test-secret-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, 
"gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_registryHosts(secretID, clusterName, networkName, subnetworkName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr( + "google_container_cluster.primary", + "node_pool_defaults.0.node_config_defaults.0.containerd_config.0.registry_hosts.0.server", + "custom.example.com", + ), + resource.TestCheckResourceAttr( + "google_container_cluster.primary", + "node_pool_defaults.0.node_config_defaults.0.containerd_config.0.registry_hosts.0.hosts.0.host", + "custom.mirror.com", + ), + ), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection", "min_master_version"}, + }, + // The above tests the default for _new_ node pools; this tests the configuration for default-pool if + // defined within the `container_cluster` resource + { + Config: testAccContainerCluster_withNodeConfigRegistryHosts(secretID, clusterName, networkName, subnetworkName, "foo.example.com", "foo.mirror.com"), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr( + "google_container_cluster.primary", + "node_config.0.containerd_config.0.registry_hosts.0.server", + "foo.example.com", + ), + resource.TestCheckResourceAttr( + "google_container_cluster.primary", + "node_config.0.containerd_config.0.registry_hosts.0.hosts.0.host", + "foo.mirror.com", + ), + ), + }, + // We're already testing going from no `node_config` to having one in the previous step, but test updating + // anyway. + { + Config: testAccContainerCluster_withNodeConfigRegistryHosts(secretID, clusterName, networkName, subnetworkName, "bar.example.org", "bar.mirror.org"), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr( + "google_container_cluster.primary", + "node_config.0.containerd_config.0.registry_hosts.0.server", + "bar.example.org", + ), + resource.TestCheckResourceAttr( + "google_container_cluster.primary", + "node_config.0.containerd_config.0.registry_hosts.0.hosts.0.host", + "bar.mirror.org", + ), + ), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection", "min_master_version"}, + }, + // This last test *will* force recreation, and tests a (named) node pool defined in + // `google_container_cluster.node_pool`. Deletions are expected here too. 
+ { + Config: testAccContainerCluster_withNodePoolRegistryHosts(secretID, clusterName, nodePoolName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection", "min_master_version"}, + }, + }, + }) +} + +func testAccContainerCluster_registryHosts(secretID, clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_project" "test_project" {} + +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_secret_manager_secret" "secret_basic" { + secret_id = "%s" + replication { + user_managed { + replicas { + location = "us-central1" + } + } + } +} + +resource "google_secret_manager_secret_version" "secret_version_basic" { + secret = google_secret_manager_secret.secret_basic.id + secret_data = "dummypassword" +} + +resource "google_secret_manager_secret_iam_member" "secret_iam" { + secret_id = google_secret_manager_secret.secret_basic.id + role = "roles/secretmanager.admin" + member = "serviceAccount:${data.google_project.test_project.number}-compute@developer.gserviceaccount.com" + depends_on = [google_secret_manager_secret_version.secret_version_basic] +} + +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + } + node_pool_defaults { + node_config_defaults { + containerd_config { + registry_hosts { + server = "custom.example.com" + hosts { + host = "custom.mirror.com" + capabilities = ["HOST_CAPABILITY_PULL","HOST_CAPABILITY_RESOLVE"] + override_path = false + dial_timeout = "30s" + header { + key = "header_key" + value = ["header_value_1","header_value_2"] + } + ca { + gcp_secret_manager_secret_uri = google_secret_manager_secret_version.secret_version_basic.name + } + } + } + } + } + } +} +`, secretID, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_withNodePoolRegistryHosts(secretID, clusterName, nodePoolName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_project" "test_project" {} + +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_secret_manager_secret" "secret_basic" { + secret_id = "%s" + replication { + user_managed { + replicas { + location = "us-central1" + } + } + } +} +resource "google_secret_manager_secret_version" "secret_version_basic" { + secret = google_secret_manager_secret.secret_basic.id + secret_data = "dummypassword" +} + +resource "google_secret_manager_secret_iam_member" "secret_iam" { + secret_id = google_secret_manager_secret.secret_basic.id + role = "roles/secretmanager.admin" + member = "serviceAccount:${data.google_project.test_project.number}-compute@developer.gserviceaccount.com" + depends_on = [google_secret_manager_secret_version.secret_version_basic] +} +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + + node_pool { + name = "%s" + initial_node_count = 1 + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + containerd_config { + 
registry_hosts { + server = "custom.example.com" + hosts { + host = "custom.mirror.com" + capabilities = ["HOST_CAPABILITY_PULL","HOST_CAPABILITY_PUSH"] + override_path = true + dial_timeout = "30s" + header { + key = "header_key" + value = ["header_value_1","header_value_2"] + } + ca { + gcp_secret_manager_secret_uri = google_secret_manager_secret_version.secret_version_basic.name + } + client { + cert { + gcp_secret_manager_secret_uri = google_secret_manager_secret_version.secret_version_basic.name + } + key { + gcp_secret_manager_secret_uri = google_secret_manager_secret_version.secret_version_basic.name + } + } + } + } + } + } + } + network = "%s" + subnetwork = "%s" + + deletion_protection = false +} +`, secretID, clusterName, nodePoolName, networkName, subnetworkName) +} + +func testAccContainerCluster_withNodeConfigRegistryHosts(secretID, clusterName, networkName, subnetworkName, customServer, customMirror string) string { + return fmt.Sprintf(` +data "google_project" "test_project" {} + +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_secret_manager_secret" "secret_basic" { + secret_id = "%s" + replication { + user_managed { + replicas { + location = "us-central1" + } + } + } +} +resource "google_secret_manager_secret_version" "secret_version_basic" { + secret = google_secret_manager_secret.secret_basic.id + secret_data = "dummypassword" +} + +resource "google_secret_manager_secret_iam_member" "secret_iam" { + secret_id = google_secret_manager_secret.secret_basic.id + role = "roles/secretmanager.admin" + member = "serviceAccount:${data.google_project.test_project.number}-compute@developer.gserviceaccount.com" + depends_on = [google_secret_manager_secret_version.secret_version_basic] +} +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + containerd_config { + registry_hosts { + server = "%s" + hosts { + host = "%s" + capabilities = ["HOST_CAPABILITY_PULL","HOST_CAPABILITY_PUSH"] + override_path = true + dial_timeout = "30s" + header { + key = "header_key" + value = ["header_value_1","header_value_2"] + } + ca { + gcp_secret_manager_secret_uri = google_secret_manager_secret_version.secret_version_basic.name + } + client { + cert { + gcp_secret_manager_secret_uri = google_secret_manager_secret_version.secret_version_basic.name + } + key { + gcp_secret_manager_secret_uri = google_secret_manager_secret_version.secret_version_basic.name + } + } + } + } + } + } + network = "%s" + subnetwork = "%s" + + deletion_protection = false +} +`, secretID, clusterName, customServer, customMirror, networkName, subnetworkName) +} + func TestAccContainerCluster_writableCgroups(t *testing.T) { t.Parallel() diff --git a/mmv1/third_party/terraform/services/container/resource_container_node_pool_meta.yaml.tmpl b/mmv1/third_party/terraform/services/container/resource_container_node_pool_meta.yaml.tmpl index 63b5a134f126..d040d7dc9ffe 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_node_pool_meta.yaml.tmpl +++ b/mmv1/third_party/terraform/services/container/resource_container_node_pool_meta.yaml.tmpl @@ -66,6 +66,28 @@ fields: api_field: 'config.containerdConfig.privateRegistryAccessConfig.certificateAuthorityDomainConfig.gcpSecretManagerCertificateConfig.secretUri' - 
field: 'node_config.containerd_config.private_registry_access_config.enabled' api_field: 'config.containerdConfig.privateRegistryAccessConfig.enabled' + - field: 'node_config.containerd_config.writable_cgroups.enabled' + api_field: 'config.containerdConfig.writableCgroups.enabled' + - field: 'node_config.containerd_config.registry_hosts.server' + api_field: 'config.containerdConfig.registryHosts.server' + - field: 'node_config.containerd_config.registry_hosts.hosts.host' + api_field: 'config.containerdConfig.registryHosts.hosts.host' + - field: 'node_config.containerd_config.registry_hosts.hosts.capabilities' + api_field: 'config.containerdConfig.registryHosts.hosts.capabilities' + - field: 'node_config.containerd_config.registry_hosts.hosts.override_path' + api_field: 'config.containerdConfig.registryHosts.hosts.overridePath' + - field: 'node_config.containerd_config.registry_hosts.hosts.header.key' + api_field: 'config.containerdConfig.registryHosts.hosts.header.key' + - field: 'node_config.containerd_config.registry_hosts.hosts.header.value' + api_field: 'config.containerdConfig.registryHosts.hosts.header.value' + - field: 'node_config.containerd_config.registry_hosts.hosts.ca.gcp_secret_manager_secret_uri' + api_field: 'config.containerdConfig.registryHosts.hosts.ca.gcpSecretManagerSecretUri' + - field: 'node_config.containerd_config.registry_hosts.hosts.client.cert.gcp_secret_manager_secret_uri' + api_field: 'config.containerdConfig.registryHosts.hosts.client.cert.gcpSecretManagerSecretUri' + - field: 'node_config.containerd_config.registry_hosts.hosts.client.key.gcp_secret_manager_secret_uri' + api_field: 'config.containerdConfig.registryHosts.hosts.client.key.gcpSecretManagerSecretUri' + - field: 'node_config.containerd_config.registry_hosts.hosts.dial_timeout' + api_field: 'config.containerdConfig.registryHosts.hosts.dialTimeout' - field: 'node_config.disk_size_gb' api_field: 'config.diskSizeGb' - field: 'node_config.disk_type' @@ -190,6 +212,8 @@ fields: api_field: 'config.linuxNodeConfig.hugepages.hugepageSize1g' - field: 'node_config.linux_node_config.hugepages_config.hugepage_size_2m' api_field: 'config.linuxNodeConfig.hugepages.hugepageSize2m' + - field: 'node_config.linux_node_config.node_kernel_module_loading.policy' + api_field: 'config.linuxNodeConfig.nodeKernelModuleLoading.policy' - field: 'node_config.linux_node_config.sysctls' api_field: 'config.linuxNodeConfig.sysctls' - field: 'node_config.linux_node_config.transparent_hugepage_defrag' @@ -276,6 +300,9 @@ fields: - api_field: 'placementPolicy.type' - field: 'project' - api_field: 'queuedProvisioning.enabled' +{{- if ne $.TargetVersionName "ga" }} + - api_field: 'upgradeSettings.blueGreenSettings.autoscaledRolloutPolicy.waitForDrainDuration' +{{- end }} - api_field: 'upgradeSettings.blueGreenSettings.nodePoolSoakDuration' - api_field: 'upgradeSettings.blueGreenSettings.standardRolloutPolicy.batchNodeCount' - api_field: 'upgradeSettings.blueGreenSettings.standardRolloutPolicy.batchPercentage' diff --git a/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.tmpl b/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.tmpl index 92f649eb3f72..2ce6469932b0 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.tmpl +++ b/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.tmpl @@ -2344,6 +2344,10 @@ resource "google_container_node_pool" "np" { func 
TestAccContainerNodePool_secondaryBootDisks(t *testing.T) { t.Parallel() + // Skip in VCR until the test issue is resolved + // https://github.com/hashicorp/terraform-provider-google/issues/23855 + acctest.SkipIfVcr(t) + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") @@ -6100,6 +6104,143 @@ resource "google_container_node_pool" "np" { `, cluster, network, subnetwork, nodepool) } +func TestAccContainerNodePool_registryHosts(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + nodepool := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + secretID := fmt.Sprintf("tf-test-secret-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_registryHosts(secretID, cluster, nodepool, networkName, subnetworkName, "custom.example.com", "custom.mirror.com"), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr( + "google_container_node_pool.np", + "node_config.0.containerd_config.0.registry_hosts.0.server", + "custom.example.com", + ), + resource.TestCheckResourceAttr( + "google_container_node_pool.np", + "node_config.0.containerd_config.0.registry_hosts.0.hosts.0.host", + "custom.mirror.com", + ), + ), + }, + { + // Make sure in-place updates work + Config: testAccContainerNodePool_registryHosts(secretID, cluster, nodepool, networkName, subnetworkName, "foo.example.org", "foo.mirror.org"), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr( + "google_container_node_pool.np", + "node_config.0.containerd_config.0.registry_hosts.0.server", + "foo.example.org", + ), + resource.TestCheckResourceAttr( + "google_container_node_pool.np", + "node_config.0.containerd_config.0.registry_hosts.0.hosts.0.host", + "foo.mirror.org", + ), + ), + }, + }, + }) +} + +func testAccContainerNodePool_registryHosts(secretID, cluster, nodepool, network, subnetwork, customServer, customMirror string) string { + return fmt.Sprintf(` +data "google_project" "test_project" {} + +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "%s" + replication { + user_managed { + replicas { + location = "us-central1" + } + } + } +} + +resource "google_secret_manager_secret_version" "secret-version-basic" { + secret = google_secret_manager_secret.secret-basic.id + secret_data = "dummypassword" +} + +resource "google_secret_manager_secret_iam_member" "secret_iam" { + secret_id = google_secret_manager_secret.secret-basic.id + role = "roles/secretmanager.admin" + member = "serviceAccount:${data.google_project.test_project.number}-compute@developer.gserviceaccount.com" + depends_on = [google_secret_manager_secret_version.secret-version-basic] +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = 
"us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" + min_master_version = data.google_container_engine_versions.central1a.latest_master_version +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + image_type = "COS_CONTAINERD" + containerd_config { + registry_hosts { + server = "%s" + hosts { + host = "%s" + capabilities = ["HOST_CAPABILITY_PULL","HOST_CAPABILITY_RESOLVE"] + override_path = false + dial_timeout = "30s" + header { + key = "header_key" + value = ["header_value_1","header_value_2"] + } + ca { + gcp_secret_manager_secret_uri = google_secret_manager_secret_version.secret-version-basic.name + } + client { + cert { + gcp_secret_manager_secret_uri = google_secret_manager_secret_version.secret-version-basic.name + } + key { + gcp_secret_manager_secret_uri = google_secret_manager_secret_version.secret-version-basic.name + } + } + } + } + } + } +} +`, secretID, cluster, network, subnetwork, nodepool, customServer, customMirror) +} + func TestAccContainerNodePool_defaultDriverInstallation(t *testing.T) { t.Parallel() diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_datascan_test.go b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_datascan_test.go index e707502d4757..236e2e65b64d 100644 --- a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_datascan_test.go +++ b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_datascan_test.go @@ -183,9 +183,7 @@ resource "google_dataplex_datascan" "full_quality" { execution_spec { trigger { - schedule { - cron = "TZ=America/New_York 1 1 * * *" - } + on_demand {} } } diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_entry_test.go b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_entry_test.go index c754cae98154..2eec16e29365 100644 --- a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_entry_test.go +++ b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_entry_test.go @@ -836,3 +836,313 @@ resource "google_dataplex_entry" "test_entry_full" { } `, context) } + +func TestAccDataplexEntry_dataplexEntryUpdateBigQuery(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_number": envvar.GetTestProjectNumberFromEnv(), + "project_id": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataplexEntryDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataplexEntry_dataplexEntryFullUpdateBigQueryPrepare(context), + }, + { + ResourceName: "google_dataplex_entry.test_entry_bigquery_table", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"aspects", "dataset_id", "table_id", "entry_id", "location"}, + }, + { + Config: testAccDataplexEntry_dataplexEntryBigQueryUpdate(context), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction("google_dataplex_entry.test_entry_bigquery_table", plancheck.ResourceActionUpdate), + }, + }, + }, + { + ResourceName: "google_dataplex_entry.test_entry_bigquery_table", + ImportState: 
true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"aspects", "dataset_id", "table_id", "entry_id", "location"}, + }, + }, + }) +} + +func testAccDataplexEntry_dataplexEntryFullUpdateBigQueryPrepare(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_dataplex_aspect_type" "aspect-type-full-one" { + aspect_type_id = "tf-test-aspect-type-full%{random_suffix}-one" + location = "us-central1" + project = "%{project_number}" + + metadata_template = < /var/www/html/index.html +
+      Name: $NAME
+      IP: $IP
+      Metadata: $METADATA
+      
+ EOF + EOF1 + } + + lifecycle { + create_before_destroy = true + } +} + +# health check +resource "google_compute_region_health_check" "default" { + name = "tf-test-l7-ilb-hc%{random_suffix}" + region = "us-west1" + + http_health_check { + port_specification = "USE_SERVING_PORT" + } +} + +# MIG +resource "google_compute_region_instance_group_manager" "mig" { + name = "tf-test-l7-ilb-mig1%{random_suffix}" + region = "us-west1" + + base_instance_name = "vm" + target_size = 2 + + version { + instance_template = google_compute_instance_template.instance_template.id + name = "primary" + } +} + +# allow all access from IAP and health check ranges +resource "google_compute_firewall" "fw_iap" { + name = "tf-test-l7-ilb-fw-allow-iap-hc%{random_suffix}" + direction = "INGRESS" + network = google_compute_network.ilb_network.id + source_ranges = ["130.211.0.0/22", "35.191.0.0/16", "35.235.240.0/20"] + + allow { + protocol = "tcp" + } +} + +# allow http from proxy subnet to backends +resource "google_compute_firewall" "fw_ilb_to_backends" { + name = "tf-test-l7-ilb-fw-allow-ilb-to-backends%{random_suffix}" + direction = "INGRESS" + network = google_compute_network.ilb_network.id + source_ranges = ["10.0.0.0/24"] + target_tags = ["http-server"] + + allow { + protocol = "tcp" + ports = ["80", "443", "8080"] + } + + depends_on = [ + google_compute_firewall.fw_iap + ] +} + +resource "google_network_services_lb_route_extension" "default" { + name = "tf-test-l7-ilb-route-ext%{random_suffix}" + description = "my route extension" + location = "us-west1" + load_balancing_scheme = "INTERNAL_MANAGED" + forwarding_rules = [google_compute_forwarding_rule.default.self_link] + + extension_chains { + name = "chain1" + + match_condition { + cel_expression = "request.path.startsWith('/extensions')" + } + + extensions { + name = "ext11" + authority = "ext11.com" + service = google_compute_region_backend_service.callouts_backend.self_link + timeout = "0.1s" + fail_open = false + + forward_headers = ["custom-header"] + supported_events = ["EVENT_TYPE_UNSPECIFIED"] + } + } + + labels = { + foo = "bar" + } +} + +# Route Extension Backend Instance +resource "google_compute_instance" "callouts_instance" { + name = "tf-test-l7-ilb-callouts-ins%{random_suffix}" + zone = "us-west1-a" + machine_type = "e2-small" + + labels = { + "container-vm" = "cos-stable-109-17800-147-54" + } + + tags = ["allow-ssh","load-balanced-backend"] + + network_interface { + network = google_compute_network.ilb_network.id + subnetwork = google_compute_subnetwork.ilb_subnet.id + + access_config { + # add external ip to fetch packages + } + } + + boot_disk { + auto_delete = true + + initialize_params { + type = "pd-standard" + size = 10 + image = "https://www.googleapis.com/compute/v1/projects/cos-cloud/global/images/cos-stable-109-17800-147-54" + } + } + + # Initialize an Envoy's Ext Proc gRPC API based on a docker container + metadata = { + startup-script = <<-EOF1 + #! 
/bin/bash + apt-get update + apt-get install apache2 -y + a2ensite default-ssl + a2enmod ssl + echo "Page served from second backend service" | tee /var/www/html/index.html + systemctl restart apache2' + EOF1 + } + + lifecycle { + create_before_destroy = true + } + + deletion_protection = false +} + +// callouts instance group +resource "google_compute_instance_group" "callouts_instance_group" { + name = "tf-test-l7-ilb-callouts-ins-group%{random_suffix}" + description = "Terraform test instance group" + zone = "us-west1-a" + + instances = [ + google_compute_instance.callouts_instance.id, + ] + + named_port { + name = "http" + port = "80" + } + + named_port { + name = "grpc" + port = "443" + } +} + +# callout health check +resource "google_compute_region_health_check" "callouts_health_check" { + name = "tf-test-l7-ilb-callouts-hc%{random_suffix}" + region = "us-west1" + + http_health_check { + port = 80 + } + + depends_on = [ + google_compute_region_health_check.default + ] +} + +# callout backend service +resource "google_compute_region_backend_service" "callouts_backend" { + name = "tf-test-l7-ilb-callouts-backend%{random_suffix}" + region = "us-west1" + protocol = "HTTP2" + load_balancing_scheme = "INTERNAL_MANAGED" + timeout_sec = 10 + port_name = "grpc" + health_checks = [google_compute_region_health_check.callouts_health_check.id] + + backend { + group = google_compute_instance_group.callouts_instance_group.id + balancing_mode = "UTILIZATION" + capacity_scaler = 1.0 + } + + depends_on = [ + google_compute_region_backend_service.default + ] +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/networkservices/resource_network_services_multicast_consumer_association_test.go b/mmv1/third_party/terraform/services/networkservices/resource_network_services_multicast_consumer_association_test.go index 394ef2e3e081..1adcff911be3 100644 --- a/mmv1/third_party/terraform/services/networkservices/resource_network_services_multicast_consumer_association_test.go +++ b/mmv1/third_party/terraform/services/networkservices/resource_network_services_multicast_consumer_association_test.go @@ -1,20 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - package networkservices_test import ( diff --git a/mmv1/third_party/terraform/services/networkservices/resource_network_services_multicast_domain_group_test.go b/mmv1/third_party/terraform/services/networkservices/resource_network_services_multicast_domain_group_test.go index b56d211c6b58..6dcb9d0c822d 100644 --- a/mmv1/third_party/terraform/services/networkservices/resource_network_services_multicast_domain_group_test.go +++ b/mmv1/third_party/terraform/services/networkservices/resource_network_services_multicast_domain_group_test.go @@ -1,20 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - package networkservices_test import ( diff --git a/mmv1/third_party/terraform/services/networkservices/resource_network_services_multicast_domain_test.go b/mmv1/third_party/terraform/services/networkservices/resource_network_services_multicast_domain_test.go new file mode 100644 index 000000000000..95b9611dce00 --- /dev/null +++ b/mmv1/third_party/terraform/services/networkservices/resource_network_services_multicast_domain_test.go @@ -0,0 +1,87 @@ +package networkservices_test + +import ( + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "testing" +) + +func TestAccNetworkServicesMulticastDomain_networkServicesMulticastDomainUpdateExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckNetworkServicesMulticastDomainDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNetworkServicesMulticastDomain_networkServicesMulticastDomainUpdateExample_full(context), + }, + { + ResourceName: "google_network_services_multicast_domain.md_test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "location", "multicast_domain_id", "terraform_labels"}, + }, + { + Config: testAccNetworkServicesMulticastDomain_networkServicesMulticastDomainUpdateExample_diff(context), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction("google_network_services_multicast_domain.md_test", plancheck.ResourceActionUpdate), + }, + }, + }, + { + ResourceName: "google_network_services_multicast_domain.md_test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "location", "multicast_domain_id", "terraform_labels"}, + }, + }, + }) +} + +func testAccNetworkServicesMulticastDomain_networkServicesMulticastDomainUpdateExample_full(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "network" { + name = "tf-test-test-md-network%{random_suffix}" + auto_create_subnetworks = false +} +resource "google_network_services_multicast_domain" md_test { + multicast_domain_id = "tf-test-test-md-domain%{random_suffix}" + location = "global" + admin_network = google_compute_network.network.id + connection_config { + connection_type="SAME_VPC" + } + depends_on = [google_compute_network.network] +} +`, context) +} + +func testAccNetworkServicesMulticastDomain_networkServicesMulticastDomainUpdateExample_diff(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "network" { + name = "tf-test-test-md-network%{random_suffix}" + 
auto_create_subnetworks = false +} +resource "google_network_services_multicast_domain" md_test { + multicast_domain_id = "tf-test-test-md-domain%{random_suffix}" + location = "global" + description = "A sample domain" + labels = { + label-one = "value-one" + } + admin_network = google_compute_network.network.id + connection_config { + connection_type="SAME_VPC" + } + depends_on = [google_compute_network.network] +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/networkservices/resource_network_services_multicast_group_consumer_activation_test.go b/mmv1/third_party/terraform/services/networkservices/resource_network_services_multicast_group_consumer_activation_test.go new file mode 100644 index 000000000000..9efefa80bad5 --- /dev/null +++ b/mmv1/third_party/terraform/services/networkservices/resource_network_services_multicast_group_consumer_activation_test.go @@ -0,0 +1,176 @@ +package networkservices_test + +import ( + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "testing" +) + +func TestAccNetworkServicesMulticastGroupConsumerActivation_networkServicesMulticastGroupConsumerActivationUpdateExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckNetworkServicesMulticastGroupConsumerActivationDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNetworkServicesMulticastGroupConsumerActivation_networkServicesMulticastGroupConsumerActivationUpdateExample_full(context), + }, + { + ResourceName: "google_network_services_multicast_group_consumer_activation.mgca_test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "location", "multicast_group_consumer_activation_id", "terraform_labels"}, + }, + { + Config: testAccNetworkServicesMulticastGroupConsumerActivation_networkServicesMulticastGroupConsumerActivationUpdateExample_diff(context), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction("google_network_services_multicast_group_consumer_activation.mgca_test", plancheck.ResourceActionUpdate), + }, + }, + }, + { + ResourceName: "google_network_services_multicast_group_consumer_activation.mgca_test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "location", "multicast_group_consumer_activation_id", "terraform_labels"}, + }, + }, + }) +} + +func testAccNetworkServicesMulticastGroupConsumerActivation_networkServicesMulticastGroupConsumerActivationUpdateExample_full(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "network" { + name = "tf-test-test-network-mgca%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_network_services_multicast_domain" "multicast_domain" { + multicast_domain_id = "tf-test-test-domain-mgca%{random_suffix}" + location = "global" + admin_network = google_compute_network.network.id + connection_config { connection_type="SAME_VPC"} + depends_on = [google_compute_network.network] +} + +resource "google_network_services_multicast_domain_activation" "multicast_domain_activation" { + multicast_domain_activation_id = 
"tf-test-test-domain-activation-mgca%{random_suffix}" + location = "us-central1-b" + multicast_domain = google_network_services_multicast_domain.multicast_domain.id +} + +resource "google_network_services_multicast_consumer_association" "consumer_association" { + multicast_consumer_association_id = "tf-test-test-consumer-association-mgca%{random_suffix}" + location = "us-central1-b" + network = google_compute_network.network.id + multicast_domain_activation = google_network_services_multicast_domain_activation.multicast_domain_activation.id + depends_on = [google_compute_network.network] +} + + +resource "google_network_connectivity_internal_range" "internal_range" { + name = "tf-test-test-internal-range-mgca%{random_suffix}" + network = google_compute_network.network.self_link + usage = "FOR_VPC" + peering = "FOR_SELF" + ip_cidr_range = "224.2.0.2/32" +} + +resource "google_network_services_multicast_group_range" "group_range" { + multicast_group_range_id = "tf-test-test-group-range-mgca%{random_suffix}" + location = "global" + reserved_internal_range = google_network_connectivity_internal_range.internal_range.id + multicast_domain = google_network_services_multicast_domain.multicast_domain.id +} + +resource "google_network_services_multicast_group_range_activation" "group_range_activation" { + multicast_group_range_activation_id = "tf-test-test-mgra-mgca%{random_suffix}" + location = "us-central1-b" + multicast_group_range = google_network_services_multicast_group_range.group_range.id + multicast_domain_activation = google_network_services_multicast_domain_activation.multicast_domain_activation.id +} + +resource "google_network_services_multicast_group_consumer_activation" mgca_test { + multicast_group_consumer_activation_id = "tf-test-test-mgca-mgca%{random_suffix}" + location = "us-central1-b" + multicast_group_range_activation = google_network_services_multicast_group_range_activation.group_range_activation.id + multicast_consumer_association = google_network_services_multicast_consumer_association.consumer_association.id + log_config { + enabled = true + } +} +`, context) +} + +func testAccNetworkServicesMulticastGroupConsumerActivation_networkServicesMulticastGroupConsumerActivationUpdateExample_diff(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "network" { + name = "tf-test-test-network-mgca%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_network_services_multicast_domain" "multicast_domain" { + multicast_domain_id = "tf-test-test-domain-mgca%{random_suffix}" + location = "global" + admin_network = google_compute_network.network.id + connection_config { connection_type="SAME_VPC"} + depends_on = [google_compute_network.network] +} + +resource "google_network_services_multicast_domain_activation" "multicast_domain_activation" { + multicast_domain_activation_id = "tf-test-test-domain-activation-mgca%{random_suffix}" + location = "us-central1-b" + multicast_domain = google_network_services_multicast_domain.multicast_domain.id +} + +resource "google_network_services_multicast_consumer_association" "consumer_association" { + multicast_consumer_association_id = "tf-test-test-consumer-association-mgca%{random_suffix}" + location = "us-central1-b" + network = google_compute_network.network.id + multicast_domain_activation = google_network_services_multicast_domain_activation.multicast_domain_activation.id + depends_on = [google_compute_network.network] +} + + +resource 
"google_network_connectivity_internal_range" "internal_range" { + name = "tf-test-test-internal-range-mgca%{random_suffix}" + network = google_compute_network.network.self_link + usage = "FOR_VPC" + peering = "FOR_SELF" + ip_cidr_range = "224.2.0.2/32" +} + +resource "google_network_services_multicast_group_range" "group_range" { + multicast_group_range_id = "tf-test-test-group-range-mgca%{random_suffix}" + location = "global" + reserved_internal_range = google_network_connectivity_internal_range.internal_range.id + multicast_domain = google_network_services_multicast_domain.multicast_domain.id +} + +resource "google_network_services_multicast_group_range_activation" "group_range_activation" { + multicast_group_range_activation_id = "tf-test-test-mgra-mgca%{random_suffix}" + location = "us-central1-b" + multicast_group_range = google_network_services_multicast_group_range.group_range.id + multicast_domain_activation = google_network_services_multicast_domain_activation.multicast_domain_activation.id +} + +resource "google_network_services_multicast_group_consumer_activation" mgca_test { + multicast_group_consumer_activation_id = "tf-test-test-mgca-mgca%{random_suffix}" + location = "us-central1-b" + description = "my description" + labels = { + "test-label" = "test-value" + } + multicast_group_range_activation = google_network_services_multicast_group_range_activation.group_range_activation.id + multicast_consumer_association = google_network_services_multicast_consumer_association.consumer_association.id +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/networkservices/resource_network_services_multicast_group_producer_activation_test.go b/mmv1/third_party/terraform/services/networkservices/resource_network_services_multicast_group_producer_activation_test.go new file mode 100644 index 000000000000..3bca1eb4c111 --- /dev/null +++ b/mmv1/third_party/terraform/services/networkservices/resource_network_services_multicast_group_producer_activation_test.go @@ -0,0 +1,173 @@ +package networkservices_test + +import ( + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "testing" +) + +func TestAccNetworkServicesMulticastGroupProducerActivation_networkServicesMulticastGroupProducerActivationUpdateExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckNetworkServicesMulticastGroupProducerActivationDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNetworkServicesMulticastGroupProducerActivation_networkServicesMulticastGroupProducerActivationUpdateExample_full(context), + }, + { + ResourceName: "google_network_services_multicast_group_producer_activation.mgpa_test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "location", "multicast_group_producer_activation_id", "terraform_labels"}, + }, + { + Config: testAccNetworkServicesMulticastGroupProducerActivation_networkServicesMulticastGroupProducerActivationUpdateExample_diff(context), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction("google_network_services_multicast_group_producer_activation.mgpa_test", 
plancheck.ResourceActionUpdate), + }, + }, + }, + { + ResourceName: "google_network_services_multicast_group_producer_activation.mgpa_test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "location", "multicast_group_producer_activation_id", "terraform_labels"}, + }, + }, + }) +} + +func testAccNetworkServicesMulticastGroupProducerActivation_networkServicesMulticastGroupProducerActivationUpdateExample_full(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "network" { + name = "tf-test-test-network-mgpa%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_network_services_multicast_domain" "multicast_domain" { + multicast_domain_id = "tf-test-test-domain-mgpa%{random_suffix}" + location = "global" + admin_network = google_compute_network.network.id + connection_config { connection_type="SAME_VPC"} + depends_on = [google_compute_network.network] +} + +resource "google_network_services_multicast_domain_activation" "multicast_domain_activation" { + multicast_domain_activation_id = "tf-test-test-domain-activation-mgpa%{random_suffix}" + location = "us-central1-b" + multicast_domain = google_network_services_multicast_domain.multicast_domain.id +} + +resource "google_network_services_multicast_producer_association" "producer_association" { + multicast_producer_association_id = "tf-test-test-producer-association-mgpa%{random_suffix}" + location = "us-central1-b" + network = google_compute_network.network.id + multicast_domain_activation = google_network_services_multicast_domain_activation.multicast_domain_activation.id + depends_on = [google_compute_network.network] +} + + +resource "google_network_connectivity_internal_range" "internal_range" { + name = "tf-test-test-internal-range-mgpa%{random_suffix}" + network = google_compute_network.network.self_link + usage = "FOR_VPC" + peering = "FOR_SELF" + ip_cidr_range = "224.2.0.2/32" +} + +resource "google_network_services_multicast_group_range" "group_range" { + multicast_group_range_id = "tf-test-test-group-range-mgpa%{random_suffix}" + location = "global" + reserved_internal_range = google_network_connectivity_internal_range.internal_range.id + multicast_domain = google_network_services_multicast_domain.multicast_domain.id +} + +resource "google_network_services_multicast_group_range_activation" "group_range_activation" { + multicast_group_range_activation_id = "tf-test-test-mgra-mgpa%{random_suffix}" + location = "us-central1-b" + multicast_group_range = google_network_services_multicast_group_range.group_range.id + multicast_domain_activation = google_network_services_multicast_domain_activation.multicast_domain_activation.id +} + +resource "google_network_services_multicast_group_producer_activation" mgpa_test { + multicast_group_producer_activation_id = "tf-test-test-mgpa-mgpa%{random_suffix}" + location = "us-central1-b" + description = "my description" + labels = { + "test-label" = "test-value" + } + multicast_group_range_activation = google_network_services_multicast_group_range_activation.group_range_activation.id + multicast_producer_association = google_network_services_multicast_producer_association.producer_association.id +} +`, context) +} + +func testAccNetworkServicesMulticastGroupProducerActivation_networkServicesMulticastGroupProducerActivationUpdateExample_diff(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "network" { + name = 
"tf-test-test-network-mgpa%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_network_services_multicast_domain" "multicast_domain" { + multicast_domain_id = "tf-test-test-domain-mgpa%{random_suffix}" + location = "global" + admin_network = google_compute_network.network.id + connection_config { connection_type="SAME_VPC"} + depends_on = [google_compute_network.network] +} + +resource "google_network_services_multicast_domain_activation" "multicast_domain_activation" { + multicast_domain_activation_id = "tf-test-test-domain-activation-mgpa%{random_suffix}" + location = "us-central1-b" + multicast_domain = google_network_services_multicast_domain.multicast_domain.id +} + +resource "google_network_services_multicast_producer_association" "producer_association" { + multicast_producer_association_id = "tf-test-test-producer-association-mgpa%{random_suffix}" + location = "us-central1-b" + network = google_compute_network.network.id + multicast_domain_activation = google_network_services_multicast_domain_activation.multicast_domain_activation.id + depends_on = [google_compute_network.network] +} + + +resource "google_network_connectivity_internal_range" "internal_range" { + name = "tf-test-test-internal-range-mgpa%{random_suffix}" + network = google_compute_network.network.self_link + usage = "FOR_VPC" + peering = "FOR_SELF" + ip_cidr_range = "224.2.0.2/32" +} + +resource "google_network_services_multicast_group_range" "group_range" { + multicast_group_range_id = "tf-test-test-group-range-mgpa%{random_suffix}" + location = "global" + reserved_internal_range = google_network_connectivity_internal_range.internal_range.id + multicast_domain = google_network_services_multicast_domain.multicast_domain.id +} + +resource "google_network_services_multicast_group_range_activation" "group_range_activation" { + multicast_group_range_activation_id = "tf-test-test-mgra-mgpa%{random_suffix}" + location = "us-central1-b" + multicast_group_range = google_network_services_multicast_group_range.group_range.id + multicast_domain_activation = google_network_services_multicast_domain_activation.multicast_domain_activation.id +} + +resource "google_network_services_multicast_group_producer_activation" mgpa_test { + multicast_group_producer_activation_id = "tf-test-test-mgpa-mgpa%{random_suffix}" + location = "us-central1-b" + multicast_group_range_activation = google_network_services_multicast_group_range_activation.group_range_activation.id + multicast_producer_association = google_network_services_multicast_producer_association.producer_association.id +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/networkservices/resource_network_services_multicast_group_range_test.go b/mmv1/third_party/terraform/services/networkservices/resource_network_services_multicast_group_range_test.go index f02b573203b7..31a73834c055 100644 --- a/mmv1/third_party/terraform/services/networkservices/resource_network_services_multicast_group_range_test.go +++ b/mmv1/third_party/terraform/services/networkservices/resource_network_services_multicast_group_range_test.go @@ -1,20 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. 
-// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - package networkservices_test import ( diff --git a/mmv1/third_party/terraform/services/notebooks/resource_notebooks_instance_container_test.go.tmpl b/mmv1/third_party/terraform/services/notebooks/resource_notebooks_instance_container_test.go.tmpl index 5c97e06403df..d27a91cfaf57 100644 --- a/mmv1/third_party/terraform/services/notebooks/resource_notebooks_instance_container_test.go.tmpl +++ b/mmv1/third_party/terraform/services/notebooks/resource_notebooks_instance_container_test.go.tmpl @@ -26,7 +26,7 @@ func TestAccNotebooksInstance_create_container(t *testing.T) { ImportState: true, ImportStateVerify: true, ExpectNonEmptyPlan: true, - ImportStateVerifyIgnore: []string{"container_image", "metadata", "vm_image"}, + ImportStateVerifyIgnore: []string{"container_image", "metadata", "vm_image", "update_time"}, }, }, }) diff --git a/mmv1/third_party/terraform/services/pubsub/resource_pubsub_subscription_test.go b/mmv1/third_party/terraform/services/pubsub/resource_pubsub_subscription_test.go index cf2397abd68d..37aa04ce414e 100644 --- a/mmv1/third_party/terraform/services/pubsub/resource_pubsub_subscription_test.go +++ b/mmv1/third_party/terraform/services/pubsub/resource_pubsub_subscription_test.go @@ -240,6 +240,8 @@ func TestAccPubsubSubscriptionBigQuery_serviceAccount(t *testing.T) { table := fmt.Sprintf("tf-test-table-%s", acctest.RandString(t, 10)) topic := fmt.Sprintf("tf-test-topic-%s", acctest.RandString(t, 10)) subscriptionShort := fmt.Sprintf("tf-test-sub-%s", acctest.RandString(t, 10)) + serviceAccount := fmt.Sprintf("bq-test-sa-%s", acctest.RandString(t, 10)) + serviceAccount2 := fmt.Sprintf("bq-test-sa2-%s", acctest.RandString(t, 10)) acctest.BootstrapIamMembers(t, []acctest.IamMember{ { @@ -261,7 +263,7 @@ func TestAccPubsubSubscriptionBigQuery_serviceAccount(t *testing.T) { }, Steps: []resource.TestStep{ { - Config: testAccPubsubSubscriptionBigQuery_basic(dataset, table, topic, subscriptionShort, false, "bq-test-sa"), + Config: testAccPubsubSubscriptionBigQuery_basic(dataset, table, topic, subscriptionShort, false, serviceAccount), }, { ResourceName: "google_pubsub_subscription.foo", @@ -279,7 +281,7 @@ func TestAccPubsubSubscriptionBigQuery_serviceAccount(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPubsubSubscriptionBigQuery_basic(dataset, table, topic, subscriptionShort, true, "bq-test-sa2"), + Config: testAccPubsubSubscriptionBigQuery_basic(dataset, table, topic, subscriptionShort, true, serviceAccount2), }, { ResourceName: "google_pubsub_subscription.foo", @@ -390,6 +392,8 @@ func TestAccPubsubSubscriptionCloudStorage_serviceAccount(t *testing.T) { bucket := fmt.Sprintf("tf-test-bucket-%s", acctest.RandString(t, 10)) topic := fmt.Sprintf("tf-test-topic-%s", acctest.RandString(t, 10)) subscriptionShort := fmt.Sprintf("tf-test-sub-%s", acctest.RandString(t, 10)) + serviceAccount := fmt.Sprintf("gcs-test-sa-%s", acctest.RandString(t, 10)) + serviceAccount2 := fmt.Sprintf("gcs-test-sa2-%s", acctest.RandString(t, 10)) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -397,7 +401,7 @@ func TestAccPubsubSubscriptionCloudStorage_serviceAccount(t *testing.T) { CheckDestroy: testAccCheckPubsubSubscriptionDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscriptionShort, "", "", "", 0, 
"", 0, "gcs-test-sa", "text"), + Config: testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscriptionShort, "", "", "", 0, "", 0, serviceAccount, "text"), }, { ResourceName: "google_pubsub_subscription.foo", @@ -415,7 +419,7 @@ func TestAccPubsubSubscriptionCloudStorage_serviceAccount(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscriptionShort, "", "", "", 0, "", 0, "gcs-test-sa2", "avro"), + Config: testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscriptionShort, "", "", "", 0, "", 0, serviceAccount2, "avro"), }, { ResourceName: "google_pubsub_subscription.foo", diff --git a/mmv1/third_party/terraform/services/redis/resource_redis_cluster_test.go b/mmv1/third_party/terraform/services/redis/resource_redis_cluster_test.go index c71855805984..8b1ba71a9a02 100644 --- a/mmv1/third_party/terraform/services/redis/resource_redis_cluster_test.go +++ b/mmv1/third_party/terraform/services/redis/resource_redis_cluster_test.go @@ -1418,3 +1418,104 @@ resource "google_redis_cluster" "cluster-ms" { } `, context) } + +func TestAccRedisCluster_redisClusterHaWithLabelsUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "deletion_protection_enabled": false, + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRedisClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRedisCluster_redisClusterHaWithLabelsExample(context), + }, + { + ResourceName: "google_redis_cluster.cluster-ha-with-labels", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"gcs_source", "labels", "managed_backup_source", "name", "psc_configs", "region", "terraform_labels"}, + }, + { + Config: testAccRedisCluster_redisClusterHaWithLabelsUpdate(context), + }, + { + ResourceName: "google_redis_cluster.cluster-ha-with-labels", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"gcs_source", "labels", "managed_backup_source", "name", "psc_configs", "region", "terraform_labels"}, + }, + }, + }) +} + +func testAccRedisCluster_redisClusterHaWithLabelsUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_redis_cluster" "cluster-ha-with-labels" { + name = "tf-test-ha-cluster%{random_suffix}" + shard_count = 3 + labels = { + my_key = "my_val" + other_key = "other_val" + } + psc_configs { + network = google_compute_network.consumer_net.id + } + region = "us-central1" + replica_count = 1 + node_type = "REDIS_SHARED_CORE_NANO" + transit_encryption_mode = "TRANSIT_ENCRYPTION_MODE_DISABLED" + authorization_mode = "AUTH_MODE_DISABLED" + redis_configs = { + maxmemory-policy = "volatile-ttl" + } + deletion_protection_enabled = false + + zone_distribution_config { + mode = "MULTI_ZONE" + } + maintenance_policy { + weekly_maintenance_window { + day = "MONDAY" + start_time { + hours = 1 + minutes = 0 + seconds = 0 + nanos = 0 + } + } + } + depends_on = [ + google_network_connectivity_service_connection_policy.default + ] +} + +resource "google_network_connectivity_service_connection_policy" "default" { + name = "tf-test-my-policy%{random_suffix}" + location = "us-central1" + service_class = "gcp-memorystore-redis" + description = "my basic service connection policy" + network = google_compute_network.consumer_net.id + psc_config { + 
subnetworks = [google_compute_subnetwork.consumer_subnet.id] + } +} + +resource "google_compute_subnetwork" "consumer_subnet" { + name = "tf-test-my-subnet%{random_suffix}" + ip_cidr_range = "10.0.0.248/29" + region = "us-central1" + network = google_compute_network.consumer_net.id +} + +resource "google_compute_network" "consumer_net" { + name = "tf-test-my-network%{random_suffix}" + auto_create_subnetworks = false +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_project_notification_config_test.go b/mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_project_notification_config_test.go index 0d9891f3edc3..32c379f1bc57 100644 --- a/mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_project_notification_config_test.go +++ b/mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_project_notification_config_test.go @@ -1,3 +1,7 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +// Modified 2025 by Deutsche Telekom AG + package securitycenterv2_test import ( @@ -66,7 +70,7 @@ resource "google_scc_v2_project_notification_config" "custom_notification_config project = "%{project}" description = "My custom Cloud Security Command Center Finding Notification Configuration" pubsub_topic = google_pubsub_topic.scc_v2_project_notification.id - location = "global" + location = "%{location}" streaming_config { filter = "category = \"OPEN_FIREWALL\"" @@ -86,7 +90,7 @@ resource "google_scc_v2_project_notification_config" "custom_notification_config project = "%{project}" description = "My custom Cloud Security Command Center Finding Notification Configuration" pubsub_topic = google_pubsub_topic.scc_v2_project_notification.id - location = "global" + location = "%{location}" streaming_config { filter = "" @@ -94,3 +98,113 @@ resource "google_scc_v2_project_notification_config" "custom_notification_config } `, context) } + +func testAccSecurityCenterV2ProjectNotificationConfig_withLocation(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_pubsub_topic" "scc_v2_project_notification" { + name = "tf-test-my-topic%{random_suffix}" +} + +resource "google_scc_v2_project_notification_config" "custom_notification_config" { + config_id = "tf-test-my-config%{random_suffix}" + project = "%{project}" + location = "%{location}" + description = "My custom Cloud Security Command Center Finding Notification Configuration" + pubsub_topic = google_pubsub_topic.scc_v2_project_notification.id + + streaming_config { + filter = "category = \"OPEN_FIREWALL\"" + } +} +`, context) +} + +func TestAccSecurityCenterV2ProjectNotificationConfig_locationEu(t *testing.T) { + t.Skip("Skipping: CI project does not support data residency for other locations. 
This has to be set up during SCC initialization") + t.Parallel() + + context := map[string]interface{}{ + "project": envvar.GetTestProjectFromEnv(), + "location": "eu", + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccSecurityCenterV2ProjectNotificationConfig_withLocation(context), + }, + { + ResourceName: "google_scc_v2_project_notification_config.custom_notification_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"project", "location", "config_id"}, + }, + { + Config: testAccSecurityCenterV2ProjectNotificationConfig_updateStreamingConfigFilter(context), + }, + { + ResourceName: "google_scc_v2_project_notification_config.custom_notification_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"project", "location", "config_id"}, + }, + { + Config: testAccSecurityCenterV2ProjectNotificationConfig_emptyStreamingConfigFilter(context), + }, + { + ResourceName: "google_scc_v2_project_notification_config.custom_notification_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"project", "location", "config_id"}, + }, + }, + }) +} + +func TestAccSecurityCenterV2ProjectNotificationConfig_locationUs(t *testing.T) { + t.Skip("Skipping: CI project does not support data residency for other locations. This has to be set up during SCC initialization") + t.Parallel() + + context := map[string]interface{}{ + "project": envvar.GetTestProjectFromEnv(), + "location": "us", + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccSecurityCenterV2ProjectNotificationConfig_withLocation(context), + }, + { + ResourceName: "google_scc_v2_project_notification_config.custom_notification_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"project", "location", "config_id"}, + }, + { + Config: testAccSecurityCenterV2ProjectNotificationConfig_updateStreamingConfigFilter(context), + }, + { + ResourceName: "google_scc_v2_project_notification_config.custom_notification_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"project", "location", "config_id"}, + }, + { + Config: testAccSecurityCenterV2ProjectNotificationConfig_emptyStreamingConfigFilter(context), + }, + { + ResourceName: "google_scc_v2_project_notification_config.custom_notification_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"project", "location", "config_id"}, + }, + }, + }) +} diff --git a/mmv1/third_party/terraform/services/servicedirectory/resource_service_directory_endpoint_test.go.tmpl b/mmv1/third_party/terraform/services/servicedirectory/resource_service_directory_endpoint_test.go.tmpl index a8ae37bee1f6..4a572cb03b6b 100644 --- a/mmv1/third_party/terraform/services/servicedirectory/resource_service_directory_endpoint_test.go.tmpl +++ b/mmv1/third_party/terraform/services/servicedirectory/resource_service_directory_endpoint_test.go.tmpl @@ -91,11 +91,6 @@ resource "google_service_directory_endpoint" "example" { endpoint_id = "%s" service = google_service_directory_service.example.id - metadata = { 
- stage = "prod" - region = "us-central1" - } - address = "1.2.3.4" port = 5353 } diff --git a/mmv1/third_party/terraform/services/spanner/resource_spanner_instance_test.go b/mmv1/third_party/terraform/services/spanner/resource_spanner_instance_test.go index 76bb1073187a..3dfa8eabfe35 100644 --- a/mmv1/third_party/terraform/services/spanner/resource_spanner_instance_test.go +++ b/mmv1/third_party/terraform/services/spanner/resource_spanner_instance_test.go @@ -543,6 +543,45 @@ func TestAccSpannerInstance_freeInstanceBasicUpdate(t *testing.T) { }) } +func TestAccSpannerInstance_autoscalingWithTotalCPUUtilizationPercent(t *testing.T) { + t.Parallel() + + displayName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSpannerInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSpannerInstance_autoscalingWithTotalCPUUtilizationPercent(displayName, 2000, 4000, 65, 85, 95), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_spanner_instance.test", "state"), + resource.TestCheckResourceAttr("google_spanner_instance.test", "autoscaling_config.0.autoscaling_targets.0.total_cpu_utilization_percent", "85"), + ), + }, + { + ResourceName: "google_spanner_instance.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccSpannerInstance_autoscalingWithTotalCPUUtilizationPercent(displayName, 3000, 5000, 75, 90, 95), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_spanner_instance.test", "state"), + resource.TestCheckResourceAttr("google_spanner_instance.test", "autoscaling_config.0.autoscaling_targets.0.total_cpu_utilization_percent", "90"), + ), + }, + { + ResourceName: "google_spanner_instance.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + func testAccSpannerInstance_basic(name string) string { return fmt.Sprintf(` resource "google_spanner_instance" "basic" { @@ -701,6 +740,7 @@ resource "google_spanner_instance" "basic" { } autoscaling_targets { high_priority_cpu_utilization_percent = %v + total_cpu_utilization_percent = 85 storage_utilization_percent = %v } } @@ -905,3 +945,25 @@ resource "google_spanner_instance" "main" { } `, context) } + +func testAccSpannerInstance_autoscalingWithTotalCPUUtilizationPercent(name string, minProcessingUnits, maxProcessingUnits, highPriorityCPU, totalCPU, storageUtilization int) string { + return fmt.Sprintf(` +resource "google_spanner_instance" "test" { + name = "%s" + config = "regional-us-central1" + display_name = "%s" + autoscaling_config { + autoscaling_limits { + max_processing_units = %d + min_processing_units = %d + } + autoscaling_targets { + high_priority_cpu_utilization_percent = %d + total_cpu_utilization_percent = %d + storage_utilization_percent = %d + } + } + edition = "ENTERPRISE" +} +`, name, name, maxProcessingUnits, minProcessingUnits, highPriorityCPU, totalCPU, storageUtilization) +} diff --git a/mmv1/third_party/terraform/services/sql/data_source_sql_database_instances.go b/mmv1/third_party/terraform/services/sql/data_source_sql_database_instances.go index 55b03d879137..37cb88401e32 100644 --- a/mmv1/third_party/terraform/services/sql/data_source_sql_database_instances.go +++ 
b/mmv1/third_party/terraform/services/sql/data_source_sql_database_instances.go @@ -179,6 +179,7 @@ func flattenDatasourceGoogleDatabaseInstancesList(fetchedInstances []*sqladmin.D instance["master_instance_name"] = strings.TrimPrefix(rawInstance.MasterInstanceName, project+":") instance["project"] = project instance["self_link"] = rawInstance.SelfLink + instance["psc_service_attachment_link"] = rawInstance.PscServiceAttachmentLink instances = append(instances, instance) } diff --git a/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.tmpl b/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.tmpl index 59d3874f0774..669b7679f688 100644 --- a/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.tmpl +++ b/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.tmpl @@ -70,6 +70,7 @@ var ( "settings.0.backup_configuration.0.point_in_time_recovery_enabled", "settings.0.backup_configuration.0.backup_retention_settings", "settings.0.backup_configuration.0.transaction_log_retention_days", + "settings.0.backup_configuration.0.backup_tier", } connectionPoolConfigKeys = []string{ @@ -163,7 +164,7 @@ func nodeCountCustomDiff(ctx context.Context, d *schema.ResourceDiff, meta inter // Otherwise when autoscaling is enabled, ignore the node count in config. return d.Clear("node_count") } - + func diskSizeCutomizeDiff(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error { key := "settings.0.disk_size" @@ -415,12 +416,14 @@ API (for read pools, effective_availability_type may differ from availability_ty Type: schema.TypeBool, Optional: true, AtLeastOneOf: backupConfigurationKeys, + DiffSuppressFunc: EnhancedBackupManagerDiffSuppressFunc, Description: `True if binary logging is enabled. If settings.backup_configuration.enabled is false, this must be as well. Can only be used with MySQL.`, }, "enabled": { Type: schema.TypeBool, Optional: true, AtLeastOneOf: backupConfigurationKeys, + DiffSuppressFunc: EnhancedBackupManagerDiffSuppressFunc, Description: `True if backup configuration is enabled.`, }, "start_time": { @@ -429,6 +432,7 @@ API (for read pools, effective_availability_type may differ from availability_ty // start_time is randomly assigned if not set Computed: true, AtLeastOneOf: backupConfigurationKeys, + DiffSuppressFunc: EnhancedBackupManagerDiffSuppressFunc, Description: `HH:MM format time indicating when backup configuration starts.`, }, "location": { @@ -441,6 +445,7 @@ API (for read pools, effective_availability_type may differ from availability_ty Type: schema.TypeBool, Optional: true, AtLeastOneOf: backupConfigurationKeys, + DiffSuppressFunc: EnhancedBackupManagerDiffSuppressFunc, Description: `True if Point-in-time recovery is enabled.`, }, "transaction_log_retention_days": { @@ -448,6 +453,7 @@ API (for read pools, effective_availability_type may differ from availability_ty Computed: true, Optional: true, AtLeastOneOf: backupConfigurationKeys, + DiffSuppressFunc: EnhancedBackupManagerDiffSuppressFunc, Description: `The number of days of transaction logs we retain for point in time restore, from 1-7. 
(For PostgreSQL Enterprise Plus instances, from 1 to 35.)`, }, "backup_retention_settings": { @@ -455,6 +461,7 @@ API (for read pools, effective_availability_type may differ from availability_ty Optional: true, AtLeastOneOf: backupConfigurationKeys, Computed: true, + DiffSuppressFunc: EnhancedBackupManagerDiffSuppressFunc, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -472,6 +479,11 @@ API (for read pools, effective_availability_type may differ from availability_ty }, }, }, + "backup_tier": { + Type: schema.TypeString, + Computed: true, + Description: `Backup tier that manages the backups for the instance.`, + }, }, }, }, @@ -1212,6 +1224,7 @@ API (for read pools, effective_availability_type may differ from availability_ty "psa_write_endpoint": { Type: schema.TypeString, Optional: true, + Computed: true, Description: fmt.Sprintf(`If set, this field indicates this instance has a private service access (PSA) DNS endpoint that is pointing to the primary instance of the cluster. If this instance is the primary, then the DNS endpoint points to this instance. After a switchover or replica failover operation, this DNS endpoint points to the promoted instance. This is a read-only field, returned to the user as information. This field can exist even if a standalone instance doesn't have a DR replica yet or the DR replica is deleted.`), }, "failover_dr_replica_name": { @@ -1227,7 +1240,7 @@ API (for read pools, effective_availability_type may differ from availability_ty }, }, }, - Description: "A primary instance and disaster recovery replica pair. Applicable to MySQL and PostgreSQL. This field can be set only after both the primary and replica are created.", + Description: "A primary instance and disaster recovery replica pair. Applicable to MySQL and PostgreSQL. 
This field can be set if the primary has psa_write_endpoint set or both the primary and replica are created.", }, "server_ca_cert": { Type: schema.TypeList, @@ -1530,7 +1543,6 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) if _, ok := d.GetOk("node_count"); ok { instance.NodeCount = int64(d.Get("node_count").(int)) } - if _, ok := d.GetOk("root_password_wo_version"); ok { instance.RootPassword = tpgresource.GetRawConfigAttributeAsString(d, "root_password_wo") } else if _, ok := d.GetOk("root_password"); ok { @@ -1983,6 +1995,7 @@ func expandBackupConfiguration(configured []interface{}) *sqladmin.BackupConfigu Location: _backupConfiguration["location"].(string), TransactionLogRetentionDays: int64(_backupConfiguration["transaction_log_retention_days"].(int)), PointInTimeRecoveryEnabled: _backupConfiguration["point_in_time_recovery_enabled"].(bool), + BackupTier: _backupConfiguration["backup_tier"].(string), ForceSendFields: []string{"BinaryLogEnabled", "Enabled", "PointInTimeRecoveryEnabled"}, } } @@ -2184,6 +2197,9 @@ func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) e if err := d.Set("maintenance_version", instance.MaintenanceVersion); err != nil { return fmt.Errorf("Error setting maintenance_version: %s", err) } + if err := d.Set("psc_service_attachment_link", instance.PscServiceAttachmentLink); err != nil { + return fmt.Errorf("Error setting psc_service_attachment_link: %s", err) + } if err := d.Set("available_maintenance_versions", instance.AvailableMaintenanceVersions); err != nil { return fmt.Errorf("Error setting available_maintenance_version: %s", err) } @@ -2571,15 +2587,31 @@ func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) instance.NodeCount = int64(d.Get("node_count").(int)) } + if _, ok := d.GetOk("psc_service_attachment_link"); ok { + instance.PscServiceAttachmentLink = d.Get("psc_service_attachment_link").(string) + } + // Database Version is required for all calls with Google ML integration enabled or it will be rejected by the API. if d.Get("settings.0.enable_google_ml_integration").(bool) || len(_settings["connection_pool_config"].(*schema.Set).List()) > 0 { instance.DatabaseVersion = databaseVersion } failoverDrReplicaName := d.Get("replication_cluster.0.failover_dr_replica_name").(string) - if failoverDrReplicaName != "" { - instance.ReplicationCluster = &sqladmin.ReplicationCluster{ - FailoverDrReplicaName: failoverDrReplicaName, + psaWriteEndpoint := d.Get("replication_cluster.0.psa_write_endpoint").(string) + if failoverDrReplicaName != "" || psaWriteEndpoint != "" { + if failoverDrReplicaName != "" && psaWriteEndpoint != "" { + instance.ReplicationCluster = &sqladmin.ReplicationCluster{ + FailoverDrReplicaName: failoverDrReplicaName, + PsaWriteEndpoint: psaWriteEndpoint, + } + } else if failoverDrReplicaName != "" { + instance.ReplicationCluster = &sqladmin.ReplicationCluster{ + FailoverDrReplicaName: failoverDrReplicaName, + } + } else { + instance.ReplicationCluster = &sqladmin.ReplicationCluster{ + PsaWriteEndpoint: psaWriteEndpoint, + } } } @@ -2655,6 +2687,25 @@ func maintenanceVersionDiffSuppress(_, old, new string, _ *schema.ResourceData) } } +// enhancedBackupManagerDiffSuppressFunc suppresses diff changes to settings.backup_configuration +// when the SQL instance is managed by Google Cloud Backup and Disaster Recovery (DR) Service. 
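+//
+// How the SDK consumes this (descriptive note; the snippet below simply mirrors
+// the schema declarations earlier in this file): a DiffSuppressFunc receives the
+// flattened attribute key plus the old (state) and new (config) values, and
+// returning true hides that attribute's difference from the plan. This function
+// is attached to the backup_configuration sub-fields, for example:
+//
+//    "point_in_time_recovery_enabled": {
+//        Type:             schema.TypeBool,
+//        Optional:         true,
+//        AtLeastOneOf:     backupConfigurationKeys,
+//        DiffSuppressFunc: EnhancedBackupManagerDiffSuppressFunc,
+//    },
+//
+// so drift in those fields is ignored only while the API reports backup_tier as
+// "ENHANCED"; instances without an enhanced backup tier keep normal diff behavior.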
+func EnhancedBackupManagerDiffSuppressFunc(k, old, new string, d *schema.ResourceData) bool { + // If the API marks this instance as ENHANCED-managed, suppress diffs for backup config fields. + tier, _ := d.Get("settings.0.backup_configuration.0.backup_tier").(string) + if tier == "" { + return false + } + if strings.EqualFold(tier, "ENHANCED") { + log.Printf( + "[INFO] Instance %s is managed by Google Cloud Backup and Disaster Recovery (BackupDR). "+ + "Terraform will not manage the '%s' field. "+ + "Any changes to this field in your Terraform configuration will be ignored.", d.Get("name"), k, + ) + return true + } + return false +} + func databaseVersionDiffSuppress(_, oldVersion, newVersion string, _ *schema.ResourceData) bool { // Suppress diff when newVersion is MYSQL_8_0 and oldVersion is >= MYSQL_8_0_35 for MySQL version auto-upgrade cases. if newVersion == "MYSQL_8_0" && strings.HasPrefix(oldVersion, "MYSQL_8_0_") { @@ -2891,6 +2942,7 @@ func flattenBackupConfiguration(backupConfiguration *sqladmin.BackupConfiguratio "point_in_time_recovery_enabled": backupConfiguration.PointInTimeRecoveryEnabled, "backup_retention_settings": flattenBackupRetentionSettings(backupConfiguration.BackupRetentionSettings), "transaction_log_retention_days": backupConfiguration.TransactionLogRetentionDays, + "backup_tier": backupConfiguration.BackupTier, } return []map[string]interface{}{data} diff --git a/mmv1/third_party/terraform/services/sql/resource_sql_database_instance_meta.yaml.tmpl b/mmv1/third_party/terraform/services/sql/resource_sql_database_instance_meta.yaml.tmpl index e354b447ed50..8e305f6312b5 100644 --- a/mmv1/third_party/terraform/services/sql/resource_sql_database_instance_meta.yaml.tmpl +++ b/mmv1/third_party/terraform/services/sql/resource_sql_database_instance_meta.yaml.tmpl @@ -79,6 +79,10 @@ fields: - field: 'restore_backup_context.instance_id' - field: 'restore_backup_context.project' - api_field: 'rootPassword' + - api_field: 'rootPassword' + field: 'root_password_wo' + - field: 'root_password_wo_version' + provider_only: true - api_field: 'selfLink' - api_field: 'serverCaCert.cert' - api_field: 'serverCaCert.commonName' @@ -94,6 +98,7 @@ fields: field: 'settings.effective_availability_type' - api_field: 'settings.backupConfiguration.backupRetentionSettings.retainedBackups' - api_field: 'settings.backupConfiguration.backupRetentionSettings.retentionUnit' + - api_field: 'settings.backupConfiguration.backupTier' - api_field: 'settings.backupConfiguration.binaryLogEnabled' - api_field: 'settings.backupConfiguration.enabled' - api_field: 'settings.backupConfiguration.location' diff --git a/mmv1/third_party/terraform/services/sql/resource_sql_database_instance_test.go.tmpl b/mmv1/third_party/terraform/services/sql/resource_sql_database_instance_test.go.tmpl index b19f0a29479b..a0a36a42970f 100644 --- a/mmv1/third_party/terraform/services/sql/resource_sql_database_instance_test.go.tmpl +++ b/mmv1/third_party/terraform/services/sql/resource_sql_database_instance_test.go.tmpl @@ -18,6 +18,7 @@ import ( {{- end }} "github.com/hashicorp/terraform-plugin-testing/terraform" sqladmin "google.golang.org/api/sqladmin/v1beta4" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) // Fields that should be ignored in import tests because they aren't returned @@ -2704,6 +2705,10 @@ func TestAccSqlDatabaseInstance_SqlServerTimezoneUpdate(t *testing.T) { func TestAccSqlDatabaseInstance_activationPolicy(t *testing.T) { t.Parallel() + // Skip in VCR until the test issue is resolved + // 
https://github.com/hashicorp/terraform-provider-google/issues/24593 + acctest.SkipIfVcr(t) + instanceName := "tf-test-" + acctest.RandString(t, 10) acctest.VcrTest(t, resource.TestCase{ @@ -3063,6 +3068,84 @@ func TestAccSqlDatabaseInstance_SwitchoverSuccess(t *testing.T) { }) } +func TestAccSqlDatabaseInstance_MysqlEplusWithFailoverReplicaSetupWithPrivateNetwork(t *testing.T) { + t.Parallel() + + primaryName := "tf-test-mysql-primary-" + acctest.RandString(t, 10) + replicaName := "tf-test-mysql-replica-" + acctest.RandString(t, 10) + networkName := acctest.BootstrapSharedServiceNetworkingConnection(t, "endpoint") + projectId := envvar.GetTestProjectFromEnv() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccSqlDatabaseInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testGoogleSqlDatabaseInstance_mySqlEplusPrimaryReplicaSetupWithPrivateNetwork(projectId, primaryName, replicaName, networkName, "MYSQL_8_0"), + Check: resource.ComposeTestCheckFunc(verifyCreateOperationOnEplusWithPrivateNetwork("google_sql_database_instance.primary")), + }, + { + ResourceName: "google_sql_database_instance.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateIdPrefix: fmt.Sprintf("%s/", projectId), + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + ResourceName: "google_sql_database_instance.replica", + ImportState: true, + ImportStateVerify: true, + ImportStateIdPrefix: fmt.Sprintf("%s/", projectId), + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testGoogleSqlDatabaseInstance_setMySqlFailoverReplicaEplusWithPrivateNetwork(projectId, primaryName, replicaName, networkName, "MYSQL_8_0"), + Check: resource.ComposeTestCheckFunc(verifyCreateOperationOnEplusWithPrivateNetwork("google_sql_database_instance.primary")), + }, + }, + }) +} + +func TestAccSqlDatabaseInstance_PostgresEplusWithFailoverReplicaSetupWithPrivateNetwork(t *testing.T) { + t.Parallel() + + primaryName := "tf-test-postgres-primary-" + acctest.RandString(t, 10) + replicaName := "tf-test-postgres-replica-" + acctest.RandString(t, 10) + networkName := acctest.BootstrapSharedServiceNetworkingConnection(t, "endpoint") + projectId := envvar.GetTestProjectFromEnv() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccSqlDatabaseInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testGoogleSqlDatabaseInstance_postgresEplusPrimaryReplicaSetupWithPrivateNetwork(projectId, primaryName, replicaName, networkName, "POSTGRES_12"), + Check: resource.ComposeTestCheckFunc(verifyCreateOperationOnEplusWithPrivateNetwork("google_sql_database_instance.primary")), + }, + { + ResourceName: "google_sql_database_instance.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateIdPrefix: fmt.Sprintf("%s/", projectId), + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + ResourceName: "google_sql_database_instance.replica", + ImportState: true, + ImportStateVerify: true, + ImportStateIdPrefix: fmt.Sprintf("%s/", projectId), + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testGoogleSqlDatabaseInstance_setPostgresFailoverReplicaEplusWithPrivateNetwork(projectId, primaryName, replicaName, networkName, "POSTGRES_12"), + Check: 
resource.ComposeTestCheckFunc(verifyCreateOperationOnEplusWithPrivateNetwork("google_sql_database_instance.primary")), + }, + }, + }) +} + func TestAccSqlDatabaseInstance_MysqlEplusWithPrivateNetwork(t *testing.T) { t.Parallel() @@ -3925,6 +4008,467 @@ func TestAccSqlDatabaseInstance_DiskSizeAutoResizeWithDiskSize(t *testing.T) { }) } +func TestEnhancedBackupManagerDiffSuppressFunc(t *testing.T) { + cases := map[string]struct { + key string + old string + new string + backupTier string + expectSuppress bool + description string + }{ + "suppress enabled diff when backup tier is enhanced": { + key: "settings.0.backup_configuration.0.enabled", + old: "true", + new: "false", + backupTier: "ENHANCED", + expectSuppress: true, + description: "enabled should be suppressed when ENHANCED", + }, + "suppress start_time diff when backup tier is enhanced": { + key: "settings.0.backup_configuration.0.start_time", + old: "03:00", + new: "05:00", + backupTier: "ENHANCED", + expectSuppress: true, + description: "start_time should be suppressed when ENHANCED", + }, + "suppress binary_log_enabled diff when backup tier is enhanced": { + key: "settings.0.backup_configuration.0.binary_log_enabled", + old: "true", + new: "false", + backupTier: "ENHANCED", + expectSuppress: true, + description: "binary_log_enabled should be suppressed when ENHANCED", + }, + "suppress point_in_time_recovery_enabled diff when backup tier is enhanced": { + key: "settings.0.backup_configuration.0.point_in_time_recovery_enabled", + old: "false", + new: "true", + backupTier: "ENHANCED", + expectSuppress: true, + description: "point_in_time_recovery_enabled should be suppressed when ENHANCED", + }, + "suppress transaction_log_retention_days diff when backup tier is enhanced": { + key: "settings.0.backup_configuration.0.transaction_log_retention_days", + old: "7", + new: "14", + backupTier: "ENHANCED", + expectSuppress: true, + description: "transaction_log_retention_days should be suppressed when ENHANCED", + }, + "suppress retained_backups diff when backup tier is enhanced": { + key: "settings.0.backup_configuration.0.backup_retention_settings.0.retained_backups", + old: "7", + new: "14", + backupTier: "ENHANCED", + expectSuppress: true, + description: "retained_backups should be suppressed when ENHANCED", + }, + "do not suppress diff when backup tier is standard": { + key: "settings.0.backup_configuration.0.enabled", + old: "true", + new: "false", + backupTier: "STANDARD", + expectSuppress: false, + description: "enabled should NOT be suppressed when STANDARD", + }, + "do not suppress diff when backup tier is empty": { + key: "settings.0.backup_configuration.0.enabled", + old: "true", + new: "false", + backupTier: "", + expectSuppress: false, + description: "enabled should NOT be suppressed when backup_tier is empty", + }, + "do not suppress when old and new are same": { + key: "settings.0.backup_configuration.0.enabled", + old: "true", + new: "true", + backupTier: "ENHANCED", + expectSuppress: true, + description: "no diff to suppress when old and new are same", + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + // Build real *schema.ResourceData using the resource schema and TestResourceDataRaw + // Put backup_tier into the nested settings->backup_configuration block (state) + rd := schema.TestResourceDataRaw(t, sql.ResourceSqlDatabaseInstance().Schema, map[string]interface{}{ + "name": "test-instance", + "settings": []interface{}{ + map[string]interface{}{ + "backup_configuration": []interface{}{ + 
map[string]interface{}{ + "backup_tier": tc.backupTier, + }, + }, + }, + }, + }) + + suppressed := sql.EnhancedBackupManagerDiffSuppressFunc(tc.key, tc.old, tc.new, rd) + + if suppressed != tc.expectSuppress { + t.Errorf("expected suppressed to be %v but got %v. Desc: %s", tc.expectSuppress, suppressed, tc.description) + } + }) + } +} + +func TestAccSqlDatabaseInstance_updateInstanceTierForEnhancedBackupTierInstance(t *testing.T) { + t.Parallel() + + backupVaultID := "bv-test" + location := "us-central1" + project := envvar.GetTestProjectFromEnv() + backupVault := acctest.BootstrapBackupDRVault(t, backupVaultID, location) + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "project": project, + "backup_vault_id": backupVaultID, + "backup_vault": backupVault, + "db_version": "MYSQL_8_0_41", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccSqlDatabaseInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + // Create backup plan and associate with instance + Config: testGoogleSqlDatabaseInstance_attachGCBDR(context), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrSet("google_backup_dr_backup_plan_association.backup_association", "id"), + ), + }, + { + // Update instance backup tier to ENHANCED, which should ignore backup_configuration settings + Config: testGoogleSqlDatabaseInstance_updateTierForGcbdrManagedInstance(context), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("google_sql_database_instance.instance", "settings.0.tier", "db-g1-small"), + ), + }, + }, + }) +} + +func TestAccSqlDatabaseInstance_majorVersionUpgradeForEnhancedBackupTierInstance(t *testing.T) { + t.Parallel() + + backupVault := acctest.BootstrapBackupDRVault(t, "bv-test", "us-central1") + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "project": envvar.GetTestProjectFromEnv(), + "backup_vault": backupVault, + "db_version": "MYSQL_8_0_41", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccSqlDatabaseInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + // Create backup plan and associate with instance + Config: testGoogleSqlDatabaseInstance_attachGCBDR(context), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrSet("google_backup_dr_backup_plan_association.backup_association", "id"), + ), + }, + { + // Update instance database version to a new major version, which should ignore backup_configuration settings + Config: testGoogleSqlDatabaseInstance_majorVersionUpgradeGcbdrManagedInstance(context), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("google_sql_database_instance.instance", "database_version", "MYSQL_8_0_42"), + ), + }, + }, + }) +} + +func TestAccSqlDatabaseInstance_editionUpdateForEnhancedBackupTierInstance(t *testing.T) { + t.Parallel() + + backupVault := acctest.BootstrapBackupDRVault(t, "bv-test", "us-central1") + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "project": envvar.GetTestProjectFromEnv(), + "backup_vault": backupVault, + "db_version": "MYSQL_8_0_41", + "edition": "ENTERPRISE", + "tier": "db-f1-micro", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccSqlDatabaseInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + // Create backup plan and associate with instance + Config: testGoogleSqlDatabaseInstance_attachGCBDR(context), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrSet("google_backup_dr_backup_plan_association.backup_association", "id"), + ), + }, + { + // Edition upgrade, which should ignore backup_configuration settings + Config: testGoogleSqlDatabaseInstance_editionUpdateForGcbdrManagedInstance(context, "ENTERPRISE_PLUS", "db-perf-optimized-N-4"), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("google_sql_database_instance.instance", "settings.0.edition", "ENTERPRISE_PLUS"), + ), + }, + { + // Edition downgrade, which should ignore backup_configuration settings + Config: testGoogleSqlDatabaseInstance_editionUpdateForGcbdrManagedInstance(context, "ENTERPRISE", "db-f1-micro"), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("google_sql_database_instance.instance", "settings.0.edition", "ENTERPRISE"), + ), + }, + + }, + }) +} + +func testGoogleSqlDatabaseInstance_attachGCBDR(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" {} + +resource "google_sql_database_instance" "instance" { + name = "tf-test-instance-%{random_suffix}" + database_version = "%{db_version}" + region = "us-central1" + + settings { + tier = "db-f1-micro" + } + deletion_protection = false +} + +resource "google_backup_dr_backup_plan" "plan" { + location = "us-central1" + backup_plan_id = "tf-test-bp-test-%{random_suffix}" + resource_type = "sqladmin.googleapis.com/Instance" + backup_vault = "%{backup_vault}" + + backup_rules { + rule_id = "rule-1" + backup_retention_days = 7 + + standard_schedule { + recurrence_type = "DAILY" + hourly_frequency = 6 + time_zone = "UTC" + + backup_window { + start_hour_of_day = 0 + end_hour_of_day = 23 + } + } + } +} + +resource "google_backup_dr_backup_plan_association" "backup_association" { + location = "us-central1" + backup_plan_association_id = "tf-test-bpa-test-%{random_suffix}" + resource = "projects/${data.google_project.project.project_id}/instances/${google_sql_database_instance.instance.name}" + resource_type = "sqladmin.googleapis.com/Instance" + backup_plan = google_backup_dr_backup_plan.plan.name +} +`, context) +} + +func testGoogleSqlDatabaseInstance_updateTierForGcbdrManagedInstance(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" {} + +resource "google_sql_database_instance" "instance" { + name = "tf-test-instance-%{random_suffix}" + database_version = "%{db_version}" + region = "us-central1" + + settings { + tier = "db-g1-small" + + backup_configuration { + enabled = false + binary_log_enabled = false + start_time = "05:00" + + backup_retention_settings { + retained_backups = 8 + retention_unit = "COUNT" + } + } + } + deletion_protection = false +} + +resource "google_backup_dr_backup_plan" "plan" { + location = "us-central1" + backup_plan_id = "tf-test-bp-test-%{random_suffix}" + resource_type = "sqladmin.googleapis.com/Instance" + backup_vault = "%{backup_vault}" + + backup_rules { + rule_id = "rule-1" + backup_retention_days = 7 + + standard_schedule { + recurrence_type = "DAILY" + hourly_frequency = 6 + time_zone = "UTC" + + backup_window { + start_hour_of_day = 0 + 
end_hour_of_day = 23 + } + } + } +} + +resource "google_backup_dr_backup_plan_association" "backup_association" { + location = "us-central1" + backup_plan_association_id = "tf-test-bpa-test-%{random_suffix}" + resource = "projects/${data.google_project.project.project_id}/instances/${google_sql_database_instance.instance.name}" + resource_type = "sqladmin.googleapis.com/Instance" + backup_plan = google_backup_dr_backup_plan.plan.name +} +`, context) +} + +func testGoogleSqlDatabaseInstance_majorVersionUpgradeGcbdrManagedInstance(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" {} + +resource "google_sql_database_instance" "instance" { + name = "tf-test-instance-%{random_suffix}" + database_version = "MYSQL_8_0_42" + region = "us-central1" + + settings { + tier = "db-f1-micro" + + backup_configuration { + enabled = false + binary_log_enabled = false + start_time = "05:00" + + backup_retention_settings { + retained_backups = 8 + retention_unit = "COUNT" + } + } + } + deletion_protection = false +} + +resource "google_backup_dr_backup_plan" "plan" { + location = "us-central1" + backup_plan_id = "tf-test-bp-test-%{random_suffix}" + resource_type = "sqladmin.googleapis.com/Instance" + backup_vault = "%{backup_vault}" + + backup_rules { + rule_id = "rule-1" + backup_retention_days = 7 + + standard_schedule { + recurrence_type = "DAILY" + hourly_frequency = 6 + time_zone = "UTC" + + backup_window { + start_hour_of_day = 0 + end_hour_of_day = 23 + } + } + } +} + +resource "google_backup_dr_backup_plan_association" "backup_association" { + location = "us-central1" + backup_plan_association_id = "tf-test-bpa-test-%{random_suffix}" + resource = "projects/${data.google_project.project.project_id}/instances/${google_sql_database_instance.instance.name}" + resource_type = "sqladmin.googleapis.com/Instance" + backup_plan = google_backup_dr_backup_plan.plan.name +} +`, context) +} + +func testGoogleSqlDatabaseInstance_editionUpdateForGcbdrManagedInstance(context map[string]interface{}, edition string, tier string) string { + // Create a copy of the context map to avoid modifying the map from the caller + localContext := make(map[string]interface{}) + for k, v := range context { + localContext[k] = v + } + localContext["edition"] = edition + localContext["tier"] = tier + return acctest.Nprintf(` +data "google_project" "project" {} + +resource "google_sql_database_instance" "instance" { + name = "tf-test-instance-%{random_suffix}" + database_version = "%{db_version}" + region = "us-central1" + + settings { + tier = "%{tier}" + edition = "%{edition}" + + backup_configuration { + enabled = false + binary_log_enabled = false + start_time = "05:00" + + backup_retention_settings { + retained_backups = 8 + retention_unit = "COUNT" + } + } + } + deletion_protection = false +} + +resource "google_backup_dr_backup_plan" "plan" { + location = "us-central1" + backup_plan_id = "tf-test-bp-test-%{random_suffix}" + resource_type = "sqladmin.googleapis.com/Instance" + backup_vault = "%{backup_vault}" + + backup_rules { + rule_id = "rule-1" + backup_retention_days = 7 + + standard_schedule { + recurrence_type = "DAILY" + hourly_frequency = 6 + time_zone = "UTC" + + backup_window { + start_hour_of_day = 0 + end_hour_of_day = 23 + } + } + } +} + +resource "google_backup_dr_backup_plan_association" "backup_association" { + location = "us-central1" + backup_plan_association_id = "tf-test-bpa-test-%{random_suffix}" + resource = 
"projects/${data.google_project.project.project_id}/instances/${google_sql_database_instance.instance.name}" + resource_type = "sqladmin.googleapis.com/Instance" + backup_plan = google_backup_dr_backup_plan.plan.name +} +`, localContext) +} + func testGoogleSqlDatabaseInstance_setCustomSubjectAlternateName(context map[string]interface{}) string { return acctest.Nprintf(` data "google_project" "project" { @@ -4963,6 +5507,220 @@ resource "google_sql_database_instance" "original-replica" { `, project, primaryName, project, replicaName) } +func testGoogleSqlDatabaseInstance_setMySqlFailoverReplicaEplusWithPrivateNetwork(project, primaryName, replicaName, networkName, databaseVersion string) string { + + return fmt.Sprintf(` +data "google_compute_network" "servicenet" { + name = "%s" +} + +resource "google_sql_database_instance" "primary" { + project = "%s" + name = "%s" + region = "us-east1" + database_version = "%s" + instance_type = "CLOUD_SQL_INSTANCE" + deletion_protection = false + + replication_cluster { + failover_dr_replica_name = "%s" + } + + settings { + tier = "db-perf-optimized-N-2" + edition = "ENTERPRISE_PLUS" + ip_configuration { + ipv4_enabled = false + private_network = data.google_compute_network.servicenet.self_link + } + backup_configuration { + enabled = true + binary_log_enabled = true + } + } +} + +resource "google_sql_database_instance" "replica" { + project = "%s" + name = "%s" + region = "us-west2" + database_version = "%s" + instance_type = "READ_REPLICA_INSTANCE" + master_instance_name = google_sql_database_instance.primary.name + deletion_protection = false + + settings { + tier = "db-perf-optimized-N-2" + edition = "ENTERPRISE_PLUS" + ip_configuration { + ipv4_enabled = false + private_network = data.google_compute_network.servicenet.self_link + } + backup_configuration { + binary_log_enabled = true + } + } +} +`, networkName, project, primaryName, databaseVersion, replicaName, project, replicaName, databaseVersion) +} + +func testGoogleSqlDatabaseInstance_setPostgresFailoverReplicaEplusWithPrivateNetwork(project, primaryName, replicaName, networkName, databaseVersion string) string { + + return fmt.Sprintf(` +data "google_compute_network" "servicenet" { + name = "%s" +} + +resource "google_sql_database_instance" "primary" { + project = "%s" + name = "%s" + region = "us-east1" + database_version = "%s" + instance_type = "CLOUD_SQL_INSTANCE" + deletion_protection = false + + replication_cluster { + failover_dr_replica_name = "%s" + } + + settings { + tier = "db-perf-optimized-N-2" + edition = "ENTERPRISE_PLUS" + ip_configuration { + ipv4_enabled = false + private_network = data.google_compute_network.servicenet.self_link + } + backup_configuration { + enabled = true + point_in_time_recovery_enabled = true + } + } +} + +resource "google_sql_database_instance" "replica" { + project = "%s" + name = "%s" + region = "us-west2" + database_version = "%s" + instance_type = "READ_REPLICA_INSTANCE" + master_instance_name = google_sql_database_instance.primary.name + deletion_protection = false + + settings { + tier = "db-perf-optimized-N-2" + edition = "ENTERPRISE_PLUS" + ip_configuration { + ipv4_enabled = false + private_network = data.google_compute_network.servicenet.self_link + } + } +} +`, networkName, project, primaryName, databaseVersion, replicaName, project, replicaName, databaseVersion) +} + +func testGoogleSqlDatabaseInstance_mySqlEplusPrimaryReplicaSetupWithPrivateNetwork(project, primaryName, replicaName, networkName, databaseVersion string) string { + + 
return fmt.Sprintf(` +data "google_compute_network" "servicenet" { + name = "%s" +} + +resource "google_sql_database_instance" "primary" { + project = "%s" + name = "%s" + region = "us-east1" + database_version = "%s" + instance_type = "CLOUD_SQL_INSTANCE" + deletion_protection = false + + settings { + tier = "db-perf-optimized-N-2" + edition = "ENTERPRISE_PLUS" + ip_configuration { + ipv4_enabled = false + private_network = data.google_compute_network.servicenet.self_link + } + backup_configuration { + enabled = true + binary_log_enabled = true + } + } +} + +resource "google_sql_database_instance" "replica" { + project = "%s" + name = "%s" + region = "us-west2" + database_version = "%s" + instance_type = "READ_REPLICA_INSTANCE" + master_instance_name = google_sql_database_instance.primary.name + deletion_protection = false + + settings { + tier = "db-perf-optimized-N-2" + edition = "ENTERPRISE_PLUS" + ip_configuration { + ipv4_enabled = false + private_network = data.google_compute_network.servicenet.self_link + } + backup_configuration { + binary_log_enabled = true + } + } +} +`, networkName, project, primaryName, databaseVersion, project, replicaName, databaseVersion) +} + +func testGoogleSqlDatabaseInstance_postgresEplusPrimaryReplicaSetupWithPrivateNetwork(project, primaryName, replicaName, networkName, databaseVersion string) string { + + return fmt.Sprintf(` +data "google_compute_network" "servicenet" { + name = "%s" +} + +resource "google_sql_database_instance" "primary" { + project = "%s" + name = "%s" + region = "us-east1" + database_version = "%s" + instance_type = "CLOUD_SQL_INSTANCE" + deletion_protection = false + + settings { + tier = "db-perf-optimized-N-2" + edition = "ENTERPRISE_PLUS" + ip_configuration { + ipv4_enabled = false + private_network = data.google_compute_network.servicenet.self_link + } + backup_configuration { + enabled = true + point_in_time_recovery_enabled = true + } + } +} + +resource "google_sql_database_instance" "replica" { + project = "%s" + name = "%s" + region = "us-west2" + database_version = "%s" + instance_type = "READ_REPLICA_INSTANCE" + master_instance_name = google_sql_database_instance.primary.name + deletion_protection = false + + settings { + tier = "db-perf-optimized-N-2" + edition = "ENTERPRISE_PLUS" + ip_configuration { + ipv4_enabled = false + private_network = data.google_compute_network.servicenet.self_link + } + } +} +`, networkName, project, primaryName, databaseVersion, project, replicaName, databaseVersion) +} + func googleSqlDatabaseInstance_mysqlSetFailoverReplica(project, primaryName, replicaName string, useNormalizedDrReplicaName bool) string { drReplicaName := fmt.Sprintf("%s:%s", project, replicaName) if !useNormalizedDrReplicaName { @@ -5800,6 +6558,11 @@ func verifyPscOperation(resourceName string, isPscConfigExpected bool, expectedP if !ok || allowedConsumerProjects != len(expectedAllowedConsumerProjects) { return fmt.Errorf("settings.0.ip_configuration.0.psc_config.0.allowed_consumer_projects property is not present or set as expected in state of %s", resourceName) } + + pscServiceAttachmentLink, ok := resourceAttributes["psc_service_attachment_link"] + if !ok || pscServiceAttachmentLink == "" { + return fmt.Errorf("psc_service_attachment_link property value is empty") + } } return nil diff --git a/mmv1/third_party/terraform/services/storage/data_source_storage_object_signed_url.go b/mmv1/third_party/terraform/services/storage/data_source_storage_object_signed_url.go index 5936e8d39178..fefabfff6813 100644 --- 
a/mmv1/third_party/terraform/services/storage/data_source_storage_object_signed_url.go +++ b/mmv1/third_party/terraform/services/storage/data_source_storage_object_signed_url.go @@ -141,7 +141,12 @@ func dataSourceGoogleSignedUrlRead(d *schema.ResourceData, meta interface{}) err } } - urlData.Path = fmt.Sprintf("/%s/%s", d.Get("bucket").(string), d.Get("path").(string)) + bucketName := d.Get("bucket").(string) + objectPath := d.Get("path").(string) + baseUrl := getGcsHostUrl(urlData, bucketName, objectPath) + + // sign path should be same in both cases as we are using v2 signature + urlData.SignPath = fmt.Sprintf("/%s/%s", bucketName, objectPath) // Load JWT Config from Google Credentials jwtConfig, err := loadJwtConfig(d, config) @@ -151,7 +156,7 @@ func dataSourceGoogleSignedUrlRead(d *schema.ResourceData, meta interface{}) err urlData.JwtConfig = jwtConfig // Construct URL - signedUrl, err := urlData.SignedUrl() + signedUrl, err := urlData.SignedUrl(baseUrl) if err != nil { return err } @@ -208,6 +213,25 @@ func loadJwtConfig(d *schema.ResourceData, meta interface{}) (*jwt.Config, error return nil, errors.New("Credentials not found in datasource, provider configuration or GOOGLE_APPLICATION_CREDENTIALS environment variable.") } +func getGcsHostUrl(urlData *UrlData, bucketName, objectPath string) string { + var baseUrl string + if strings.Contains(bucketName, ".") { + // Use path-style URL as "." in the bucket name create invalid virtual hostnames + // Signed URL format https://storage.googleapis.com/tf-test-bucket-6159205297736845881/path/to/object + // Path format is bucket_name/object_path + urlData.Path = fmt.Sprintf("/%s/%s", bucketName, objectPath) + baseUrl = gcsBaseUrl + } else { + // default to always virtual style URL + // URL format https://tf-test-bucket-6159205297736845881.storage.googleapis.com//path/to/object + // Path format is object_path + urlData.Path = fmt.Sprintf("/%s", objectPath) + gcsUrl := strings.Split(gcsBaseUrl, "://") + baseUrl = fmt.Sprintf("%s://%s.%s", gcsUrl[0], bucketName, gcsUrl[1]) + } + return baseUrl +} + // parsePrivateKey converts the binary contents of a private key file // to an *rsa.PrivateKey. It detects whether the private key is in a // PEM container or not. If so, it extracts the the private key @@ -241,7 +265,9 @@ type UrlData struct { HttpMethod string Expires int HttpHeaders map[string]string - Path string + SignPath string + // Internally used field derived for virtual-host or path-style. 
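+ // SignPath always holds "/<bucket>/<object>" because the V2 signing string is computed over the
+ // bucket-qualified path, while Path holds only the portion appended to the base URL:
+ // "/<object>" for virtual-hosted-style requests and "/<bucket>/<object>" for path-style requests.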
+ Path string } // SigningString creates a string representation of the UrlData in a form ready for signing: @@ -285,7 +311,7 @@ func (u *UrlData) SigningString() []byte { } // Storage Object path (includes bucketname) - buf.WriteString(u.Path) + buf.WriteString(u.SignPath) return buf.Bytes() } @@ -317,7 +343,7 @@ func (u *UrlData) EncodedSignature() (string, error) { } // SignedUrl constructs the final signed URL a client can use to retrieve storage object -func (u *UrlData) SignedUrl() (string, error) { +func (u *UrlData) SignedUrl(baseUrl string) (string, error) { encodedSig, err := u.EncodedSignature() if err != nil { @@ -327,7 +353,7 @@ func (u *UrlData) SignedUrl() (string, error) { // build url // https://cloud.google.com/storage/docs/access-control/create-signed-urls-program var urlBuffer bytes.Buffer - urlBuffer.WriteString(gcsBaseUrl) + urlBuffer.WriteString(baseUrl) urlBuffer.WriteString(u.Path) urlBuffer.WriteString("?GoogleAccessId=") urlBuffer.WriteString(u.JwtConfig.Email) diff --git a/mmv1/third_party/terraform/services/storage/data_source_storage_object_signed_url_internal_test.go b/mmv1/third_party/terraform/services/storage/data_source_storage_object_signed_url_internal_test.go index 46a712114ce3..1137b739bc4f 100644 --- a/mmv1/third_party/terraform/services/storage/data_source_storage_object_signed_url_internal_test.go +++ b/mmv1/third_party/terraform/services/storage/data_source_storage_object_signed_url_internal_test.go @@ -30,16 +30,19 @@ const fakeCredentials = `{ // URL HTTP Method Expiration Signed URL // gs://tf-test-bucket-6159205297736845881/path/to/file GET 2016-08-12 14:03:30 https://storage.googleapis.com/tf-test-bucket-6159205297736845881/path/to/file?GoogleAccessId=user@gcp-project.iam.gserviceaccount.com&Expires=1470967410&Signature=JJvE2Jc%2BeoagyS1qRACKBGUkgLkKjw7cGymHhtB4IzzN3nbXDqr0acRWGy0%2BEpZ3HYNDalEYsK0lR9Q0WCgty5I0JKmPIuo9hOYa1xTNH%2B22xiWsekxGV%2FcA9FXgWpi%2BFt7fBmMk4dhDe%2BuuYc7N79hd0FYuSBNW1Wp32Bluoe4SNkNAB%2BuIDd9KqPzqs09UAbBoz2y4WxXOQnRyR8GAfb8B%2FDtv62gYjtmp%2F6%2Fyr6xj7byWKZdQt8kEftQLTQmP%2F17Efjp6p%2BXo71Q0F9IhAFiqWfp3Ij8hHDSebLcVb2ULXyHNNQpHBOhFgALrFW3I6Uc3WciLEOsBS9Ej3EGdTg%3D%3D -const testUrlPath = "/tf-test-bucket-6159205297736845881/path/to/file" +const testUrlPath = "path/to/file" +const testSignUrlPath = "/tf-test-bucket-6159205297736845881/path/to/file" const testUrlExpires = 1470967410 +const testBucketName = "tf-test-bucket-6159205297736845881" const testUrlExpectedSignatureBase64Encoded = "JJvE2Jc%2BeoagyS1qRACKBGUkgLkKjw7cGymHhtB4IzzN3nbXDqr0acRWGy0%2BEpZ3HYNDalEYsK0lR9Q0WCgty5I0JKmPIuo9hOYa1xTNH%2B22xiWsekxGV%2FcA9FXgWpi%2BFt7fBmMk4dhDe%2BuuYc7N79hd0FYuSBNW1Wp32Bluoe4SNkNAB%2BuIDd9KqPzqs09UAbBoz2y4WxXOQnRyR8GAfb8B%2FDtv62gYjtmp%2F6%2Fyr6xj7byWKZdQt8kEftQLTQmP%2F17Efjp6p%2BXo71Q0F9IhAFiqWfp3Ij8hHDSebLcVb2ULXyHNNQpHBOhFgALrFW3I6Uc3WciLEOsBS9Ej3EGdTg%3D%3D" -const testUrlExpectedUrl = "https://storage.googleapis.com/tf-test-bucket-6159205297736845881/path/to/file?GoogleAccessId=user@gcp-project.iam.gserviceaccount.com&Expires=1470967410&Signature=JJvE2Jc%2BeoagyS1qRACKBGUkgLkKjw7cGymHhtB4IzzN3nbXDqr0acRWGy0%2BEpZ3HYNDalEYsK0lR9Q0WCgty5I0JKmPIuo9hOYa1xTNH%2B22xiWsekxGV%2FcA9FXgWpi%2BFt7fBmMk4dhDe%2BuuYc7N79hd0FYuSBNW1Wp32Bluoe4SNkNAB%2BuIDd9KqPzqs09UAbBoz2y4WxXOQnRyR8GAfb8B%2FDtv62gYjtmp%2F6%2Fyr6xj7byWKZdQt8kEftQLTQmP%2F17Efjp6p%2BXo71Q0F9IhAFiqWfp3Ij8hHDSebLcVb2ULXyHNNQpHBOhFgALrFW3I6Uc3WciLEOsBS9Ej3EGdTg%3D%3D" +const testUrlExpectedUrl = 
"https://tf-test-bucket-6159205297736845881.storage.googleapis.com/path/to/file?GoogleAccessId=user@gcp-project.iam.gserviceaccount.com&Expires=1470967410&Signature=JJvE2Jc%2BeoagyS1qRACKBGUkgLkKjw7cGymHhtB4IzzN3nbXDqr0acRWGy0%2BEpZ3HYNDalEYsK0lR9Q0WCgty5I0JKmPIuo9hOYa1xTNH%2B22xiWsekxGV%2FcA9FXgWpi%2BFt7fBmMk4dhDe%2BuuYc7N79hd0FYuSBNW1Wp32Bluoe4SNkNAB%2BuIDd9KqPzqs09UAbBoz2y4WxXOQnRyR8GAfb8B%2FDtv62gYjtmp%2F6%2Fyr6xj7byWKZdQt8kEftQLTQmP%2F17Efjp6p%2BXo71Q0F9IhAFiqWfp3Ij8hHDSebLcVb2ULXyHNNQpHBOhFgALrFW3I6Uc3WciLEOsBS9Ej3EGdTg%3D%3D" func TestUrlData_Signing(t *testing.T) { urlData := &UrlData{ HttpMethod: "GET", Expires: testUrlExpires, Path: testUrlPath, + SignPath: testSignUrlPath, } // unescape and decode the expected signature expectedSig, err := url.QueryUnescape(testUrlExpectedSignatureBase64Encoded) @@ -83,8 +86,10 @@ func TestUrlData_SignedUrl(t *testing.T) { Expires: testUrlExpires, Path: testUrlPath, JwtConfig: cfg, + SignPath: testSignUrlPath, } - result, err := urlData.SignedUrl() + baseUrl := getGcsHostUrl(urlData, testBucketName, testUrlPath) + result, err := urlData.SignedUrl(baseUrl) if err != nil { t.Errorf("Could not generated signed url: %+v", err) } diff --git a/mmv1/third_party/terraform/services/storage/data_source_storage_object_signed_url_test.go b/mmv1/third_party/terraform/services/storage/data_source_storage_object_signed_url_test.go index d81f00bb9f3e..1745584e3c76 100644 --- a/mmv1/third_party/terraform/services/storage/data_source_storage_object_signed_url_test.go +++ b/mmv1/third_party/terraform/services/storage/data_source_storage_object_signed_url_test.go @@ -6,6 +6,7 @@ import ( "fmt" "io/ioutil" "net/http" + "strings" "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -13,17 +14,41 @@ import ( "github.com/hashicorp/terraform-provider-google/google/acctest" ) -func TestAccStorageSignedUrl_basic(t *testing.T) { +const objectPath = "object/objname" +const stoargeApiHost = "storage.googleapis.com" + +func TestAccStorageSignedUrl_basicVirtualStyle(t *testing.T) { t.Parallel() + bucketName := acctest.TestBucketName(t) + acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: []resource.TestStep{ { - Config: testGoogleSignedUrlConfig, + Config: testGoogleSignedUrlConfig(bucketName), Check: resource.ComposeTestCheckFunc( - testAccSignedUrlExists(t, "data.google_storage_object_signed_url.blerg"), + testAccSignedUrlVirtualStyleExists(t, "data.google_storage_object_signed_url.blerg", bucketName), + ), + }, + }, + }) +} + +func TestAccStorageSignedUrl_basicPathStyle(t *testing.T) { + t.Parallel() + + bucketName := acctest.TestBucketName(t) + ".com" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleSignedUrlConfig(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccSignedUrlPathStyleExists(t, "data.google_storage_object_signed_url.blerg", bucketName), ), }, }, @@ -59,7 +84,7 @@ func TestAccStorageSignedUrl_accTest(t *testing.T) { }) } -func testAccSignedUrlExists(t *testing.T, n string) resource.TestCheckFunc { +func testAccSignedUrlVirtualStyleExists(t *testing.T, n, bucketName string) resource.TestCheckFunc { return func(s *terraform.State) error { r := s.RootModule().Resources[n] @@ -69,6 +94,30 @@ func testAccSignedUrlExists(t *testing.T, n 
string) resource.TestCheckFunc { return fmt.Errorf("signed_url is empty: %v", a) } + splitUrl := strings.Split(a["signed_url"], "/") + if splitUrl[2] != fmt.Sprintf("%s.%s", bucketName, stoargeApiHost) { + return fmt.Errorf("invalid virtual style URL") + } + + return nil + } +} + +func testAccSignedUrlPathStyleExists(t *testing.T, n, bucketName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + + r := s.RootModule().Resources[n] + a := r.Primary.Attributes + + if a["signed_url"] == "" { + return fmt.Errorf("signed_url is empty: %v", a) + } + + urlPrefix := fmt.Sprintf("%s://%s/%s/%s", "https", stoargeApiHost, bucketName, objectPath) + if !strings.HasPrefix(a["signed_url"], urlPrefix) { + return fmt.Errorf("invalid path style URL") + } + return nil } } @@ -131,13 +180,14 @@ func testAccSignedUrlRetrieval(n string, headers map[string]string) resource.Tes } } -const testGoogleSignedUrlConfig = ` +func testGoogleSignedUrlConfig(bucket string) string { + return fmt.Sprintf(` data "google_storage_object_signed_url" "blerg" { - bucket = "friedchicken" - path = "path/to/file" - + bucket = "%s" + path = "%s" +} +`, bucket, objectPath) } -` func testAccTestGoogleStorageObjectSignedURL(bucketName string) string { return fmt.Sprintf(` diff --git a/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.tmpl b/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.tmpl index f543ef735ce3..3fa8315f455c 100644 --- a/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.tmpl +++ b/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.tmpl @@ -3,7 +3,6 @@ package storage import ( "bytes" "context" - "errors" "fmt" "log" "regexp" @@ -103,7 +102,7 @@ func ResourceStorageBucket() *schema.Resource { Type: schema.TypeBool, Optional: true, Default: false, - Description: `When deleting a bucket, this boolean option will delete all contained objects, or anywhereCaches (if any). If you try to delete a bucket that contains objects or anywhereCaches, Terraform will fail that run, deleting anywhereCaches may take 80 minutes to complete.`, + Description: `When true, before deleting a bucket, delete all objects within the bucket, or Anywhere Caches caching data for that bucket. Otherwise, buckets with objects/caches will fail. Anywhere Cache requires additional permissions to interact with and will be ignored when those are not present, attempting to delete anyways. This may result in the objects in the bucket getting destroyed but not the bucket itself if there is a cache in use with the bucket. 
Force deletion may take a long time to delete buckets with lots of objects or with any Anywhere Caches (80m+).`, }, "labels": { @@ -1179,98 +1178,104 @@ func resourceStorageBucketRead(d *schema.ResourceData, meta interface{}) error { func resourceStorageBucketDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - // Get the bucket bucket := d.Get("name").(string) - - var listError, deleteObjectError, deleteCacheError error - for deleteObjectError == nil { - res, err := config.NewStorageClient(userAgent).Objects.List(bucket).Versions(true).Do() - if err != nil { - log.Printf("Error listing contents of bucket %s: %v", bucket, err) - // If we can't list the contents, try deleting the bucket anyway in case it's empty - listError = err - break + forceDestroy := d.Get("force_destroy").(bool) + + // set up force_destroy state- we're deleting multiple resource kinds and parallelising those deletions, so we + // store some data in function scope to handle during cleanup + var objectsListError, deleteObjectError, deleteCacheError error + + // Create a workerpool for parallel deletion of resources. In the + // future, it would be great to expose Terraform's global parallelism + // flag here, but that's currently reserved for core use. Testing + // shows that NumCPUs-1 is the most performant on average networks. + // + // The challenge with making this user-configurable is that the + // configuration would reside in the Terraform configuration file, + // decreasing its portability. Ideally we'd want this to connect to + // Terraform's top-level -parallelism flag, but that's not plumbed nor + // is it scheduled to be plumbed to individual providers. + wp := workerpool.New(runtime.NumCPU() - 1) + + // delete anywhere caches + cacheList, _ := getAnywhereCacheListResult(d, config) // intentionally ignore errors on list- this requires extra permissions (storage.anywhereCaches.list) and we fall through if not permissioned + if len(cacheList) != 0 { + if !forceDestroy { + deleteErr := fmt.Errorf("Error trying to delete bucket %s without `force_destroy` set to true", bucket) + log.Printf("[DEBUG] Error attempting to delete bucket %q with anywhere caches present: %s\n\n", bucket, deleteErr) + return deleteErr } - cacheList, cacheListErr := getAnywhereCacheListResult(d, config) - if cacheListErr != nil { - // If we get any error, try deleting the bucket anyway in case it's empty - // This would help our customers to avoid requiring extra storage.anywhereCaches.list permission. 
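+ // Caches are deleted through the shared worker pool in parallel with object deletion below;
+ // a failure is captured in deleteCacheError.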
+ log.Printf("[DEBUG] Attempting to destroy %v anywhere caches on bucket %q due to `force_destroy` being set to true.", len(cacheList), bucket) + wp.Submit(func() { + err = deleteAnywhereCacheIfAny(d, config) + if err != nil { + deleteCacheError = fmt.Errorf("error deleting the caches on the bucket %s : %w", bucket, err) + } + }) + } + + // delete objects + for deleteObjectError == nil { + listResponse, listErr := config.NewStorageClient(userAgent).Objects.List(bucket).Versions(true).Do() + if listErr != nil { + log.Printf("Error listing contents of bucket %s: %v", bucket, listErr) + // If we can't list the contents, fall through and try deleting the bucket anyway in case it's empty + objectsListError = listErr break } - if len(res.Items) == 0 && len(cacheList) == 0 { - break // 0 items and no caches, bucket empty + if len(listResponse.Items) == 0 { + break // 0 items, bucket empty } if d.Get("retention_policy.0.is_locked").(bool) { - for _, item := range res.Items { + for _, item := range listResponse.Items { expiration, err := time.Parse(time.RFC3339, item.RetentionExpirationTime) if err != nil { return err } + if expiration.After(time.Now()) { - deleteErr := errors.New("Bucket '" + d.Get("name").(string) + "' contains objects that have not met the retention period yet and cannot be deleted.") + deleteErr := fmt.Errorf("Bucket %q contains objects that have not met the retention period yet and cannot be deleted.", bucket) log.Printf("Error! %s : %s\n\n", bucket, deleteErr) return deleteErr } } } - if !d.Get("force_destroy").(bool) { + if !forceDestroy { deleteErr := fmt.Errorf("Error trying to delete bucket %s without `force_destroy` set to true", bucket) - log.Printf("Error! %s : %s\n\n", bucket, deleteErr) + log.Printf("[DEBUG] Error attempting to delete bucket %q with objects: %s\n\n", bucket, deleteErr) return deleteErr } - // GCS requires that a bucket be empty (have no objects or object - // versions) before it can be deleted. - log.Printf("[DEBUG] GCS Bucket attempting to forceDestroy\n\n") - - // Create a workerpool for parallel deletion of resources. In the - // future, it would be great to expose Terraform's global parallelism - // flag here, but that's currently reserved for core use. Testing - // shows that NumCPUs-1 is the most performant on average networks. - // - // The challenge with making this user-configurable is that the - // configuration would reside in the Terraform configuration file, - // decreasing its portability. Ideally we'd want this to connect to - // Terraform's top-level -parallelism flag, but that's not plumbed nor - // is it scheduled to be plumbed to individual providers. - wp := workerpool.New(runtime.NumCPU() - 1) - - wp.Submit(func() { - err = deleteAnywhereCacheIfAny(d, config) - if err != nil { - deleteCacheError = fmt.Errorf("error deleting the caches on the bucket %s : %w", bucket, err) - } - }) + // GCS requires that a bucket be empty (have no objects or object versions) before it can be deleted. + log.Printf("[DEBUG] Attempting to destroy %v objects in bucket %q due to `force_destroy` being set to true. There may be more objects- additional pages are not checked in advance.", len(listResponse.Items), bucket) - for _, object := range res.Items { - log.Printf("[DEBUG] Found %s", object.Name) - object := object + for _, object := range listResponse.Items { + object := object // ensure that local variable is maintained over loop iterations. Go probably fixed this issue but that should be evaluated in depth. 
wp.Submit(func() { - log.Printf("[TRACE] Attempting to delete %s", object.Name) - if err := config.NewStorageClient(userAgent).Objects.Delete(bucket, object.Name).Generation(object.Generation).Do(); err != nil { + log.Printf("[DEBUG] Deleting object %q in bucket %q", object.Name, bucket) + err := config.NewStorageClient(userAgent).Objects.Delete(bucket, object.Name).Generation(object.Generation).Do() + if err != nil { deleteObjectError = err - log.Printf("[ERR] Failed to delete storage object %s: %s", object.Name, err) - } else { - log.Printf("[TRACE] Successfully deleted %s", object.Name) + log.Printf("[DEBUG] Failed to delete object %q in bucket %q: %s", object.Name, bucket, err) } }) } - - // Wait for everything to finish. - wp.StopWait() } - // remove empty bucket + // Wait for all force-destroyed children (objects, anywhere caches) to finish getting destroyed + wp.StopWait() + + // destroy bucket err = retry.Retry(1*time.Minute, func() *retry.RetryError { err := config.NewStorageClient(userAgent).Buckets.Delete(bucket).Do() if err == nil { @@ -1281,8 +1286,8 @@ func resourceStorageBucketDelete(d *schema.ResourceData, meta interface{}) error } return retry.NonRetryableError(err) }) - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 409 && strings.Contains(gerr.Message, "not empty") && listError != nil { - return fmt.Errorf("could not delete non-empty bucket due to error when listing contents: %v", listError) + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 409 && strings.Contains(gerr.Message, "not empty") && objectsListError != nil { + return fmt.Errorf("could not delete non-empty bucket due to error when listing contents: %v", objectsListError) } if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 409 && strings.Contains(gerr.Message, "not empty") && deleteObjectError != nil { return fmt.Errorf("could not delete non-empty bucket due to error when deleting contents: %v", deleteObjectError) diff --git a/mmv1/third_party/terraform/services/storage/resource_storage_bucket_object_meta.yaml b/mmv1/third_party/terraform/services/storage/resource_storage_bucket_object_meta.yaml index cd88b6c818fa..141a4003642e 100644 --- a/mmv1/third_party/terraform/services/storage/resource_storage_bucket_object_meta.yaml +++ b/mmv1/third_party/terraform/services/storage/resource_storage_bucket_object_meta.yaml @@ -11,6 +11,13 @@ fields: - api_field: 'contentEncoding' - api_field: 'contentLanguage' - api_field: 'contentType' + - api_field: 'contexts.custom.key' + - api_field: 'contexts.custom.value.createTime' + field: 'contexts.custom.create_time' + - api_field: 'contexts.custom.value.updateTime' + field: 'contexts.custom.update_time' + - api_field: 'contexts.custom.value.value' + field: 'contexts.custom.value' - api_field: 'checksums.crc32c' field: 'crc32c' - api_field: 'customerEncryption.encryptionAlgorithm' diff --git a/mmv1/third_party/terraform/services/storageinsights/resource_storage_insights_dataset_config_test.go b/mmv1/third_party/terraform/services/storageinsights/resource_storage_insights_dataset_config_test.go index 48194cdc1d38..8928eef23d83 100644 --- a/mmv1/third_party/terraform/services/storageinsights/resource_storage_insights_dataset_config_test.go +++ b/mmv1/third_party/terraform/services/storageinsights/resource_storage_insights_dataset_config_test.go @@ -220,6 +220,7 @@ resource "google_storage_insights_dataset_config" "config" { location = "us-central1" dataset_config_id = "tf_test_my_config%{random_suffix}" retention_period_days = 1 + 
activity_data_retention_period_days = 2 source_folders { folder_numbers = ["123", "456"] } @@ -236,6 +237,7 @@ resource "google_storage_insights_dataset_config" "config" { location = "us-central1" dataset_config_id = "tf_test_my_config%{random_suffix}" retention_period_days = 1 + activity_data_retention_period_days = 0 organization_scope = true identity { type = "IDENTITY_TYPE_PER_CONFIG" diff --git a/mmv1/third_party/terraform/services/vertexai/resource_vertex_ai_feature_online_store_test.go b/mmv1/third_party/terraform/services/vertexai/resource_vertex_ai_feature_online_store_test.go index abce779d1663..58d0d4d19150 100644 --- a/mmv1/third_party/terraform/services/vertexai/resource_vertex_ai_feature_online_store_test.go +++ b/mmv1/third_party/terraform/services/vertexai/resource_vertex_ai_feature_online_store_test.go @@ -83,3 +83,97 @@ resource google_vertex_ai_feature_online_store "feature_online_store" { } `, context) } + +func TestAccVertexAIFeatureOnlineStore_bigtable_full(t *testing.T) { + t.Parallel() + + kms := acctest.BootstrapKMSKeyInLocation(t, "us-central1") + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "kms_key_name": kms.CryptoKey.Name, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckVertexAIFeatureOnlineStoreDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccVertexAIFeatureOnlineStore_bigtable_full(context), + }, + { + ResourceName: "google_vertex_ai_feature_online_store.feature_online_store", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "etag", "region", "force_destroy", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccVertexAIFeatureOnlineStore_bigtable_full(context map[string]interface{}) string { + return acctest.Nprintf(` +resource google_vertex_ai_feature_online_store "feature_online_store" { + name = "tf_test_feature_online_store%{random_suffix}" + region = "us-central1" + + bigtable { + auto_scaling { + min_node_count = 1 + max_node_count = 2 + } + enable_direct_bigtable_access = true + zone = "us-central1-a" + } + encryption_spec { + kms_key_name = "%{kms_key_name}" + } + force_destroy = true +} +`, context) +} + +func TestAccVertexAIFeatureOnlineStore_bigtable_with_zone(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckVertexAIFeatureOnlineStoreDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccVertexAIFeatureOnlineStore_bigtable_with_zone(context), + }, + { + ResourceName: "google_vertex_ai_feature_online_store.feature_online_store", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "etag", "region", "force_destroy", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccVertexAIFeatureOnlineStore_bigtable_with_zone(context map[string]interface{}) string { + return acctest.Nprintf(` +resource google_vertex_ai_feature_online_store "feature_online_store" { + name = "tf_test_feature_online_store%{random_suffix}" + region = "us-central1" + + bigtable { + auto_scaling { + min_node_count = 1 + max_node_count = 2 + cpu_utilization_target = 60 + } + zone = "us-central1-a" + } + 
force_destroy = true +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/vertexai/resource_vertex_ai_index_endpoint_test.go b/mmv1/third_party/terraform/services/vertexai/resource_vertex_ai_index_endpoint_test.go index 4b1fccb9e075..2cc51a0bdeab 100644 --- a/mmv1/third_party/terraform/services/vertexai/resource_vertex_ai_index_endpoint_test.go +++ b/mmv1/third_party/terraform/services/vertexai/resource_vertex_ai_index_endpoint_test.go @@ -44,6 +44,31 @@ func TestAccVertexAIIndexEndpoint_updated(t *testing.T) { }) } +func TestAccVertexAIIndexEndpoint_psc_automation_config(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckVertexAIIndexEndpointDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccVertexAIIndexEndpoint_psc_automation_config(context), + }, + { + ResourceName: "google_vertex_ai_index_endpoint.index_endpoint", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "region", "labels", "terraform_labels", "private_service_connect_config.0.psc_automation_configs"}, + }, + }, + }) +} + func testAccVertexAIIndexEndpoint_basic(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_vertex_ai_index_endpoint" "index_endpoint" { @@ -89,3 +114,43 @@ data "google_compute_network" "vertex_network" { data "google_project" "project" {} `, context) } + +func testAccVertexAIIndexEndpoint_psc_automation_config(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_vertex_ai_index_endpoint" "index_endpoint" { + display_name = "tf-test-sample-endpoint-%{random_suffix}" + description = "A sample vertex endpoint" + region = "us-central1" + + labels = { + label-one = "value-one" + } + + private_service_connect_config { + enable_private_service_connect = true + project_allowlist = [ + data.google_project.project.name, + ] + + psc_automation_configs { + project_id = data.google_project.project.id + network = google_compute_network.network.id + } + } +} + +resource "google_compute_subnetwork" "subnetwork" { + name = "tf-test-subnetwork-%{random_suffix}" + ip_cidr_range = "192.168.0.0/24" + region = "us-central1" + network = google_compute_network.network.id +} + +resource "google_compute_network" "network" { + name = "tf-test-network-%{random_suffix}" + auto_create_subnetworks = false +} + +data "google_project" "project" {} +`, context) +} diff --git a/mmv1/third_party/terraform/transport/error_retry_predicates.go b/mmv1/third_party/terraform/transport/error_retry_predicates.go index 042e686cd9d2..bcb31e9a81bf 100644 --- a/mmv1/third_party/terraform/transport/error_retry_predicates.go +++ b/mmv1/third_party/terraform/transport/error_retry_predicates.go @@ -649,3 +649,13 @@ func IsSiteVerificationRetryableError(err error) (bool, string) { } return false, "" } + +// Retry when waiting for ingestion to create a 1P Dataplex entry corresponding to some other resource. 
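+// Such requests fail with a 403 whose body mentions "The action is not allowed on the Dataplex managed entry group"
+// until ingestion has created the entry, so those responses are treated as retryable.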
+func IsDataplex1PEntryIngestedError(err error) (bool, string) { + if gerr, ok := err.(*googleapi.Error); ok { + if gerr.Code == 403 && strings.Contains(gerr.Body, "The action is not allowed on the Dataplex managed entry group") { + return true, "Retry 403s for Dataplex Ingestion" + } + } + return false, "" +} diff --git a/mmv1/third_party/terraform/website/docs/d/artifact_registry_versions.html.markdown b/mmv1/third_party/terraform/website/docs/d/artifact_registry_versions.html.markdown new file mode 100644 index 000000000000..cf63cdca05d6 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/d/artifact_registry_versions.html.markdown @@ -0,0 +1,57 @@ +--- +subcategory: "Artifact Registry" +description: |- + Get information about versions within a Google Artifact Registry package. +--- + +# google_artifact_registry_versions + +Get information about Artifact Registry versions. +See [the official documentation](https://cloud.google.com/artifact-registry/docs/overview) +and [API](https://cloud.google.com/artifact-registry/docs/reference/rest/v1/projects.locations.repositories.packages.versions/list). + +## Example Usage + +```hcl +data "google_artifact_registry_versions" "my_versions" { + location = "us-central1" + repository_id = "example-repo" + package_name = "example-package" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `location` - (Required) The location of the artifact registry. + +* `repository_id` - (Required) The last part of the repository name to fetch from. + +* `package_name` - (Required) The name of the package. + +* `filter` - (Optional) An expression for filtering the results of the request. Filter rules are case insensitive. The fields eligible for filtering are `name` and `version`. Further information can be found in the [REST API](https://cloud.google.com/artifact-registry/docs/reference/rest/v1/projects.locations.repositories.packages.versions/list#query-parameters). + +* `view` - (Optional) The view, which determines what version information is returned in a response. Possible values are `"BASIC"` and `"FULL"`. Defaults to `"BASIC"`. + +* `project` - (Optional) The project ID in which the resource belongs. If it is not provided, the provider project is used. + +## Attributes Reference + +The following attributes are exported: + +* `versions` - A list of all retrieved Artifact Registry versions. Structure is [defined below](#nested_versions). + +The `versions` block supports: + +* `name` - The name of the version, for example: `projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/version1`. If the package part contains slashes, the slashes are escaped. + +* `description` - Description of the version, as specified in its metadata. + +* `related_tags` - A list of related tags. Will contain up to 100 tags that reference this version. + +* `create_time` - The time, as an RFC 3339 string, when this package was created. + +* `update_time` - The time, as an RFC 3339 string, when this package was last updated. This includes publishing a new version of the package. + +* `annotations` - Client specified annotations.
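As a quick illustration of how the `versions` list documented above can be consumed, here is a minimal sketch; the repository, package, and output names are placeholders and not part of this change:

```hcl
# Minimal sketch of consuming the data source documented above.
# "example-repo" and "example-package" are placeholder names.
data "google_artifact_registry_versions" "my_versions" {
  location      = "us-central1"
  repository_id = "example-repo"
  package_name  = "example-package"
  view          = "FULL"
}

# All fully qualified version names returned by the data source.
output "version_names" {
  value = data.google_artifact_registry_versions.my_versions.versions[*].name
}

# Tags that reference the first returned version (up to 100 per version).
output "first_version_tags" {
  value = data.google_artifact_registry_versions.my_versions.versions[0].related_tags
}
```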
diff --git a/mmv1/third_party/terraform/website/docs/d/backup_dr_backup.html.markdown b/mmv1/third_party/terraform/website/docs/d/backup_dr_backup.html.markdown index f5f5675fbbb8..65b9a3147d79 --- a/mmv1/third_party/terraform/website/docs/d/backup_dr_backup.html.markdown +++ b/mmv1/third_party/terraform/website/docs/d/backup_dr_backup.html.markdown @@ -27,3 +27,20 @@ The following arguments are supported: * `project` - (Required) The Google Cloud Project in which the Backup belongs. * `data_source_id` - (Required) The ID of the Data Source in which the Backup belongs. * `backup_vault_id` - (Required) The ID of the Backup Vault of the Data Source in which the Backup belongs. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `name` - Name of the resource. + +* `backups` - List of all backups under the data source. Structure is defined below. + +The `backups` block supports: + +* `name` - Name of the resource. +* `location` - Location of the resource. +* `backup_id` - ID of the requesting object, Backup. +* `backup_vault_id` - Name of the Backup Vault associated with the Backup. +* `data_source_id` - Name of the Data Source associated with the Backup. +* `create_time` - The time when the backup was created. diff --git a/mmv1/third_party/terraform/website/docs/d/cloud_identity_policies.html.markdown b/mmv1/third_party/terraform/website/docs/d/cloud_identity_policies.html.markdown new file mode 100644 index 000000000000..e96f9a94d7d6 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/d/cloud_identity_policies.html.markdown @@ -0,0 +1,101 @@ +--- +subcategory: "Cloud Identity" +layout: "google" +page_title: "Google: google_cloud_identity_policies" +sidebar_current: "docs-google-data-cloud-identity-list-policies" +description: |- + Use this data source to list Cloud Identity policies. +--- + +# google_cloud_identity_policies + +Use this data source to list Cloud Identity policies. + +## Example Usage + +```hcl +data "google_cloud_identity_policies" "all" { + # Example filter (optional) + # filter = "customer == \"customers/my_customer\" && + # setting.type.matches('^settings/gmail\\..*$')" +} + +// The name of the first policy in the list of policies +output "first_policy_name" { + value = data.google_cloud_identity_policies.all.policies[0].name +} + +// The customer to whom the first policy belongs. This will always be the +// same across multiple policies as well.
+output "first_policy_customer" { + value = data.google_cloud_identity_policies.all.policies[0].customer +} + +// The CEL query of the first policy +output "policy_query_query" { + value = data.google_cloud_identity_policies.all.policies[0].policy_query[0].query +} + +// The org unit the first policy applies to +output "policy_query_org_unit" { + value = data.google_cloud_identity_policies.all.policies[0].policy_query[0].org_unit +} + +// The group the first policy applies to +output "policy_query_group" { + value = data.google_cloud_identity_policies.all.policies[0].policy_query[0].group +} + +// The sort order of the first policy +output "policy_query_sort_order" { + value = data.google_cloud_identity_policies.all.policies[0].policy_query[0].sort_order +} + +// The setting of the first policy as a JSON string +output "policy_setting" { + value = data.google_cloud_identity_policies.all.policies[0].setting +} + +// The type of policy - ADMIN/SYSTEM +output "policy_type" { + value = data.google_cloud_identity_policies.all.policies[0].type +} +``` + +## Argument Reference + +The following arguments are supported: + +* `filter` - (Optional) Filter expression for listing policies, as documented in the Cloud Identity Policy API policies.list method. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `policies` - List of Cloud Identity policies that match the filter (or all policies if no filter is provided). Structure is documented below. + +--- + +The `policies` block contains: + +* `name` - The resource name of the policy. + +* `customer` - The customer that the policy belongs to. + +* `policy_query` - A list containing the CEL query that defines which entities the policy applies to. Structure is documented below. + +* `setting` - The setting configured by this policy, represented as a JSON string. + +* `type` - The type of the policy. + +--- + +The `policy_query` block contains: + +* `query` - The query that defines which entities the policy applies to. + +* `group` - The group that the policy applies to. + +* `org_unit` - The org unit that the policy applies to. + +* `sort_order` - The sort order of the policy. diff --git a/mmv1/third_party/terraform/website/docs/d/compute_addresses.html.markdown b/mmv1/third_party/terraform/website/docs/d/compute_addresses.html.markdown index 453dc5b3973c..3545f7e6b1c8 100644 --- a/mmv1/third_party/terraform/website/docs/d/compute_addresses.html.markdown +++ b/mmv1/third_party/terraform/website/docs/d/compute_addresses.html.markdown @@ -79,6 +79,7 @@ exported: * `address` - The IP address (for example `1.2.3.4`). * `address_type` - The IP address type, can be `EXTERNAL` or `INTERNAL`. * `description` - The IP address description. +* `prefix_length` - The prefix length of the IP range. If not present, it means the address field is a single IP address. * `status` - Indicates if the address is used. Possible values are: RESERVED or IN_USE. * `labels` - (Beta only) A map containing IP labels. * `region` - The region in which the address resides. 
diff --git a/mmv1/third_party/terraform/website/docs/d/compute_region_security_policy.html.markdown b/mmv1/third_party/terraform/website/docs/d/compute_region_security_policy.html.markdown new file mode 100644 index 000000000000..09dc7f6a495f --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/d/compute_region_security_policy.html.markdown @@ -0,0 +1,38 @@ +--- +subcategory: "Compute Engine" +description: |- + Fetches the details of a Compute Region Security Policy. +--- + +# google_compute_region_security_policy + +Use this data source to get information about a Compute Region Security Policy. For more details, see the [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/regionSecurityPolicies). + +## Example Usage + +```hcl +data "google_compute_region_security_policy" "default" { + name = "my-region-security-policy" + region = "us-west2" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - + (Required) + The name of the Region Security Policy. + +* `region` - + (Optional) + The region in which the Region Security Policy resides. If not specified, the provider region is used. + +* `project` - + (Optional) + The ID of the project in which the resource belongs. If it is not provided, the provider project is used. + +## Attributes Reference + +See [google_compute_region_security_policy](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_region_security_policy) resource for details of all the available attributes. diff --git a/mmv1/third_party/terraform/website/docs/d/compute_router_nat.html.markdown b/mmv1/third_party/terraform/website/docs/d/compute_router_nat.html.markdown index 97be78757525..50686a1e122c 100644 --- a/mmv1/third_party/terraform/website/docs/d/compute_router_nat.html.markdown +++ b/mmv1/third_party/terraform/website/docs/d/compute_router_nat.html.markdown @@ -6,7 +6,7 @@ description: |- # google_compute_router_nat -To get more information about Snapshot, see: +To get more information about RouterNat, see: * [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/routers) * How-to Guides diff --git a/mmv1/third_party/terraform/website/docs/d/compute_storage_pool.html.markdown b/mmv1/third_party/terraform/website/docs/d/compute_storage_pool.html.markdown new file mode 100644 index 000000000000..3dfb5a958c4f --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/d/compute_storage_pool.html.markdown @@ -0,0 +1,42 @@ +--- +subcategory: "Compute Engine" +description: |- + Provide access to a Storage Pool's attributes +--- + +# google_compute_storage_pool + +Provides access to available Google Compute Storage Pool resources for a given project and zone. +See more about [Hyperdisk Storage Pools](https://cloud.google.com/compute/docs/disks/storage-pools) in the upstream docs. + +## Example Usage + +```hcl +data "google_compute_storage_pool" "my_pool" { + name = "my-storage-pool" + zone = "us-central1-a" +} + +output "pool_capacity" { + value = data.google_compute_storage_pool.my_pool.pool_provisioned_capacity_gb +} + +output "pool_type" { + value = data.google_compute_storage_pool.my_pool.storage_pool_type +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` (Required) - The name of the Storage Pool. +* `zone` (Required) - The zone where the Storage Pool resides. +* `project` (Optional) - The project in which the Storage Pool exists. If it is not provided, the provider project is used. 
+ +## Note +* `deletion_protection` is always set to false on the data source and will not be representative of the actual value on the `google_compute_storage_pool` resource being read. + +## Attributes Reference + +See [google_compute_storage_pool](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_storage_pool) resource for details of the available attributes. diff --git a/mmv1/third_party/terraform/website/docs/d/google_service_networking_peered_dns_domain.html.markdown b/mmv1/third_party/terraform/website/docs/d/google_service_networking_peered_dns_domain.html.markdown new file mode 100644 index 000000000000..1782bb7776eb --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/d/google_service_networking_peered_dns_domain.html.markdown @@ -0,0 +1,54 @@ +--- +subcategory: "Service Networking" +description: |- + Get information about a Google Service Networking Peered DNS Domain. +--- + +# google_service_networking_peered_dns_domain + +Get information about a Google Service Networking Peered DNS Domain. + +When using Google Cloud DNS to manage internal DNS, peered DNS domains make your DNS available to services like Google Cloud Build. Use this data source to retrieve information about an existing peered DNS domain. + +For more information, see [the API](https://cloud.google.com/service-infrastructure/docs/service-networking/reference/rest/v1/services.projects.global.networks.peeredDnsDomains). + +## Example Usage + +```hcl +data "google_service_networking_peered_dns_domain" "my_domain" { + project = "my-project" + name = "example-com" + network = "default" + service = "servicenetworking.googleapis.com" +} + +output "dns_suffix" { + value = data.google_service_networking_peered_dns_domain.my_domain.dns_suffix +} + +output "parent" { + value = data.google_service_networking_peered_dns_domain.my_domain.parent +} +``` + +## Argument Reference + +The following arguments are supported: + +* `project` - (Required) The producer project number or ID. + +* `name` - (Required) The internal name used for the peered DNS domain. + +* `network` - (Required) The network in the consumer project. + +* `service` - (Required) The private service connection between service and consumer network (e.g., `servicenetworking.googleapis.com`). + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `dns_suffix` - The DNS domain suffix of the peered DNS domain. + +* `parent` - An identifier for the resource with format `services/{{service}}/projects/{{project}}/global/networks/{{network}}`. + +* `id` - An identifier for the resource with format `services/{{service}}/projects/{{project}}/global/networks/{{network}}/peeredDnsDomains/{{name}}`.
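Since `dns_suffix` is the attribute most configurations consume, a minimal sketch of deriving an internal hostname from it follows; the project, network, and `db1` host label are placeholder values:

```hcl
# Sketch: build an internal FQDN from the peered DNS domain's suffix.
# The project, network, and "db1" host label are placeholders.
data "google_service_networking_peered_dns_domain" "my_domain" {
  project = "my-project"
  name    = "example-com"
  network = "default"
  service = "servicenetworking.googleapis.com"
}

locals {
  internal_fqdn = "db1.${data.google_service_networking_peered_dns_domain.my_domain.dns_suffix}"
}

output "internal_fqdn" {
  value = local.internal_fqdn
}
```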
-The `fast_socket` block supports: +The `fast_socket` block supports: * `enabled` (Required) - Whether or not the NCCL Fast Socket is enabled @@ -1228,7 +1228,7 @@ Structure is [documented below](#nested_node_kubelet_config). * `network_tags` (Optional) - The network tag config for the cluster's automatically provisioned node pools. Structure is [documented below](#nested_network_tags). -* `linux_node_config` (Optional) - Linux system configuration for the cluster's automatically provisioned node pools. Only `cgroup_mode` field is supported in `node_pool_auto_config`. Structure is [documented below](#nested_linux_node_config). +* `linux_node_config` (Optional) - Linux system configuration for the cluster's automatically provisioned node pools. Only `cgroup_mode` and `node_kernel_module_loading` fields are supported in `node_pool_auto_config`. Structure is [documented below](#nested_linux_node_config). The `node_kubelet_config` block supports: @@ -1311,7 +1311,7 @@ notification_config { The `rotation_config` block supports: * `enabled` (Optional) - Enable the roation in Secret Manager add-on for this cluster. -* `rotation_interval` (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) - The interval between two consecutive rotations. Default rotation interval is 2 minutes. +* `rotation_interval` (Optional) - The interval between two consecutive rotations. Default rotation interval is 2 minutes. The `secret_sync_config` block supports: @@ -1608,6 +1608,8 @@ linux_node_config { * `hugepages_config` - (Optional) Amounts for 2M and 1G hugepages. Structure is [documented below](#nested_hugepages_config). +* `node_kernel_module_loading` - (Optional) Settings for kernel module loading. Structure is [documented below](#nested_node_kernel_module_loading_config). + The `hugepages_config` block supports: * `hugepage_size_2m` - (Optional) Amount of 2M hugepages. @@ -1630,6 +1632,14 @@ linux_node_config { * `TRANSPARENT_HUGEPAGE_DEFRAG_NEVER`: An application will never enter direct reclaim or compaction. * `TRANSPARENT_HUGEPAGE_DEFRAG_UNSPECIFIED`: Default value. GKE will not modify the kernel configuration. +The `node_kernel_module_loading` block supports: + +* `policy` - (Optional) Possible kernel module loading policies. + Accepted values are: + * `POLICY_UNSPECIFIED`: Default if unset. GKE selects the image based on node type. For CPU and TPU nodes, the image will not allow loading external kernel modules. For GPU nodes, the image will allow loading any module, whether it is signed or not. + * `ENFORCE_SIGNED_MODULES`: Enforced signature verification: Node pools will use a Container-Optimized OS image configured to allow loading of *Google-signed* external kernel modules. Loadpin is enabled but configured to exclude modules, and kernel module signature checking is enforced. + * `DO_NOT_ENFORCE_SIGNED_MODULES`: Mirrors existing DEFAULT behavior: For CPU and TPU nodes, the image will not allow loading external kernel modules. For GPU nodes, the image will allow loading any module, whether it is signed or not. + The `containerd_config` block supports: * `private_registry_access_config` (Optional) - Configuration for private container registries. There are two fields in this config: @@ -1654,6 +1664,41 @@ linux_node_config { * `enabled` (Required) - Whether writable cgroups are enabled. +* `registry_hosts` (Optional) - Defines containerd registry host configuration. Each `registry_hosts` entry represents a `hosts.toml` file. 
See [customize containerd configuration in GKE nodes](https://docs.cloud.google.com/kubernetes-engine/docs/how-to/customize-containerd-configuration#registryHosts) for more detail. Example: + ```hcl +registry_hosts { + server = "REGISTRY_SERVER_FQDN" + hosts { + host = "MIRROR_FQDN" + capabilities = [ + "HOST_CAPABILITY_PULL", + "HOST_CAPABILITY_RESOLVE", + "HOST_CAPABILITY_PUSH", + ] + override_path = false + dial_timeout = "30s" + header { + key = "HEADER_KEY" + value = [ + "HEADER_VALUE_1", + "HEADER_VALUE_2", + ] + } + ca { + gcp_secret_manager_secret_uri = "projects/PROJECT_ID_OR_NUMBER/secrets/CA_SECRET/versions/VERSION" + } + client { + cert { + gcp_secret_manager_secret_uri = "projects/PROJECT_ID_OR_NUMBER/secrets/CLIENT_CERT_SECRET/versions/VERSION" + } + key { + gcp_secret_manager_secret_uri = "projects/PROJECT_ID_OR_NUMBER/secrets/CLIENT_KEY_SECRET/versions/VERSION" + } + } + } +} + ``` + The `vertical_pod_autoscaling` block supports: * `enabled` (Required) - Enables vertical pod autoscaling diff --git a/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown b/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown index fef18ea52798..4a93aa24519d 100644 --- a/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown @@ -270,6 +270,46 @@ resource "google_sql_database_instance" "main" { } ``` +### Cloud SQL Instance created with backupdr_backup +~> **NOTE:** To restore from a backupdr_backup, the backup must be in an active state. List the available backups using the `google_backup_dr_backup` data source, and replace `backupdr_backup_full_path` with the backup name. + +```hcl +resource "google_sql_database_instance" "instance" { + name = "main-instance" + database_version = "MYSQL_8_0" + settings { + tier = "db-f1-micro" + backup_configuration { + enabled = true + binary_log_enabled = true + } + backupdr_backup = "backupdr_backup_full_path" + } +} +``` + +### Cloud SQL Instance created using point_in_time_restore +~> **NOTE:** Replace `backupdr_datasource` with the full datasource path; `time_stamp` must be in `YYYY-MM-DDTHH:MM:SSZ` format. + +```hcl +resource "google_sql_database_instance" "instance" { + name = "main-instance" + database_version = "MYSQL_8_0" + settings { + tier = "db-f1-micro" + backup_configuration { + enabled = true + binary_log_enabled = true + } + } + point_in_time_restore_context { + datasource = "backupdr_datasource" + target_instance = "target_instance_name" + point_in_time = "time_stamp" + } +} +``` + ## Argument Reference The following arguments are supported: @@ -457,6 +497,8 @@ The optional `settings.backup_configuration` subblock supports: * `enabled` - (Optional) True if backup configuration is enabled. +* `backup_tier` - (Computed) The backup tier that manages the backups for the instance. + * `start_time` - (Optional) `HH:MM` format time indicating when backup configuration starts. * `point_in_time_recovery_enabled` - (Optional) True if Point-in-time recovery is enabled. Will restart database if enabled after instance creation. Valid only for PostgreSQL and SQL Server instances. Enabled by default for PostgreSQL Enterprise Plus and SQL Server Enterprise Plus instances.
@@ -688,7 +730,7 @@ block during resource creation/update will trigger the restore action after the * `project` - (Optional) The full project ID of the source instance.` -The optional, computed `replication_cluster` block represents a primary instance and disaster recovery replica pair. Applicable to MySQL and PostgreSQL. This field can be set only after both the primary and replica are created. This block supports: +The optional, computed `replication_cluster` block represents a primary instance and disaster recovery replica pair. Applicable to MySQL and PostgreSQL. This field can be set if the primary has `psa_write_endpoint` set, or once both the primary and replica are created. This block supports: * `psa_write_endpoint`: Read-only field which if set, indicates this instance has a private service access (PSA) DNS endpoint that is pointing to the primary instance of the cluster. If this instance is the primary, then the DNS endpoint points to this instance. After a switchover or replica failover operation, this DNS endpoint points to the promoted instance. This is a read-only field, returned to the user as information. This field can exist even if a standalone instance doesn't have a DR replica yet or the DR replica is deleted. diff --git a/mmv1/third_party/terraform/website/docs/r/storage_bucket.html.markdown b/mmv1/third_party/terraform/website/docs/r/storage_bucket.html.markdown index f6baeb3a3e78..e5934134ec8c 100644 --- a/mmv1/third_party/terraform/website/docs/r/storage_bucket.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/storage_bucket.html.markdown @@ -167,9 +167,7 @@ The following arguments are supported: - - - -* `force_destroy` - (Optional, Default: false) When deleting a bucket, this - boolean option will delete all contained objects. If you try to delete a - bucket that contains objects, Terraform will fail that run. +* `force_destroy` - (Optional, Default: false) When true, deleting the bucket first deletes all objects in the bucket and any Anywhere Caches caching data for the bucket. Otherwise, deleting a bucket that still contains objects or caches will fail. Interacting with Anywhere Cache requires additional permissions; if Terraform lacks those permissions, caches are assumed absent and Terraform attempts to delete the bucket anyway, which may destroy the bucket's objects without deleting the bucket itself when a cache is still in use. Force-deleting buckets with many objects or with any Anywhere Caches may take a long time (80m+). * `project` - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
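For context on the reworded `force_destroy` behavior, a minimal sketch of a bucket that opts into force deletion follows; the bucket name and location are placeholders:

```hcl
# Sketch: a bucket whose objects (and any Anywhere Caches, when Terraform has
# permission to see them) are removed before the bucket itself is deleted.
resource "google_storage_bucket" "example" {
  name          = "example-force-destroy-bucket"
  location      = "US"
  force_destroy = true
}
```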
diff --git a/mmv1/third_party/tgc/resource_converters.go.tmpl b/mmv1/third_party/tgc/resource_converters.go.tmpl index faf1a10b6e08..917bc553727e 100644 --- a/mmv1/third_party/tgc/resource_converters.go.tmpl +++ b/mmv1/third_party/tgc/resource_converters.go.tmpl @@ -36,6 +36,7 @@ import ( "github.com/GoogleCloudPlatform/terraform-google-conversion/v7/tfplan2cai/converters/google/resources/services/appengine" "github.com/GoogleCloudPlatform/terraform-google-conversion/v7/tfplan2cai/converters/google/resources/services/artifactregistry" "github.com/GoogleCloudPlatform/terraform-google-conversion/v7/tfplan2cai/converters/google/resources/services/beyondcorp" + "github.com/GoogleCloudPlatform/terraform-google-conversion/v7/tfplan2cai/converters/google/resources/services/biglakeiceberg" "github.com/GoogleCloudPlatform/terraform-google-conversion/v7/tfplan2cai/converters/google/resources/services/bigquery" "github.com/GoogleCloudPlatform/terraform-google-conversion/v7/tfplan2cai/converters/google/resources/services/bigqueryanalyticshub" "github.com/GoogleCloudPlatform/terraform-google-conversion/v7/tfplan2cai/converters/google/resources/services/bigqueryconnection" @@ -220,13 +221,291 @@ func ResourceConverters() map[string][]cai.ResourceConverter { "google_workbench_instance": {workbench.ResourceConverterWorkbenchInstance()}, "google_vmwareengine_private_cloud": {vmwareengine.ResourceConverterVmwareenginePrivateCloud()}, "google_vmwareengine_external_address": {vmwareengine.ResourceConverterVmwareengineExternalAddress()}, - {{- range $object := $.IamResources }} - {{- if $object.IamClassName }} - "{{ $object.TerraformName }}_iam_policy": { {{- $object.IamClassName }}IamPolicy()}, - "{{ $object.TerraformName }}_iam_binding": { {{- $object.IamClassName }}IamBinding()}, - "{{ $object.TerraformName }}_iam_member": { {{- $object.IamClassName }}IamMember()}, - {{- end }} - {{- end }} + "google_access_context_manager_access_policy_iam_policy": {accesscontextmanager.ResourceConverterAccessContextManagerAccessPolicyIamPolicy()}, + "google_access_context_manager_access_policy_iam_binding": {accesscontextmanager.ResourceConverterAccessContextManagerAccessPolicyIamBinding()}, + "google_access_context_manager_access_policy_iam_member": {accesscontextmanager.ResourceConverterAccessContextManagerAccessPolicyIamMember()}, + "google_apigee_environment_iam_policy": {apigee.ResourceConverterApigeeEnvironmentIamPolicy()}, + "google_apigee_environment_iam_binding": {apigee.ResourceConverterApigeeEnvironmentIamBinding()}, + "google_apigee_environment_iam_member": {apigee.ResourceConverterApigeeEnvironmentIamMember()}, + "google_artifact_registry_repository_iam_policy": {artifactregistry.ResourceConverterArtifactRegistryRepositoryIamPolicy()}, + "google_artifact_registry_repository_iam_binding": {artifactregistry.ResourceConverterArtifactRegistryRepositoryIamBinding()}, + "google_artifact_registry_repository_iam_member": {artifactregistry.ResourceConverterArtifactRegistryRepositoryIamMember()}, + "google_beyondcorp_security_gateway_iam_policy": {beyondcorp.ResourceConverterBeyondcorpSecurityGatewayIamPolicy()}, + "google_beyondcorp_security_gateway_iam_binding": {beyondcorp.ResourceConverterBeyondcorpSecurityGatewayIamBinding()}, + "google_beyondcorp_security_gateway_iam_member": {beyondcorp.ResourceConverterBeyondcorpSecurityGatewayIamMember()}, + "google_beyondcorp_security_gateway_application_iam_policy": {beyondcorp.ResourceConverterBeyondcorpSecurityGatewayApplicationIamPolicy()}, + 
"google_beyondcorp_security_gateway_application_iam_binding": {beyondcorp.ResourceConverterBeyondcorpSecurityGatewayApplicationIamBinding()}, + "google_beyondcorp_security_gateway_application_iam_member": {beyondcorp.ResourceConverterBeyondcorpSecurityGatewayApplicationIamMember()}, + "google_biglake_iceberg_catalog_iam_policy": {biglakeiceberg.ResourceConverterBiglakeIcebergIcebergCatalogIamPolicy()}, + "google_biglake_iceberg_catalog_iam_binding": {biglakeiceberg.ResourceConverterBiglakeIcebergIcebergCatalogIamBinding()}, + "google_biglake_iceberg_catalog_iam_member": {biglakeiceberg.ResourceConverterBiglakeIcebergIcebergCatalogIamMember()}, + "google_bigquery_table_iam_policy": {bigquery.ResourceConverterBigQueryTableIamPolicy()}, + "google_bigquery_table_iam_binding": {bigquery.ResourceConverterBigQueryTableIamBinding()}, + "google_bigquery_table_iam_member": {bigquery.ResourceConverterBigQueryTableIamMember()}, + "google_bigquery_analytics_hub_data_exchange_iam_policy": {bigqueryanalyticshub.ResourceConverterBigqueryAnalyticsHubDataExchangeIamPolicy()}, + "google_bigquery_analytics_hub_data_exchange_iam_binding": {bigqueryanalyticshub.ResourceConverterBigqueryAnalyticsHubDataExchangeIamBinding()}, + "google_bigquery_analytics_hub_data_exchange_iam_member": {bigqueryanalyticshub.ResourceConverterBigqueryAnalyticsHubDataExchangeIamMember()}, + "google_bigquery_analytics_hub_listing_iam_policy": {bigqueryanalyticshub.ResourceConverterBigqueryAnalyticsHubListingIamPolicy()}, + "google_bigquery_analytics_hub_listing_iam_binding": {bigqueryanalyticshub.ResourceConverterBigqueryAnalyticsHubListingIamBinding()}, + "google_bigquery_analytics_hub_listing_iam_member": {bigqueryanalyticshub.ResourceConverterBigqueryAnalyticsHubListingIamMember()}, + "google_bigquery_connection_iam_policy": {bigqueryconnection.ResourceConverterBigqueryConnectionConnectionIamPolicy()}, + "google_bigquery_connection_iam_binding": {bigqueryconnection.ResourceConverterBigqueryConnectionConnectionIamBinding()}, + "google_bigquery_connection_iam_member": {bigqueryconnection.ResourceConverterBigqueryConnectionConnectionIamMember()}, + "google_bigquery_datapolicy_data_policy_iam_policy": {bigquerydatapolicy.ResourceConverterBigqueryDatapolicyDataPolicyIamPolicy()}, + "google_bigquery_datapolicy_data_policy_iam_binding": {bigquerydatapolicy.ResourceConverterBigqueryDatapolicyDataPolicyIamBinding()}, + "google_bigquery_datapolicy_data_policy_iam_member": {bigquerydatapolicy.ResourceConverterBigqueryDatapolicyDataPolicyIamMember()}, + "google_bigquery_datapolicyv2_data_policy_iam_policy": {bigquerydatapolicyv2.ResourceConverterBigqueryDatapolicyv2DataPolicyIamPolicy()}, + "google_bigquery_datapolicyv2_data_policy_iam_binding": {bigquerydatapolicyv2.ResourceConverterBigqueryDatapolicyv2DataPolicyIamBinding()}, + "google_bigquery_datapolicyv2_data_policy_iam_member": {bigquerydatapolicyv2.ResourceConverterBigqueryDatapolicyv2DataPolicyIamMember()}, + "google_binary_authorization_attestor_iam_policy": {binaryauthorization.ResourceConverterBinaryAuthorizationAttestorIamPolicy()}, + "google_binary_authorization_attestor_iam_binding": {binaryauthorization.ResourceConverterBinaryAuthorizationAttestorIamBinding()}, + "google_binary_authorization_attestor_iam_member": {binaryauthorization.ResourceConverterBinaryAuthorizationAttestorIamMember()}, + "google_clouddeploy_custom_target_type_iam_policy": {clouddeploy.ResourceConverterClouddeployCustomTargetTypeIamPolicy()}, + "google_clouddeploy_custom_target_type_iam_binding": 
{clouddeploy.ResourceConverterClouddeployCustomTargetTypeIamBinding()}, + "google_clouddeploy_custom_target_type_iam_member": {clouddeploy.ResourceConverterClouddeployCustomTargetTypeIamMember()}, + "google_clouddeploy_delivery_pipeline_iam_policy": {clouddeploy.ResourceConverterClouddeployDeliveryPipelineIamPolicy()}, + "google_clouddeploy_delivery_pipeline_iam_binding": {clouddeploy.ResourceConverterClouddeployDeliveryPipelineIamBinding()}, + "google_clouddeploy_delivery_pipeline_iam_member": {clouddeploy.ResourceConverterClouddeployDeliveryPipelineIamMember()}, + "google_clouddeploy_target_iam_policy": {clouddeploy.ResourceConverterClouddeployTargetIamPolicy()}, + "google_clouddeploy_target_iam_binding": {clouddeploy.ResourceConverterClouddeployTargetIamBinding()}, + "google_clouddeploy_target_iam_member": {clouddeploy.ResourceConverterClouddeployTargetIamMember()}, + "google_cloudfunctions_function_iam_policy": {cloudfunctions.ResourceConverterCloudFunctionsCloudFunctionIamPolicy()}, + "google_cloudfunctions_function_iam_binding": {cloudfunctions.ResourceConverterCloudFunctionsCloudFunctionIamBinding()}, + "google_cloudfunctions_function_iam_member": {cloudfunctions.ResourceConverterCloudFunctionsCloudFunctionIamMember()}, + "google_cloudfunctions2_function_iam_policy": {cloudfunctions2.ResourceConverterCloudfunctions2functionIamPolicy()}, + "google_cloudfunctions2_function_iam_binding": {cloudfunctions2.ResourceConverterCloudfunctions2functionIamBinding()}, + "google_cloudfunctions2_function_iam_member": {cloudfunctions2.ResourceConverterCloudfunctions2functionIamMember()}, + "google_cloud_run_service_iam_policy": {cloudrun.ResourceConverterCloudRunServiceIamPolicy()}, + "google_cloud_run_service_iam_binding": {cloudrun.ResourceConverterCloudRunServiceIamBinding()}, + "google_cloud_run_service_iam_member": {cloudrun.ResourceConverterCloudRunServiceIamMember()}, + "google_cloud_run_v2_job_iam_policy": {cloudrunv2.ResourceConverterCloudRunV2JobIamPolicy()}, + "google_cloud_run_v2_job_iam_binding": {cloudrunv2.ResourceConverterCloudRunV2JobIamBinding()}, + "google_cloud_run_v2_job_iam_member": {cloudrunv2.ResourceConverterCloudRunV2JobIamMember()}, + "google_cloud_run_v2_service_iam_policy": {cloudrunv2.ResourceConverterCloudRunV2ServiceIamPolicy()}, + "google_cloud_run_v2_service_iam_binding": {cloudrunv2.ResourceConverterCloudRunV2ServiceIamBinding()}, + "google_cloud_run_v2_service_iam_member": {cloudrunv2.ResourceConverterCloudRunV2ServiceIamMember()}, + "google_cloud_run_v2_worker_pool_iam_policy": {cloudrunv2.ResourceConverterCloudRunV2WorkerPoolIamPolicy()}, + "google_cloud_run_v2_worker_pool_iam_binding": {cloudrunv2.ResourceConverterCloudRunV2WorkerPoolIamBinding()}, + "google_cloud_run_v2_worker_pool_iam_member": {cloudrunv2.ResourceConverterCloudRunV2WorkerPoolIamMember()}, + "google_cloud_tasks_queue_iam_policy": {cloudtasks.ResourceConverterCloudTasksQueueIamPolicy()}, + "google_cloud_tasks_queue_iam_binding": {cloudtasks.ResourceConverterCloudTasksQueueIamBinding()}, + "google_cloud_tasks_queue_iam_member": {cloudtasks.ResourceConverterCloudTasksQueueIamMember()}, + "google_colab_runtime_template_iam_policy": {colab.ResourceConverterColabRuntimeTemplateIamPolicy()}, + "google_colab_runtime_template_iam_binding": {colab.ResourceConverterColabRuntimeTemplateIamBinding()}, + "google_colab_runtime_template_iam_member": {colab.ResourceConverterColabRuntimeTemplateIamMember()}, + "google_compute_backend_bucket_iam_policy": 
{compute.ResourceConverterComputeBackendBucketIamPolicy()}, + "google_compute_backend_bucket_iam_binding": {compute.ResourceConverterComputeBackendBucketIamBinding()}, + "google_compute_backend_bucket_iam_member": {compute.ResourceConverterComputeBackendBucketIamMember()}, + "google_compute_backend_service_iam_policy": {compute.ResourceConverterComputeBackendServiceIamPolicy()}, + "google_compute_backend_service_iam_binding": {compute.ResourceConverterComputeBackendServiceIamBinding()}, + "google_compute_backend_service_iam_member": {compute.ResourceConverterComputeBackendServiceIamMember()}, + "google_compute_disk_iam_policy": {compute.ResourceConverterComputeDiskIamPolicy()}, + "google_compute_disk_iam_binding": {compute.ResourceConverterComputeDiskIamBinding()}, + "google_compute_disk_iam_member": {compute.ResourceConverterComputeDiskIamMember()}, + "google_compute_image_iam_policy": {compute.ResourceConverterComputeImageIamPolicy()}, + "google_compute_image_iam_binding": {compute.ResourceConverterComputeImageIamBinding()}, + "google_compute_image_iam_member": {compute.ResourceConverterComputeImageIamMember()}, + "google_compute_instance_iam_policy": {compute.ResourceConverterComputeInstanceIamPolicy()}, + "google_compute_instance_iam_binding": {compute.ResourceConverterComputeInstanceIamBinding()}, + "google_compute_instance_iam_member": {compute.ResourceConverterComputeInstanceIamMember()}, + "google_compute_instance_template_iam_policy": {compute.ResourceConverterComputeInstanceTemplateIamPolicy()}, + "google_compute_instance_template_iam_binding": {compute.ResourceConverterComputeInstanceTemplateIamBinding()}, + "google_compute_instance_template_iam_member": {compute.ResourceConverterComputeInstanceTemplateIamMember()}, + "google_compute_instant_snapshot_iam_policy": {compute.ResourceConverterComputeInstantSnapshotIamPolicy()}, + "google_compute_instant_snapshot_iam_binding": {compute.ResourceConverterComputeInstantSnapshotIamBinding()}, + "google_compute_instant_snapshot_iam_member": {compute.ResourceConverterComputeInstantSnapshotIamMember()}, + "google_compute_region_backend_service_iam_policy": {compute.ResourceConverterComputeRegionBackendServiceIamPolicy()}, + "google_compute_region_backend_service_iam_binding": {compute.ResourceConverterComputeRegionBackendServiceIamBinding()}, + "google_compute_region_backend_service_iam_member": {compute.ResourceConverterComputeRegionBackendServiceIamMember()}, + "google_compute_region_disk_iam_policy": {compute.ResourceConverterComputeRegionDiskIamPolicy()}, + "google_compute_region_disk_iam_binding": {compute.ResourceConverterComputeRegionDiskIamBinding()}, + "google_compute_region_disk_iam_member": {compute.ResourceConverterComputeRegionDiskIamMember()}, + "google_compute_snapshot_iam_policy": {compute.ResourceConverterComputeSnapshotIamPolicy()}, + "google_compute_snapshot_iam_binding": {compute.ResourceConverterComputeSnapshotIamBinding()}, + "google_compute_snapshot_iam_member": {compute.ResourceConverterComputeSnapshotIamMember()}, + "google_compute_storage_pool_iam_policy": {compute.ResourceConverterComputeStoragePoolIamPolicy()}, + "google_compute_storage_pool_iam_binding": {compute.ResourceConverterComputeStoragePoolIamBinding()}, + "google_compute_storage_pool_iam_member": {compute.ResourceConverterComputeStoragePoolIamMember()}, + "google_compute_subnetwork_iam_policy": {compute.ResourceConverterComputeSubnetworkIamPolicy()}, + "google_compute_subnetwork_iam_binding": {compute.ResourceConverterComputeSubnetworkIamBinding()}, + 
"google_compute_subnetwork_iam_member": {compute.ResourceConverterComputeSubnetworkIamMember()}, + "google_container_analysis_note_iam_policy": {containeranalysis.ResourceConverterContainerAnalysisNoteIamPolicy()}, + "google_container_analysis_note_iam_binding": {containeranalysis.ResourceConverterContainerAnalysisNoteIamBinding()}, + "google_container_analysis_note_iam_member": {containeranalysis.ResourceConverterContainerAnalysisNoteIamMember()}, + "google_data_catalog_entry_group_iam_policy": {datacatalog.ResourceConverterDataCatalogEntryGroupIamPolicy()}, + "google_data_catalog_entry_group_iam_binding": {datacatalog.ResourceConverterDataCatalogEntryGroupIamBinding()}, + "google_data_catalog_entry_group_iam_member": {datacatalog.ResourceConverterDataCatalogEntryGroupIamMember()}, + "google_data_catalog_policy_tag_iam_policy": {datacatalog.ResourceConverterDataCatalogPolicyTagIamPolicy()}, + "google_data_catalog_policy_tag_iam_binding": {datacatalog.ResourceConverterDataCatalogPolicyTagIamBinding()}, + "google_data_catalog_policy_tag_iam_member": {datacatalog.ResourceConverterDataCatalogPolicyTagIamMember()}, + "google_data_catalog_tag_template_iam_policy": {datacatalog.ResourceConverterDataCatalogTagTemplateIamPolicy()}, + "google_data_catalog_tag_template_iam_binding": {datacatalog.ResourceConverterDataCatalogTagTemplateIamBinding()}, + "google_data_catalog_tag_template_iam_member": {datacatalog.ResourceConverterDataCatalogTagTemplateIamMember()}, + "google_data_catalog_taxonomy_iam_policy": {datacatalog.ResourceConverterDataCatalogTaxonomyIamPolicy()}, + "google_data_catalog_taxonomy_iam_binding": {datacatalog.ResourceConverterDataCatalogTaxonomyIamBinding()}, + "google_data_catalog_taxonomy_iam_member": {datacatalog.ResourceConverterDataCatalogTaxonomyIamMember()}, + "google_data_fusion_instance_iam_policy": {datafusion.ResourceConverterDataFusionInstanceIamPolicy()}, + "google_data_fusion_instance_iam_binding": {datafusion.ResourceConverterDataFusionInstanceIamBinding()}, + "google_data_fusion_instance_iam_member": {datafusion.ResourceConverterDataFusionInstanceIamMember()}, + "google_dataplex_aspect_type_iam_policy": {dataplex.ResourceConverterDataplexAspectTypeIamPolicy()}, + "google_dataplex_aspect_type_iam_binding": {dataplex.ResourceConverterDataplexAspectTypeIamBinding()}, + "google_dataplex_aspect_type_iam_member": {dataplex.ResourceConverterDataplexAspectTypeIamMember()}, + "google_dataplex_asset_iam_policy": {dataplex.ResourceConverterDataplexAssetIamPolicy()}, + "google_dataplex_asset_iam_binding": {dataplex.ResourceConverterDataplexAssetIamBinding()}, + "google_dataplex_asset_iam_member": {dataplex.ResourceConverterDataplexAssetIamMember()}, + "google_dataplex_datascan_iam_policy": {dataplex.ResourceConverterDataplexDatascanIamPolicy()}, + "google_dataplex_datascan_iam_binding": {dataplex.ResourceConverterDataplexDatascanIamBinding()}, + "google_dataplex_datascan_iam_member": {dataplex.ResourceConverterDataplexDatascanIamMember()}, + "google_dataplex_entry_group_iam_policy": {dataplex.ResourceConverterDataplexEntryGroupIamPolicy()}, + "google_dataplex_entry_group_iam_binding": {dataplex.ResourceConverterDataplexEntryGroupIamBinding()}, + "google_dataplex_entry_group_iam_member": {dataplex.ResourceConverterDataplexEntryGroupIamMember()}, + "google_dataplex_entry_type_iam_policy": {dataplex.ResourceConverterDataplexEntryTypeIamPolicy()}, + "google_dataplex_entry_type_iam_binding": {dataplex.ResourceConverterDataplexEntryTypeIamBinding()}, + 
"google_dataplex_entry_type_iam_member": {dataplex.ResourceConverterDataplexEntryTypeIamMember()}, + "google_dataplex_glossary_iam_policy": {dataplex.ResourceConverterDataplexGlossaryIamPolicy()}, + "google_dataplex_glossary_iam_binding": {dataplex.ResourceConverterDataplexGlossaryIamBinding()}, + "google_dataplex_glossary_iam_member": {dataplex.ResourceConverterDataplexGlossaryIamMember()}, + "google_dataplex_lake_iam_policy": {dataplex.ResourceConverterDataplexLakeIamPolicy()}, + "google_dataplex_lake_iam_binding": {dataplex.ResourceConverterDataplexLakeIamBinding()}, + "google_dataplex_lake_iam_member": {dataplex.ResourceConverterDataplexLakeIamMember()}, + "google_dataplex_task_iam_policy": {dataplex.ResourceConverterDataplexTaskIamPolicy()}, + "google_dataplex_task_iam_binding": {dataplex.ResourceConverterDataplexTaskIamBinding()}, + "google_dataplex_task_iam_member": {dataplex.ResourceConverterDataplexTaskIamMember()}, + "google_dataplex_zone_iam_policy": {dataplex.ResourceConverterDataplexZoneIamPolicy()}, + "google_dataplex_zone_iam_binding": {dataplex.ResourceConverterDataplexZoneIamBinding()}, + "google_dataplex_zone_iam_member": {dataplex.ResourceConverterDataplexZoneIamMember()}, + "google_dataproc_autoscaling_policy_iam_policy": {dataproc.ResourceConverterDataprocAutoscalingPolicyIamPolicy()}, + "google_dataproc_autoscaling_policy_iam_binding": {dataproc.ResourceConverterDataprocAutoscalingPolicyIamBinding()}, + "google_dataproc_autoscaling_policy_iam_member": {dataproc.ResourceConverterDataprocAutoscalingPolicyIamMember()}, + "google_dataproc_metastore_database_iam_policy": {dataprocmetastore.ResourceConverterDataprocMetastoreDatabaseIamPolicy()}, + "google_dataproc_metastore_database_iam_binding": {dataprocmetastore.ResourceConverterDataprocMetastoreDatabaseIamBinding()}, + "google_dataproc_metastore_database_iam_member": {dataprocmetastore.ResourceConverterDataprocMetastoreDatabaseIamMember()}, + "google_dataproc_metastore_federation_iam_policy": {dataprocmetastore.ResourceConverterDataprocMetastoreFederationIamPolicy()}, + "google_dataproc_metastore_federation_iam_binding": {dataprocmetastore.ResourceConverterDataprocMetastoreFederationIamBinding()}, + "google_dataproc_metastore_federation_iam_member": {dataprocmetastore.ResourceConverterDataprocMetastoreFederationIamMember()}, + "google_dataproc_metastore_service_iam_policy": {dataprocmetastore.ResourceConverterDataprocMetastoreServiceIamPolicy()}, + "google_dataproc_metastore_service_iam_binding": {dataprocmetastore.ResourceConverterDataprocMetastoreServiceIamBinding()}, + "google_dataproc_metastore_service_iam_member": {dataprocmetastore.ResourceConverterDataprocMetastoreServiceIamMember()}, + "google_dataproc_metastore_table_iam_policy": {dataprocmetastore.ResourceConverterDataprocMetastoreTableIamPolicy()}, + "google_dataproc_metastore_table_iam_binding": {dataprocmetastore.ResourceConverterDataprocMetastoreTableIamBinding()}, + "google_dataproc_metastore_table_iam_member": {dataprocmetastore.ResourceConverterDataprocMetastoreTableIamMember()}, + "google_dns_managed_zone_iam_policy": {dns.ResourceConverterDNSManagedZoneIamPolicy()}, + "google_dns_managed_zone_iam_binding": {dns.ResourceConverterDNSManagedZoneIamBinding()}, + "google_dns_managed_zone_iam_member": {dns.ResourceConverterDNSManagedZoneIamMember()}, + "google_gemini_repository_group_iam_policy": {gemini.ResourceConverterGeminiRepositoryGroupIamPolicy()}, + "google_gemini_repository_group_iam_binding": 
{gemini.ResourceConverterGeminiRepositoryGroupIamBinding()}, + "google_gemini_repository_group_iam_member": {gemini.ResourceConverterGeminiRepositoryGroupIamMember()}, + "google_gke_backup_backup_plan_iam_policy": {gkebackup.ResourceConverterGKEBackupBackupPlanIamPolicy()}, + "google_gke_backup_backup_plan_iam_binding": {gkebackup.ResourceConverterGKEBackupBackupPlanIamBinding()}, + "google_gke_backup_backup_plan_iam_member": {gkebackup.ResourceConverterGKEBackupBackupPlanIamMember()}, + "google_gke_backup_restore_plan_iam_policy": {gkebackup.ResourceConverterGKEBackupRestorePlanIamPolicy()}, + "google_gke_backup_restore_plan_iam_binding": {gkebackup.ResourceConverterGKEBackupRestorePlanIamBinding()}, + "google_gke_backup_restore_plan_iam_member": {gkebackup.ResourceConverterGKEBackupRestorePlanIamMember()}, + "google_gke_hub_membership_iam_policy": {gkehub.ResourceConverterGKEHubMembershipIamPolicy()}, + "google_gke_hub_membership_iam_binding": {gkehub.ResourceConverterGKEHubMembershipIamBinding()}, + "google_gke_hub_membership_iam_member": {gkehub.ResourceConverterGKEHubMembershipIamMember()}, + "google_gke_hub_feature_iam_policy": {gkehub2.ResourceConverterGKEHub2FeatureIamPolicy()}, + "google_gke_hub_feature_iam_binding": {gkehub2.ResourceConverterGKEHub2FeatureIamBinding()}, + "google_gke_hub_feature_iam_member": {gkehub2.ResourceConverterGKEHub2FeatureIamMember()}, + "google_gke_hub_scope_iam_policy": {gkehub2.ResourceConverterGKEHub2ScopeIamPolicy()}, + "google_gke_hub_scope_iam_binding": {gkehub2.ResourceConverterGKEHub2ScopeIamBinding()}, + "google_gke_hub_scope_iam_member": {gkehub2.ResourceConverterGKEHub2ScopeIamMember()}, + "google_healthcare_consent_store_iam_policy": {healthcare.ResourceConverterHealthcareConsentStoreIamPolicy()}, + "google_healthcare_consent_store_iam_binding": {healthcare.ResourceConverterHealthcareConsentStoreIamBinding()}, + "google_healthcare_consent_store_iam_member": {healthcare.ResourceConverterHealthcareConsentStoreIamMember()}, + "google_iam_workload_identity_pool_iam_policy": {iambeta.ResourceConverterIAMBetaWorkloadIdentityPoolIamPolicy()}, + "google_iam_workload_identity_pool_iam_binding": {iambeta.ResourceConverterIAMBetaWorkloadIdentityPoolIamBinding()}, + "google_iam_workload_identity_pool_iam_member": {iambeta.ResourceConverterIAMBetaWorkloadIdentityPoolIamMember()}, + "google_iam_workforce_pool_iam_policy": {iamworkforcepool.ResourceConverterIAMWorkforcePoolWorkforcePoolIamPolicy()}, + "google_iam_workforce_pool_iam_binding": {iamworkforcepool.ResourceConverterIAMWorkforcePoolWorkforcePoolIamBinding()}, + "google_iam_workforce_pool_iam_member": {iamworkforcepool.ResourceConverterIAMWorkforcePoolWorkforcePoolIamMember()}, + "google_iap_tunnel_iam_policy": {iap.ResourceConverterIapTunnelIamPolicy()}, + "google_iap_tunnel_iam_binding": {iap.ResourceConverterIapTunnelIamBinding()}, + "google_iap_tunnel_iam_member": {iap.ResourceConverterIapTunnelIamMember()}, + "google_iap_tunnel_dest_group_iam_policy": {iap.ResourceConverterIapTunnelDestGroupIamPolicy()}, + "google_iap_tunnel_dest_group_iam_binding": {iap.ResourceConverterIapTunnelDestGroupIamBinding()}, + "google_iap_tunnel_dest_group_iam_member": {iap.ResourceConverterIapTunnelDestGroupIamMember()}, + "google_iap_tunnel_instance_iam_policy": {iap.ResourceConverterIapTunnelInstanceIamPolicy()}, + "google_iap_tunnel_instance_iam_binding": {iap.ResourceConverterIapTunnelInstanceIamBinding()}, + "google_iap_tunnel_instance_iam_member": {iap.ResourceConverterIapTunnelInstanceIamMember()}, + 
"google_iap_web_iam_policy": {iap.ResourceConverterIapWebIamPolicy()}, + "google_iap_web_iam_binding": {iap.ResourceConverterIapWebIamBinding()}, + "google_iap_web_iam_member": {iap.ResourceConverterIapWebIamMember()}, + "google_kms_ekm_connection_iam_policy": {kms.ResourceConverterKMSEkmConnectionIamPolicy()}, + "google_kms_ekm_connection_iam_binding": {kms.ResourceConverterKMSEkmConnectionIamBinding()}, + "google_kms_ekm_connection_iam_member": {kms.ResourceConverterKMSEkmConnectionIamMember()}, + "google_logging_log_view_iam_policy": {logging.ResourceConverterLoggingLogViewIamPolicy()}, + "google_logging_log_view_iam_binding": {logging.ResourceConverterLoggingLogViewIamBinding()}, + "google_logging_log_view_iam_member": {logging.ResourceConverterLoggingLogViewIamMember()}, + "google_notebooks_instance_iam_policy": {notebooks.ResourceConverterNotebooksInstanceIamPolicy()}, + "google_notebooks_instance_iam_binding": {notebooks.ResourceConverterNotebooksInstanceIamBinding()}, + "google_notebooks_instance_iam_member": {notebooks.ResourceConverterNotebooksInstanceIamMember()}, + "google_notebooks_runtime_iam_policy": {notebooks.ResourceConverterNotebooksRuntimeIamPolicy()}, + "google_notebooks_runtime_iam_binding": {notebooks.ResourceConverterNotebooksRuntimeIamBinding()}, + "google_notebooks_runtime_iam_member": {notebooks.ResourceConverterNotebooksRuntimeIamMember()}, + "google_privateca_ca_pool_iam_policy": {privateca.ResourceConverterPrivatecaCaPoolIamPolicy()}, + "google_privateca_ca_pool_iam_binding": {privateca.ResourceConverterPrivatecaCaPoolIamBinding()}, + "google_privateca_ca_pool_iam_member": {privateca.ResourceConverterPrivatecaCaPoolIamMember()}, + "google_privateca_certificate_template_iam_policy": {privateca.ResourceConverterPrivatecaCertificateTemplateIamPolicy()}, + "google_privateca_certificate_template_iam_binding": {privateca.ResourceConverterPrivatecaCertificateTemplateIamBinding()}, + "google_privateca_certificate_template_iam_member": {privateca.ResourceConverterPrivatecaCertificateTemplateIamMember()}, + "google_pubsub_schema_iam_policy": {pubsub.ResourceConverterPubsubSchemaIamPolicy()}, + "google_pubsub_schema_iam_binding": {pubsub.ResourceConverterPubsubSchemaIamBinding()}, + "google_pubsub_schema_iam_member": {pubsub.ResourceConverterPubsubSchemaIamMember()}, + "google_pubsub_topic_iam_policy": {pubsub.ResourceConverterPubsubTopicIamPolicy()}, + "google_pubsub_topic_iam_binding": {pubsub.ResourceConverterPubsubTopicIamBinding()}, + "google_pubsub_topic_iam_member": {pubsub.ResourceConverterPubsubTopicIamMember()}, + "google_secret_manager_secret_iam_policy": {secretmanager.ResourceConverterSecretManagerSecretIamPolicy()}, + "google_secret_manager_secret_iam_binding": {secretmanager.ResourceConverterSecretManagerSecretIamBinding()}, + "google_secret_manager_secret_iam_member": {secretmanager.ResourceConverterSecretManagerSecretIamMember()}, + "google_secret_manager_regional_secret_iam_policy": {secretmanagerregional.ResourceConverterSecretManagerRegionalRegionalSecretIamPolicy()}, + "google_secret_manager_regional_secret_iam_binding": {secretmanagerregional.ResourceConverterSecretManagerRegionalRegionalSecretIamBinding()}, + "google_secret_manager_regional_secret_iam_member": {secretmanagerregional.ResourceConverterSecretManagerRegionalRegionalSecretIamMember()}, + "google_secure_source_manager_instance_iam_policy": {securesourcemanager.ResourceConverterSecureSourceManagerInstanceIamPolicy()}, + "google_secure_source_manager_instance_iam_binding": 
{securesourcemanager.ResourceConverterSecureSourceManagerInstanceIamBinding()}, + "google_secure_source_manager_instance_iam_member": {securesourcemanager.ResourceConverterSecureSourceManagerInstanceIamMember()}, + "google_secure_source_manager_repository_iam_policy": {securesourcemanager.ResourceConverterSecureSourceManagerRepositoryIamPolicy()}, + "google_secure_source_manager_repository_iam_binding": {securesourcemanager.ResourceConverterSecureSourceManagerRepositoryIamBinding()}, + "google_secure_source_manager_repository_iam_member": {securesourcemanager.ResourceConverterSecureSourceManagerRepositoryIamMember()}, + "google_scc_source_iam_policy": {securitycenter.ResourceConverterSecurityCenterSourceIamPolicy()}, + "google_scc_source_iam_binding": {securitycenter.ResourceConverterSecurityCenterSourceIamBinding()}, + "google_scc_source_iam_member": {securitycenter.ResourceConverterSecurityCenterSourceIamMember()}, + "google_scc_v2_organization_source_iam_policy": {securitycenterv2.ResourceConverterSecurityCenterV2OrganizationSourceIamPolicy()}, + "google_scc_v2_organization_source_iam_binding": {securitycenterv2.ResourceConverterSecurityCenterV2OrganizationSourceIamBinding()}, + "google_scc_v2_organization_source_iam_member": {securitycenterv2.ResourceConverterSecurityCenterV2OrganizationSourceIamMember()}, + "google_service_directory_namespace_iam_policy": {servicedirectory.ResourceConverterServiceDirectoryNamespaceIamPolicy()}, + "google_service_directory_namespace_iam_binding": {servicedirectory.ResourceConverterServiceDirectoryNamespaceIamBinding()}, + "google_service_directory_namespace_iam_member": {servicedirectory.ResourceConverterServiceDirectoryNamespaceIamMember()}, + "google_service_directory_service_iam_policy": {servicedirectory.ResourceConverterServiceDirectoryServiceIamPolicy()}, + "google_service_directory_service_iam_binding": {servicedirectory.ResourceConverterServiceDirectoryServiceIamBinding()}, + "google_service_directory_service_iam_member": {servicedirectory.ResourceConverterServiceDirectoryServiceIamMember()}, + "google_endpoints_service_iam_policy": {servicemanagement.ResourceConverterServiceManagementServiceIamPolicy()}, + "google_endpoints_service_iam_binding": {servicemanagement.ResourceConverterServiceManagementServiceIamBinding()}, + "google_endpoints_service_iam_member": {servicemanagement.ResourceConverterServiceManagementServiceIamMember()}, + "google_endpoints_service_consumers_iam_policy": {servicemanagement.ResourceConverterServiceManagementServiceConsumersIamPolicy()}, + "google_endpoints_service_consumers_iam_binding": {servicemanagement.ResourceConverterServiceManagementServiceConsumersIamBinding()}, + "google_endpoints_service_consumers_iam_member": {servicemanagement.ResourceConverterServiceManagementServiceConsumersIamMember()}, + "google_vertex_ai_endpoint_iam_policy": {vertexai.ResourceConverterVertexAIEndpointIamPolicy()}, + "google_vertex_ai_endpoint_iam_binding": {vertexai.ResourceConverterVertexAIEndpointIamBinding()}, + "google_vertex_ai_endpoint_iam_member": {vertexai.ResourceConverterVertexAIEndpointIamMember()}, + "google_vertex_ai_feature_group_iam_policy": {vertexai.ResourceConverterVertexAIFeatureGroupIamPolicy()}, + "google_vertex_ai_feature_group_iam_binding": {vertexai.ResourceConverterVertexAIFeatureGroupIamBinding()}, + "google_vertex_ai_feature_group_iam_member": {vertexai.ResourceConverterVertexAIFeatureGroupIamMember()}, + "google_vertex_ai_feature_online_store_iam_policy": 
{vertexai.ResourceConverterVertexAIFeatureOnlineStoreIamPolicy()}, + "google_vertex_ai_feature_online_store_iam_binding": {vertexai.ResourceConverterVertexAIFeatureOnlineStoreIamBinding()}, + "google_vertex_ai_feature_online_store_iam_member": {vertexai.ResourceConverterVertexAIFeatureOnlineStoreIamMember()}, + "google_vertex_ai_feature_online_store_featureview_iam_policy": {vertexai.ResourceConverterVertexAIFeatureOnlineStoreFeatureviewIamPolicy()}, + "google_vertex_ai_feature_online_store_featureview_iam_binding": {vertexai.ResourceConverterVertexAIFeatureOnlineStoreFeatureviewIamBinding()}, + "google_vertex_ai_feature_online_store_featureview_iam_member": {vertexai.ResourceConverterVertexAIFeatureOnlineStoreFeatureviewIamMember()}, + "google_vertex_ai_featurestore_iam_policy": {vertexai.ResourceConverterVertexAIFeaturestoreIamPolicy()}, + "google_vertex_ai_featurestore_iam_binding": {vertexai.ResourceConverterVertexAIFeaturestoreIamBinding()}, + "google_vertex_ai_featurestore_iam_member": {vertexai.ResourceConverterVertexAIFeaturestoreIamMember()}, + "google_vertex_ai_featurestore_entitytype_iam_policy": {vertexai.ResourceConverterVertexAIFeaturestoreEntitytypeIamPolicy()}, + "google_vertex_ai_featurestore_entitytype_iam_binding": {vertexai.ResourceConverterVertexAIFeaturestoreEntitytypeIamBinding()}, + "google_vertex_ai_featurestore_entitytype_iam_member": {vertexai.ResourceConverterVertexAIFeaturestoreEntitytypeIamMember()}, + "google_workbench_instance_iam_policy": {workbench.ResourceConverterWorkbenchInstanceIamPolicy()}, + "google_workbench_instance_iam_binding": {workbench.ResourceConverterWorkbenchInstanceIamBinding()}, + "google_workbench_instance_iam_member": {workbench.ResourceConverterWorkbenchInstanceIamMember()}, "google_project": { resourcemanager.ResourceConverterProject(), resourcemanager.ResourceConverterProjectBillingInfo(), diff --git a/mmv1/third_party/tgc_next/Makefile b/mmv1/third_party/tgc_next/Makefile index b6dc9e2fe54b..69895ebff7ca 100644 --- a/mmv1/third_party/tgc_next/Makefile +++ b/mmv1/third_party/tgc_next/Makefile @@ -18,7 +18,7 @@ test-integration: go version terraform --version ./config-tf-dev-override.sh - TF_CLI_CONFIG_FILE="$${PWD}/${TF_CONFIG_FILE}" GO111MODULE=on go test -run=TestAcc $(TESTPATH) $(TESTARGS) -timeout 30m -v ./... + TF_CLI_CONFIG_FILE="$${PWD}/${TF_CONFIG_FILE}" GO111MODULE=on go test -run=TestAcc $(TESTPATH) $(TESTARGS) -p 8 -parallel 8 -timeout 60m -v ./... 
mod-clean: git restore go.mod diff --git a/mmv1/third_party/tgc_next/test/setup.go b/mmv1/third_party/tgc_next/test/setup.go index 6adbf6d9578a..f08ad623f927 100644 --- a/mmv1/third_party/tgc_next/test/setup.go +++ b/mmv1/third_party/tgc_next/test/setup.go @@ -39,6 +39,7 @@ type TgcMetadataPayload struct { RawConfig string `json:"raw_config"` ResourceMetadata map[string]*ResourceMetadata `json:"resource_metadata"` PrimaryResource string `json:"primary_resource"` + CaiReadTime time.Time `json:"cai_read_time"` } type ResourceTestData struct { @@ -60,7 +61,7 @@ type Resource struct { const ( ymdFormat = "2006-01-02" - maxAttempts = 3 + maxAttempts = 5 ) var ( diff --git a/tools/issue-labeler/labeler/enrolled_teams.yml b/tools/issue-labeler/labeler/enrolled_teams.yml index 371b8da515df..dab2c75a4e8c 100755 --- a/tools/issue-labeler/labeler/enrolled_teams.yml +++ b/tools/issue-labeler/labeler/enrolled_teams.yml @@ -323,6 +323,10 @@ service/container: service/containeranalysis: resources: - google_container_analysis_.* +service/data-transfer-essentials: + resources: + - google_network_connectivity_multicloud_data_transfer_config + - google_network_connectivity_destination service/datacatalog: resources: - google_data_catalog_.* @@ -374,6 +378,7 @@ service/dialogflow-cx: - google_dialogflow_entity_type - google_dialogflow_fulfillment - google_dialogflow_intent + - google_dialogflow_version service/discoveryengine: resources: - google_discovery_engine_.* @@ -418,6 +423,10 @@ service/firebase: - google_firebase_storage.* - google_firebase_web.* - google_firebaserules_.* +service/firebaseailogic: + team: firebase-terraform + resources: + - google_firebase_ai_logic.* service/firebaseapphosting: resources: - google_firebase_app_hosting.* @@ -559,6 +568,9 @@ service/network-connectivity-center: resources: - google_network_connectivity_hub - google_network_connectivity_spoke + - google_network_connectivity_gateway_advertised_route + - google_network_security_sac_realm + - google_network_security_sac_attachment service/network-security: resources: - google_network_security_authorization_policy