diff --git a/colab_runtime_template_full/main.tf b/colab_runtime_template_full/main.tf index f3309004..528460db 100644 --- a/colab_runtime_template_full/main.tf +++ b/colab_runtime_template_full/main.tf @@ -53,4 +53,17 @@ resource "google_colab_runtime_template" "runtime-template" { encryption_spec { kms_key_name = "my-crypto-key-${local.name_suffix}" } + + software_config { + env { + name = "TEST" + value = 1 + } + + post_startup_script_config { + post_startup_script = "echo 'hello world'" + post_startup_script_url = "gs://colab-enterprise-pss-secure/secure_pss.sh" + post_startup_script_behavior = "RUN_ONCE" + } + } } diff --git a/compute_interconnect_attachment_custom_ranges/backing_file.tf b/compute_interconnect_attachment_custom_ranges/backing_file.tf new file mode 100644 index 00000000..c60b1199 --- /dev/null +++ b/compute_interconnect_attachment_custom_ranges/backing_file.tf @@ -0,0 +1,15 @@ +# This file has some scaffolding to make sure that names are unique and that +# a region and zone are selected when you try to create your Terraform resources. 
+ +locals { + name_suffix = "${random_pet.suffix.id}" +} + +resource "random_pet" "suffix" { + length = 2 +} + +provider "google" { + region = "us-central1" + zone = "us-central1-c" +} diff --git a/compute_interconnect_attachment_custom_ranges/main.tf b/compute_interconnect_attachment_custom_ranges/main.tf new file mode 100644 index 00000000..70fc11ad --- /dev/null +++ b/compute_interconnect_attachment_custom_ranges/main.tf @@ -0,0 +1,26 @@ +resource "google_compute_interconnect_attachment" "custom-ranges-interconnect-attachment" { + name = "test-custom-ranges-interconnect-attachment-${local.name_suffix}" + edge_availability_domain = "AVAILABILITY_DOMAIN_1" + type = "PARTNER" + router = google_compute_router.foobar.id + mtu = 1500 + stack_type = "IPV4_IPV6" + labels = { mykey = "myvalue" } + candidate_cloud_router_ip_address = "192.169.0.1/29" + candidate_customer_router_ip_address = "192.169.0.2/29" + candidate_cloud_router_ipv6_address = "748d:2f23:6651:9455:828b:ca81:6fe0:fed1/125" + candidate_customer_router_ipv6_address = "748d:2f23:6651:9455:828b:ca81:6fe0:fed2/125" +} + +resource "google_compute_router" "foobar" { + name = "test-router-${local.name_suffix}" + network = google_compute_network.foobar.name + bgp { + asn = 16550 + } +} + +resource "google_compute_network" "foobar" { + name = "test-network-${local.name_suffix}" + auto_create_subnetworks = false +} diff --git a/compute_interconnect_attachment_custom_ranges/motd b/compute_interconnect_attachment_custom_ranges/motd new file mode 100644 index 00000000..45a906e8 --- /dev/null +++ b/compute_interconnect_attachment_custom_ranges/motd @@ -0,0 +1,7 @@ +=== + +These examples use real resources that will be billed to the +Google Cloud Platform project you use - so make sure that you +run "terraform destroy" before quitting! 
+ +=== diff --git a/compute_interconnect_attachment_custom_ranges/tutorial.md b/compute_interconnect_attachment_custom_ranges/tutorial.md new file mode 100644 index 00000000..d18d3f03 --- /dev/null +++ b/compute_interconnect_attachment_custom_ranges/tutorial.md @@ -0,0 +1,79 @@ +# Compute Interconnect Attachment Custom Ranges - Terraform + +## Setup + + + +Welcome to Terraform in Google Cloud Shell! We need you to let us know what project you'd like to use with Terraform. + + + +Terraform provisions real GCP resources, so anything you create in this session will be billed against this project. + +## Terraforming! + +Let's use {{project-id}} with Terraform! Click the Cloud Shell icon below to copy the command +to your shell, and then run it from the shell by pressing Enter/Return. Terraform will pick up +the project name from the environment variable. + +```bash +export GOOGLE_CLOUD_PROJECT={{project-id}} +``` + +After that, let's get Terraform started. Run the following to pull in the providers. + +```bash +terraform init +``` + +With the providers downloaded and a project set, you're ready to use Terraform. Go ahead! + +```bash +terraform apply +``` + +Terraform will show you what it plans to do, and prompt you to accept. Type "yes" to accept the plan. + +```bash +yes +``` + + +## Post-Apply + +### Editing your config + +Now you've provisioned your resources in GCP! If you run a "plan", you should see no changes needed. + +```bash +terraform plan +``` + +So let's make a change! Try editing a number, or appending a value to the name in the editor. Then, +run a 'plan' again. + +```bash +terraform plan +``` + +Afterwards you can run an apply, which implicitly does a plan and shows you the intended changes +at the 'yes' prompt. 
+ +```bash +terraform apply +``` + +```bash +yes +``` + +## Cleanup + +Run the following to remove the resources Terraform provisioned: + +```bash +terraform destroy +``` +```bash +yes +``` diff --git a/datastream_stream_postgresql_sslconfig_server_and_client_verification/backing_file.tf b/datastream_stream_postgresql_sslconfig_server_and_client_verification/backing_file.tf new file mode 100644 index 00000000..c60b1199 --- /dev/null +++ b/datastream_stream_postgresql_sslconfig_server_and_client_verification/backing_file.tf @@ -0,0 +1,15 @@ +# This file has some scaffolding to make sure that names are unique and that +# a region and zone are selected when you try to create your Terraform resources. + +locals { + name_suffix = "${random_pet.suffix.id}" +} + +resource "random_pet" "suffix" { + length = 2 +} + +provider "google" { + region = "us-central1" + zone = "us-central1-c" +} diff --git a/datastream_stream_postgresql_sslconfig_server_and_client_verification/main.tf b/datastream_stream_postgresql_sslconfig_server_and_client_verification/main.tf new file mode 100644 index 00000000..d007b295 --- /dev/null +++ b/datastream_stream_postgresql_sslconfig_server_and_client_verification/main.tf @@ -0,0 +1,69 @@ +data "google_datastream_static_ips" "datastream_ips" { + location = "us-central1" +} + +resource "google_sql_database_instance" "instance" { + name = "my-instance-${local.name_suffix}" + database_version = "POSTGRES_15" + region = "us-central1" + settings { + tier = "db-f1-micro" + ip_configuration { + ipv4_enabled = true + ssl_mode = "TRUSTED_CLIENT_CERTIFICATE_REQUIRED" + dynamic "authorized_networks" { + for_each = data.google_datastream_static_ips.datastream_ips.static_ips + iterator = ip + + content { + name = format("datastream-%d", ip.key) + value = ip.value + } + } + } + } + + deletion_protection = false +} + +resource "google_sql_database" "db" { + instance = google_sql_database_instance.instance.name + name = "db" +} + +resource "random_password" "pwd" { 
+ length = 16 + special = false +} + +resource "google_sql_user" "user" { + name = "user" + instance = google_sql_database_instance.instance.name + password = random_password.pwd.result +} + +resource "google_sql_ssl_cert" "client_cert" { + common_name = "client-name" + instance = google_sql_database_instance.instance.name +} + +resource "google_datastream_connection_profile" "default" { + display_name = "Connection Profile" + location = "us-central1" + connection_profile_id = "profile-id-${local.name_suffix}" + + postgresql_profile { + hostname = google_sql_database_instance.instance.public_ip_address + port = 5432 + username = "user" + password = random_password.pwd.result + database = google_sql_database.db.name + ssl_config { + server_and_client_verification { + client_certificate = google_sql_ssl_cert.client_cert.cert + client_key = google_sql_ssl_cert.client_cert.private_key + ca_certificate = google_sql_ssl_cert.client_cert.server_ca_cert + } + } + } +} diff --git a/datastream_stream_postgresql_sslconfig_server_and_client_verification/motd b/datastream_stream_postgresql_sslconfig_server_and_client_verification/motd new file mode 100644 index 00000000..45a906e8 --- /dev/null +++ b/datastream_stream_postgresql_sslconfig_server_and_client_verification/motd @@ -0,0 +1,7 @@ +=== + +These examples use real resources that will be billed to the +Google Cloud Platform project you use - so make sure that you +run "terraform destroy" before quitting! + +=== diff --git a/datastream_stream_postgresql_sslconfig_server_and_client_verification/tutorial.md b/datastream_stream_postgresql_sslconfig_server_and_client_verification/tutorial.md new file mode 100644 index 00000000..26af5984 --- /dev/null +++ b/datastream_stream_postgresql_sslconfig_server_and_client_verification/tutorial.md @@ -0,0 +1,79 @@ +# Datastream Stream Postgresql Sslconfig Server And Client Verification - Terraform + +## Setup + + + +Welcome to Terraform in Google Cloud Shell! 
We need you to let us know what project you'd like to use with Terraform. + + + +Terraform provisions real GCP resources, so anything you create in this session will be billed against this project. + +## Terraforming! + +Let's use {{project-id}} with Terraform! Click the Cloud Shell icon below to copy the command +to your shell, and then run it from the shell by pressing Enter/Return. Terraform will pick up +the project name from the environment variable. + +```bash +export GOOGLE_CLOUD_PROJECT={{project-id}} +``` + +After that, let's get Terraform started. Run the following to pull in the providers. + +```bash +terraform init +``` + +With the providers downloaded and a project set, you're ready to use Terraform. Go ahead! + +```bash +terraform apply +``` + +Terraform will show you what it plans to do, and prompt you to accept. Type "yes" to accept the plan. + +```bash +yes +``` + + +## Post-Apply + +### Editing your config + +Now you've provisioned your resources in GCP! If you run a "plan", you should see no changes needed. + +```bash +terraform plan +``` + +So let's make a change! Try editing a number, or appending a value to the name in the editor. Then, +run a 'plan' again. + +```bash +terraform plan +``` + +Afterwards you can run an apply, which implicitly does a plan and shows you the intended changes +at the 'yes' prompt. 
+ +```bash +terraform apply +``` + +```bash +yes +``` + +## Cleanup + +Run the following to remove the resources Terraform provisioned: + +```bash +terraform destroy +``` +```bash +yes +``` diff --git a/dialogflow_conversation_profile_basic/main.tf b/dialogflow_conversation_profile_basic/main.tf index ea7397e0..804e06e6 100644 --- a/dialogflow_conversation_profile_basic/main.tf +++ b/dialogflow_conversation_profile_basic/main.tf @@ -1,6 +1,6 @@ resource "google_dialogflow_agent" "basic_agent" { display_name = "example_agent" - default_language_code = "en-us" + default_language_code = "en" time_zone = "America/New_York" } resource "google_dialogflow_conversation_profile" "basic_profile" { diff --git a/looker_instance_psc/main.tf b/looker_instance_psc/main.tf index 541ef4ba..bcd496d7 100644 --- a/looker_instance_psc/main.tf +++ b/looker_instance_psc/main.tf @@ -11,7 +11,23 @@ resource "google_looker_instance" "looker-instance" { } psc_config { allowed_vpcs = ["projects/test-project/global/networks/test"] - # update only - # service_attachments = [{local_fqdn: "www.local-fqdn.com" target_service_attachment_uri: "projects/my-project/regions/us-east1/serviceAttachments/sa"}] + + # First Service Attachment + # service_attachments { + # local_fqdn = "www.example-one.com" + # target_service_attachment_uri = "projects/my-project/regions/us-east1/serviceAttachments/sa-1" + # } + + # Second Service Attachment + # service_attachments { + # local_fqdn = "api.internal-partner.com" + # target_service_attachment_uri = "projects/partner-project/regions/us-central1/serviceAttachments/sa-gateway" + # } + + # Third Service Attachment + # service_attachments { + # local_fqdn = "git.internal-repo.com" + # target_service_attachment_uri = "projects/devops-project/regions/us-west1/serviceAttachments/gitlab-sa" + # } } } diff --git a/network_connectivity_destination_basic/backing_file.tf b/network_connectivity_destination_basic/backing_file.tf new file mode 100644 index 00000000..c60b1199 
--- /dev/null +++ b/network_connectivity_destination_basic/backing_file.tf @@ -0,0 +1,15 @@ +# This file has some scaffolding to make sure that names are unique and that +# a region and zone are selected when you try to create your Terraform resources. + +locals { + name_suffix = "${random_pet.suffix.id}" +} + +resource "random_pet" "suffix" { + length = 2 +} + +provider "google" { + region = "us-central1" + zone = "us-central1-c" +} diff --git a/network_connectivity_destination_basic/main.tf b/network_connectivity_destination_basic/main.tf new file mode 100644 index 00000000..75414568 --- /dev/null +++ b/network_connectivity_destination_basic/main.tf @@ -0,0 +1,20 @@ +resource "google_network_connectivity_multicloud_data_transfer_config" "config" { + name = "basic-config-${local.name_suffix}" + location = "europe-west4" + description = "A basic multicloud data transfer config for the destination example" +} + +resource "google_network_connectivity_destination" "example" { + name = "basic-destination-${local.name_suffix}" + location = "europe-west4" + multicloud_data_transfer_config = google_network_connectivity_multicloud_data_transfer_config.config.name + description = "A basic destination" + labels = { + foo = "bar" + } + ip_prefix = "10.0.0.0/8" + endpoints { + asn = "14618" + csp = "AWS" + } +} diff --git a/network_connectivity_destination_basic/motd b/network_connectivity_destination_basic/motd new file mode 100644 index 00000000..45a906e8 --- /dev/null +++ b/network_connectivity_destination_basic/motd @@ -0,0 +1,7 @@ +=== + +These examples use real resources that will be billed to the +Google Cloud Platform project you use - so make sure that you +run "terraform destroy" before quitting! 
+ +=== diff --git a/network_connectivity_destination_basic/tutorial.md b/network_connectivity_destination_basic/tutorial.md new file mode 100644 index 00000000..39a31c11 --- /dev/null +++ b/network_connectivity_destination_basic/tutorial.md @@ -0,0 +1,79 @@ +# Network Connectivity Destination Basic - Terraform + +## Setup + + + +Welcome to Terraform in Google Cloud Shell! We need you to let us know what project you'd like to use with Terraform. + + + +Terraform provisions real GCP resources, so anything you create in this session will be billed against this project. + +## Terraforming! + +Let's use {{project-id}} with Terraform! Click the Cloud Shell icon below to copy the command +to your shell, and then run it from the shell by pressing Enter/Return. Terraform will pick up +the project name from the environment variable. + +```bash +export GOOGLE_CLOUD_PROJECT={{project-id}} +``` + +After that, let's get Terraform started. Run the following to pull in the providers. + +```bash +terraform init +``` + +With the providers downloaded and a project set, you're ready to use Terraform. Go ahead! + +```bash +terraform apply +``` + +Terraform will show you what it plans to do, and prompt you to accept. Type "yes" to accept the plan. + +```bash +yes +``` + + +## Post-Apply + +### Editing your config + +Now you've provisioned your resources in GCP! If you run a "plan", you should see no changes needed. + +```bash +terraform plan +``` + +So let's make a change! Try editing a number, or appending a value to the name in the editor. Then, +run a 'plan' again. + +```bash +terraform plan +``` + +Afterwards you can run an apply, which implicitly does a plan and shows you the intended changes +at the 'yes' prompt. 
+ +```bash +terraform apply +``` + +```bash +yes +``` + +## Cleanup + +Run the following to remove the resources Terraform provisioned: + +```bash +terraform destroy +``` +```bash +yes +``` diff --git a/network_connectivity_multicloud_data_transfer_config_basic/backing_file.tf b/network_connectivity_multicloud_data_transfer_config_basic/backing_file.tf new file mode 100644 index 00000000..c60b1199 --- /dev/null +++ b/network_connectivity_multicloud_data_transfer_config_basic/backing_file.tf @@ -0,0 +1,15 @@ +# This file has some scaffolding to make sure that names are unique and that +# a region and zone are selected when you try to create your Terraform resources. + +locals { + name_suffix = "${random_pet.suffix.id}" +} + +resource "random_pet" "suffix" { + length = 2 +} + +provider "google" { + region = "us-central1" + zone = "us-central1-c" +} diff --git a/network_connectivity_multicloud_data_transfer_config_basic/main.tf b/network_connectivity_multicloud_data_transfer_config_basic/main.tf new file mode 100644 index 00000000..73cea28a --- /dev/null +++ b/network_connectivity_multicloud_data_transfer_config_basic/main.tf @@ -0,0 +1,14 @@ +resource "google_network_connectivity_multicloud_data_transfer_config" "example" { + name = "basic_config-${local.name_suffix}" + location = "europe-west1" + description = "A basic multicloud data transfer configs" + labels = { + foo = "bar" + } + services { + service_name = "big-query" + } + services { + service_name = "cloud-storage" + } +} diff --git a/network_connectivity_multicloud_data_transfer_config_basic/motd b/network_connectivity_multicloud_data_transfer_config_basic/motd new file mode 100644 index 00000000..45a906e8 --- /dev/null +++ b/network_connectivity_multicloud_data_transfer_config_basic/motd @@ -0,0 +1,7 @@ +=== + +These examples use real resources that will be billed to the +Google Cloud Platform project you use - so make sure that you +run "terraform destroy" before quitting! 
+ +=== diff --git a/network_connectivity_multicloud_data_transfer_config_basic/tutorial.md b/network_connectivity_multicloud_data_transfer_config_basic/tutorial.md new file mode 100644 index 00000000..98633c04 --- /dev/null +++ b/network_connectivity_multicloud_data_transfer_config_basic/tutorial.md @@ -0,0 +1,79 @@ +# Network Connectivity Multicloud Data Transfer Config Basic - Terraform + +## Setup + + + +Welcome to Terraform in Google Cloud Shell! We need you to let us know what project you'd like to use with Terraform. + + + +Terraform provisions real GCP resources, so anything you create in this session will be billed against this project. + +## Terraforming! + +Let's use {{project-id}} with Terraform! Click the Cloud Shell icon below to copy the command +to your shell, and then run it from the shell by pressing Enter/Return. Terraform will pick up +the project name from the environment variable. + +```bash +export GOOGLE_CLOUD_PROJECT={{project-id}} +``` + +After that, let's get Terraform started. Run the following to pull in the providers. + +```bash +terraform init +``` + +With the providers downloaded and a project set, you're ready to use Terraform. Go ahead! + +```bash +terraform apply +``` + +Terraform will show you what it plans to do, and prompt you to accept. Type "yes" to accept the plan. + +```bash +yes +``` + + +## Post-Apply + +### Editing your config + +Now you've provisioned your resources in GCP! If you run a "plan", you should see no changes needed. + +```bash +terraform plan +``` + +So let's make a change! Try editing a number, or appending a value to the name in the editor. Then, +run a 'plan' again. + +```bash +terraform plan +``` + +Afterwards you can run an apply, which implicitly does a plan and shows you the intended changes +at the 'yes' prompt. 
+ +```bash +terraform apply +``` + +```bash +yes +``` + +## Cleanup + +Run the following to remove the resources Terraform provisioned: + +```bash +terraform destroy +``` +```bash +yes +``` diff --git a/network_services_lb_route_extension_basic/main.tf b/network_services_lb_route_extension_basic/main.tf index 4f68d612..13b5d302 100644 --- a/network_services_lb_route_extension_basic/main.tf +++ b/network_services_lb_route_extension_basic/main.tf @@ -210,6 +210,12 @@ resource "google_network_services_lb_route_extension" "default" { fail_open = false forward_headers = ["custom-header"] + + supported_events = ["REQUEST_HEADERS", "REQUEST_BODY", "REQUEST_TRAILERS"] + request_body_send_mode = "BODY_SEND_MODE_FULL_DUPLEX_STREAMED" + metadata = { + "key" = "value" + } } } diff --git a/network_services_lb_route_extension_observability/backing_file.tf b/network_services_lb_route_extension_observability/backing_file.tf new file mode 100644 index 00000000..c60b1199 --- /dev/null +++ b/network_services_lb_route_extension_observability/backing_file.tf @@ -0,0 +1,15 @@ +# This file has some scaffolding to make sure that names are unique and that +# a region and zone are selected when you try to create your Terraform resources. 
+ +locals { + name_suffix = "${random_pet.suffix.id}" +} + +resource "random_pet" "suffix" { + length = 2 +} + +provider "google" { + region = "us-central1" + zone = "us-central1-c" +} diff --git a/network_services_lb_route_extension_observability/main.tf b/network_services_lb_route_extension_observability/main.tf new file mode 100644 index 00000000..5fa58db2 --- /dev/null +++ b/network_services_lb_route_extension_observability/main.tf @@ -0,0 +1,349 @@ +# Internal HTTP load balancer with a managed instance group backend +# VPC network +resource "google_compute_network" "ilb_network" { + name = "l7-ilb-network-${local.name_suffix}" + auto_create_subnetworks = false +} + +# proxy-only subnet +resource "google_compute_subnetwork" "proxy_subnet" { + name = "l7-ilb-proxy-subnet-${local.name_suffix}" + ip_cidr_range = "10.0.0.0/24" + region = "us-west1" + purpose = "REGIONAL_MANAGED_PROXY" + role = "ACTIVE" + network = google_compute_network.ilb_network.id +} + +# backend subnet +resource "google_compute_subnetwork" "ilb_subnet" { + name = "l7-ilb-subnet-${local.name_suffix}" + ip_cidr_range = "10.0.1.0/24" + region = "us-west1" + network = google_compute_network.ilb_network.id + + depends_on = [ + google_compute_subnetwork.proxy_subnet + ] +} + +# forwarding rule +resource "google_compute_forwarding_rule" "default" { + name = "l7-ilb-forwarding-rule-${local.name_suffix}" + region = "us-west1" + ip_protocol = "TCP" + load_balancing_scheme = "INTERNAL_MANAGED" + port_range = "80" + target = google_compute_region_target_http_proxy.default.id + network = google_compute_network.ilb_network.id + subnetwork = google_compute_subnetwork.ilb_subnet.id + network_tier = "PREMIUM" + + depends_on = [ + google_compute_subnetwork.proxy_subnet + ] +} + +# HTTP target proxy +resource "google_compute_region_target_http_proxy" "default" { + name = "l7-ilb-target-http-proxy-${local.name_suffix}" + region = "us-west1" + url_map = google_compute_region_url_map.default.id +} + +# URL map 
+resource "google_compute_region_url_map" "default" { + name = "l7-ilb-regional-url-map-${local.name_suffix}" + region = "us-west1" + default_service = google_compute_region_backend_service.default.id + + host_rule { + hosts = ["service-extensions.com"] + path_matcher = "callouts" + } + + path_matcher { + name = "callouts" + default_service = google_compute_region_backend_service.callouts_backend.id + } +} + +# backend service +resource "google_compute_region_backend_service" "default" { + name = "l7-ilb-backend-subnet-${local.name_suffix}" + region = "us-west1" + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" + timeout_sec = 10 + health_checks = [google_compute_region_health_check.default.id] + + backend { + group = google_compute_region_instance_group_manager.mig.instance_group + balancing_mode = "UTILIZATION" + capacity_scaler = 1.0 + } +} + +# instance template +resource "google_compute_instance_template" "instance_template" { + name = "l7-ilb-mig-template-${local.name_suffix}" + machine_type = "e2-small" + tags = ["http-server"] + + network_interface { + network = google_compute_network.ilb_network.id + subnetwork = google_compute_subnetwork.ilb_subnet.id + + access_config { + # add external ip to fetch packages + } + } + + disk { + source_image = "debian-cloud/debian-12" + auto_delete = true + boot = true + } + + # install nginx and serve a simple web page + metadata = { + startup-script = <<-EOF1 + #!
/bin/bash + set -euo pipefail + + export DEBIAN_FRONTEND=noninteractive + apt-get update + apt-get install -y nginx-light jq + + NAME=$(curl -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/hostname") + IP=$(curl -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip") + METADATA=$(curl -f -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/attributes/?recursive=True" | jq 'del(.["startup-script"])') + + cat <<EOF > /var/www/html/index.html
+      <pre>
+      Name: $NAME
+      IP: $IP
+      Metadata: $METADATA
+      </pre>
+ EOF + EOF1 + } + + lifecycle { + create_before_destroy = true + } +} + +# health check +resource "google_compute_region_health_check" "default" { + name = "l7-ilb-hc-${local.name_suffix}" + region = "us-west1" + + http_health_check { + port_specification = "USE_SERVING_PORT" + } +} + +# MIG +resource "google_compute_region_instance_group_manager" "mig" { + name = "l7-ilb-mig1-${local.name_suffix}" + region = "us-west1" + + base_instance_name = "vm" + target_size = 2 + + version { + instance_template = google_compute_instance_template.instance_template.id + name = "primary" + } +} + +# allow all access from IAP and health check ranges +resource "google_compute_firewall" "fw_iap" { + name = "l7-ilb-fw-allow-iap-hc-${local.name_suffix}" + direction = "INGRESS" + network = google_compute_network.ilb_network.id + source_ranges = ["130.211.0.0/22", "35.191.0.0/16", "35.235.240.0/20"] + + allow { + protocol = "tcp" + } +} + +# allow http from proxy subnet to backends +resource "google_compute_firewall" "fw_ilb_to_backends" { + name = "l7-ilb-fw-allow-ilb-to-backends-${local.name_suffix}" + direction = "INGRESS" + network = google_compute_network.ilb_network.id + source_ranges = ["10.0.0.0/24"] + target_tags = ["http-server"] + + allow { + protocol = "tcp" + ports = ["80", "443", "8080"] + } + + depends_on = [ + google_compute_firewall.fw_iap + ] +} + +resource "google_network_services_lb_route_extension" "default" { + name = "l7-ilb-route-ext-${local.name_suffix}" + description = "my route extension" + location = "us-west1" + load_balancing_scheme = "INTERNAL_MANAGED" + forwarding_rules = [google_compute_forwarding_rule.default.self_link] + + extension_chains { + name = "chain1" + + match_condition { + cel_expression = "request.path.startsWith('/extensions')" + } + + extensions { + name = "ext11" + authority = "ext11.com" + service = google_compute_region_backend_service.callouts_backend.self_link + timeout = "0.1s" + fail_open = false + + supported_events = 
["REQUEST_HEADERS"] + observability_mode = true + } + } + + labels = { + foo = "bar" + } +} + +# test instance +resource "google_compute_instance" "vm_test" { + name = "l7-ilb-test-vm-${local.name_suffix}" + zone = "us-west1-b" + machine_type = "e2-small" + + network_interface { + network = google_compute_network.ilb_network.id + subnetwork = google_compute_subnetwork.ilb_subnet.id + } + + boot_disk { + initialize_params { + image = "debian-cloud/debian-11" + } + } +} + +# Route Extension Backend Instance +resource "google_compute_instance" "callouts_instance" { + name = "l7-ilb-callouts-ins-${local.name_suffix}" + zone = "us-west1-a" + machine_type = "e2-small" + + labels = { + "container-vm" = "cos-stable-109-17800-147-54" + } + + tags = ["allow-ssh","load-balanced-backend"] + + network_interface { + network = google_compute_network.ilb_network.id + subnetwork = google_compute_subnetwork.ilb_subnet.id + + access_config { + # add external ip to fetch packages + } + } + + boot_disk { + auto_delete = true + + initialize_params { + type = "pd-standard" + size = 10 + image = "https://www.googleapis.com/compute/v1/projects/cos-cloud/global/images/cos-stable-109-17800-147-54" + } + } + + # Initialize an Envoy's Ext Proc gRPC API based on a docker container + metadata = { + startup-script = <<-EOF1 + #! 
/bin/bash + apt-get update + apt-get install apache2 -y + a2ensite default-ssl + a2enmod ssl + echo "Page served from second backend service" | tee /var/www/html/index.html + systemctl restart apache2' + EOF1 + } + + lifecycle { + create_before_destroy = true + } + + deletion_protection = false + + depends_on = [ + google_compute_instance.vm_test + ] +} + +// callouts instance group +resource "google_compute_instance_group" "callouts_instance_group" { + name = "l7-ilb-callouts-ins-group-${local.name_suffix}" + description = "Terraform test instance group" + zone = "us-west1-a" + + instances = [ + google_compute_instance.callouts_instance.id, + ] + + named_port { + name = "http" + port = "80" + } + + named_port { + name = "grpc" + port = "443" + } +} + +# callout health check +resource "google_compute_region_health_check" "callouts_health_check" { + name = "l7-ilb-callouts-hc-${local.name_suffix}" + region = "us-west1" + + http_health_check { + port = 80 + } + + depends_on = [ + google_compute_region_health_check.default + ] +} + +# callout backend service +resource "google_compute_region_backend_service" "callouts_backend" { + name = "l7-ilb-callouts-backend-${local.name_suffix}" + region = "us-west1" + protocol = "HTTP2" + load_balancing_scheme = "INTERNAL_MANAGED" + timeout_sec = 10 + port_name = "grpc" + health_checks = [google_compute_region_health_check.callouts_health_check.id] + + backend { + group = google_compute_instance_group.callouts_instance_group.id + balancing_mode = "UTILIZATION" + capacity_scaler = 1.0 + } + + depends_on = [ + google_compute_region_backend_service.default + ] +} diff --git a/network_services_lb_route_extension_observability/motd b/network_services_lb_route_extension_observability/motd new file mode 100644 index 00000000..45a906e8 --- /dev/null +++ b/network_services_lb_route_extension_observability/motd @@ -0,0 +1,7 @@ +=== + +These examples use real resources that will be billed to the +Google Cloud Platform project you use - so 
make sure that you +run "terraform destroy" before quitting! + +=== diff --git a/network_services_lb_route_extension_observability/tutorial.md b/network_services_lb_route_extension_observability/tutorial.md new file mode 100644 index 00000000..3c0956a3 --- /dev/null +++ b/network_services_lb_route_extension_observability/tutorial.md @@ -0,0 +1,79 @@ +# Network Services Lb Route Extension Observability - Terraform + +## Setup + + + +Welcome to Terraform in Google Cloud Shell! We need you to let us know what project you'd like to use with Terraform. + + + +Terraform provisions real GCP resources, so anything you create in this session will be billed against this project. + +## Terraforming! + +Let's use {{project-id}} with Terraform! Click the Cloud Shell icon below to copy the command +to your shell, and then run it from the shell by pressing Enter/Return. Terraform will pick up +the project name from the environment variable. + +```bash +export GOOGLE_CLOUD_PROJECT={{project-id}} +``` + +After that, let's get Terraform started. Run the following to pull in the providers. + +```bash +terraform init +``` + +With the providers downloaded and a project set, you're ready to use Terraform. Go ahead! + +```bash +terraform apply +``` + +Terraform will show you what it plans to do, and prompt you to accept. Type "yes" to accept the plan. + +```bash +yes +``` + + +## Post-Apply + +### Editing your config + +Now you've provisioned your resources in GCP! If you run a "plan", you should see no changes needed. + +```bash +terraform plan +``` + +So let's make a change! Try editing a number, or appending a value to the name in the editor. Then, +run a 'plan' again. + +```bash +terraform plan +``` + +Afterwards you can run an apply, which implicitly does a plan and shows you the intended changes +at the 'yes' prompt. 
+ +```bash +terraform apply +``` + +```bash +yes +``` + +## Cleanup + +Run the following to remove the resources Terraform provisioned: + +```bash +terraform destroy +``` +```bash +yes +``` diff --git a/network_services_multicast_group_consumer_activation_basic/backing_file.tf b/network_services_multicast_group_consumer_activation_basic/backing_file.tf new file mode 100644 index 00000000..c60b1199 --- /dev/null +++ b/network_services_multicast_group_consumer_activation_basic/backing_file.tf @@ -0,0 +1,15 @@ +# This file has some scaffolding to make sure that names are unique and that +# a region and zone are selected when you try to create your Terraform resources. + +locals { + name_suffix = "${random_pet.suffix.id}" +} + +resource "random_pet" "suffix" { + length = 2 +} + +provider "google" { + region = "us-central1" + zone = "us-central1-c" +} diff --git a/network_services_multicast_group_consumer_activation_basic/main.tf b/network_services_multicast_group_consumer_activation_basic/main.tf new file mode 100644 index 00000000..101318ff --- /dev/null +++ b/network_services_multicast_group_consumer_activation_basic/main.tf @@ -0,0 +1,56 @@ +resource "google_compute_network" "network" { + name = "test-network-mgca-${local.name_suffix}" + auto_create_subnetworks = false +} + +resource "google_network_services_multicast_domain" "multicast_domain" { + multicast_domain_id = "test-domain-mgca-${local.name_suffix}" + location = "global" + admin_network = google_compute_network.network.id + connection_config { connection_type="SAME_VPC"} + depends_on = [google_compute_network.network] +} + +resource "google_network_services_multicast_domain_activation" "multicast_domain_activation" { + multicast_domain_activation_id = "test-domain-activation-mgca-${local.name_suffix}" + location = "us-central1-b" + multicast_domain = google_network_services_multicast_domain.multicast_domain.id +} + +resource "google_network_services_multicast_consumer_association" "consumer_association" { 
+ multicast_consumer_association_id = "test-consumer-association-mgca-${local.name_suffix}"
+ location = "us-central1-b"
+ network = google_compute_network.network.id
+ multicast_domain_activation = google_network_services_multicast_domain_activation.multicast_domain_activation.id
+ depends_on = [google_compute_network.network]
+}
+
+
+resource "google_network_connectivity_internal_range" "internal_range" {
+ name = "test-internal-range-mgca-${local.name_suffix}"
+ network = google_compute_network.network.self_link
+ usage = "FOR_VPC"
+ peering = "FOR_SELF"
+ ip_cidr_range = "224.2.0.2/32"
+}
+
+resource "google_network_services_multicast_group_range" "group_range" {
+ multicast_group_range_id = "test-group-range-mgca-${local.name_suffix}"
+ location = "global"
+ reserved_internal_range = google_network_connectivity_internal_range.internal_range.id
+ multicast_domain = google_network_services_multicast_domain.multicast_domain.id
+}
+
+resource "google_network_services_multicast_group_range_activation" "group_range_activation" {
+ multicast_group_range_activation_id = "test-mgra-mgca-${local.name_suffix}"
+ location = "us-central1-b"
+ multicast_group_range = google_network_services_multicast_group_range.group_range.id
+ multicast_domain_activation = google_network_services_multicast_domain_activation.multicast_domain_activation.id
+}
+
+resource "google_network_services_multicast_group_consumer_activation" "mgca_test" {
+ multicast_group_consumer_activation_id = "test-mgca-mgca-${local.name_suffix}"
+ location = "us-central1-b"
+ multicast_group_range_activation = google_network_services_multicast_group_range_activation.group_range_activation.id
+ multicast_consumer_association = google_network_services_multicast_consumer_association.consumer_association.id
+}
diff --git a/network_services_multicast_group_consumer_activation_basic/motd b/network_services_multicast_group_consumer_activation_basic/motd
new file mode 100644
index 00000000..45a906e8
--- /dev/null
+++ 
b/network_services_multicast_group_consumer_activation_basic/motd @@ -0,0 +1,7 @@ +=== + +These examples use real resources that will be billed to the +Google Cloud Platform project you use - so make sure that you +run "terraform destroy" before quitting! + +=== diff --git a/network_services_multicast_group_consumer_activation_basic/tutorial.md b/network_services_multicast_group_consumer_activation_basic/tutorial.md new file mode 100644 index 00000000..2801c08e --- /dev/null +++ b/network_services_multicast_group_consumer_activation_basic/tutorial.md @@ -0,0 +1,79 @@ +# Network Services Multicast Group Consumer Activation Basic - Terraform + +## Setup + + + +Welcome to Terraform in Google Cloud Shell! We need you to let us know what project you'd like to use with Terraform. + + + +Terraform provisions real GCP resources, so anything you create in this session will be billed against this project. + +## Terraforming! + +Let's use {{project-id}} with Terraform! Click the Cloud Shell icon below to copy the command +to your shell, and then run it from the shell by pressing Enter/Return. Terraform will pick up +the project name from the environment variable. + +```bash +export GOOGLE_CLOUD_PROJECT={{project-id}} +``` + +After that, let's get Terraform started. Run the following to pull in the providers. + +```bash +terraform init +``` + +With the providers downloaded and a project set, you're ready to use Terraform. Go ahead! + +```bash +terraform apply +``` + +Terraform will show you what it plans to do, and prompt you to accept. Type "yes" to accept the plan. + +```bash +yes +``` + + +## Post-Apply + +### Editing your config + +Now you've provisioned your resources in GCP! If you run a "plan", you should see no changes needed. + +```bash +terraform plan +``` + +So let's make a change! Try editing a number, or appending a value to the name in the editor. Then, +run a 'plan' again. 
+ +```bash +terraform plan +``` + +Afterwards you can run an apply, which implicitly does a plan and shows you the intended changes +at the 'yes' prompt. + +```bash +terraform apply +``` + +```bash +yes +``` + +## Cleanup + +Run the following to remove the resources Terraform provisioned: + +```bash +terraform destroy +``` +```bash +yes +``` diff --git a/network_services_multicast_group_producer_activation_basic/backing_file.tf b/network_services_multicast_group_producer_activation_basic/backing_file.tf new file mode 100644 index 00000000..c60b1199 --- /dev/null +++ b/network_services_multicast_group_producer_activation_basic/backing_file.tf @@ -0,0 +1,15 @@ +# This file has some scaffolding to make sure that names are unique and that +# a region and zone are selected when you try to create your Terraform resources. + +locals { + name_suffix = "${random_pet.suffix.id}" +} + +resource "random_pet" "suffix" { + length = 2 +} + +provider "google" { + region = "us-central1" + zone = "us-central1-c" +} diff --git a/network_services_multicast_group_producer_activation_basic/main.tf b/network_services_multicast_group_producer_activation_basic/main.tf new file mode 100644 index 00000000..66d9c9d2 --- /dev/null +++ b/network_services_multicast_group_producer_activation_basic/main.tf @@ -0,0 +1,56 @@ +resource "google_compute_network" "network" { + name = "test-network-mgpa-${local.name_suffix}" + auto_create_subnetworks = false +} + +resource "google_network_services_multicast_domain" "multicast_domain" { + multicast_domain_id = "test-domain-mgpa-${local.name_suffix}" + location = "global" + admin_network = google_compute_network.network.id + connection_config { connection_type="SAME_VPC"} + depends_on = [google_compute_network.network] +} + +resource "google_network_services_multicast_domain_activation" "multicast_domain_activation" { + multicast_domain_activation_id = "test-domain-activation-mgpa-${local.name_suffix}" + location = "us-central1-b" + multicast_domain = 
google_network_services_multicast_domain.multicast_domain.id
+}
+
+resource "google_network_services_multicast_producer_association" "producer_association" {
+ multicast_producer_association_id = "test-producer-association-mgpa-${local.name_suffix}"
+ location = "us-central1-b"
+ network = google_compute_network.network.id
+ multicast_domain_activation = google_network_services_multicast_domain_activation.multicast_domain_activation.id
+ depends_on = [google_compute_network.network]
+}
+
+
+resource "google_network_connectivity_internal_range" "internal_range" {
+ name = "test-internal-range-mgpa-${local.name_suffix}"
+ network = google_compute_network.network.self_link
+ usage = "FOR_VPC"
+ peering = "FOR_SELF"
+ ip_cidr_range = "224.2.0.2/32"
+}
+
+resource "google_network_services_multicast_group_range" "group_range" {
+ multicast_group_range_id = "test-group-range-mgpa-${local.name_suffix}"
+ location = "global"
+ reserved_internal_range = google_network_connectivity_internal_range.internal_range.id
+ multicast_domain = google_network_services_multicast_domain.multicast_domain.id
+}
+
+resource "google_network_services_multicast_group_range_activation" "group_range_activation" {
+ multicast_group_range_activation_id = "test-mgra-mgpa-${local.name_suffix}"
+ location = "us-central1-b"
+ multicast_group_range = google_network_services_multicast_group_range.group_range.id
+ multicast_domain_activation = google_network_services_multicast_domain_activation.multicast_domain_activation.id
+}
+
+resource "google_network_services_multicast_group_producer_activation" "mgpa_test" {
+ multicast_group_producer_activation_id = "test-mgpa-mgpa-${local.name_suffix}"
+ location = "us-central1-b"
+ multicast_group_range_activation = google_network_services_multicast_group_range_activation.group_range_activation.id
+ multicast_producer_association = google_network_services_multicast_producer_association.producer_association.id
+}
diff --git 
a/network_services_multicast_group_producer_activation_basic/motd b/network_services_multicast_group_producer_activation_basic/motd new file mode 100644 index 00000000..45a906e8 --- /dev/null +++ b/network_services_multicast_group_producer_activation_basic/motd @@ -0,0 +1,7 @@ +=== + +These examples use real resources that will be billed to the +Google Cloud Platform project you use - so make sure that you +run "terraform destroy" before quitting! + +=== diff --git a/network_services_multicast_group_producer_activation_basic/tutorial.md b/network_services_multicast_group_producer_activation_basic/tutorial.md new file mode 100644 index 00000000..f4c77f95 --- /dev/null +++ b/network_services_multicast_group_producer_activation_basic/tutorial.md @@ -0,0 +1,79 @@ +# Network Services Multicast Group Producer Activation Basic - Terraform + +## Setup + + + +Welcome to Terraform in Google Cloud Shell! We need you to let us know what project you'd like to use with Terraform. + + + +Terraform provisions real GCP resources, so anything you create in this session will be billed against this project. + +## Terraforming! + +Let's use {{project-id}} with Terraform! Click the Cloud Shell icon below to copy the command +to your shell, and then run it from the shell by pressing Enter/Return. Terraform will pick up +the project name from the environment variable. + +```bash +export GOOGLE_CLOUD_PROJECT={{project-id}} +``` + +After that, let's get Terraform started. Run the following to pull in the providers. + +```bash +terraform init +``` + +With the providers downloaded and a project set, you're ready to use Terraform. Go ahead! + +```bash +terraform apply +``` + +Terraform will show you what it plans to do, and prompt you to accept. Type "yes" to accept the plan. + +```bash +yes +``` + + +## Post-Apply + +### Editing your config + +Now you've provisioned your resources in GCP! If you run a "plan", you should see no changes needed. 
+ +```bash +terraform plan +``` + +So let's make a change! Try editing a number, or appending a value to the name in the editor. Then, +run a 'plan' again. + +```bash +terraform plan +``` + +Afterwards you can run an apply, which implicitly does a plan and shows you the intended changes +at the 'yes' prompt. + +```bash +terraform apply +``` + +```bash +yes +``` + +## Cleanup + +Run the following to remove the resources Terraform provisioned: + +```bash +terraform destroy +``` +```bash +yes +``` diff --git a/redis_cluster_ha_with_labels/backing_file.tf b/redis_cluster_ha_with_labels/backing_file.tf new file mode 100644 index 00000000..c60b1199 --- /dev/null +++ b/redis_cluster_ha_with_labels/backing_file.tf @@ -0,0 +1,15 @@ +# This file has some scaffolding to make sure that names are unique and that +# a region and zone are selected when you try to create your Terraform resources. + +locals { + name_suffix = "${random_pet.suffix.id}" +} + +resource "random_pet" "suffix" { + length = 2 +} + +provider "google" { + region = "us-central1" + zone = "us-central1-c" +} diff --git a/redis_cluster_ha_with_labels/main.tf b/redis_cluster_ha_with_labels/main.tf new file mode 100644 index 00000000..8429bc6d --- /dev/null +++ b/redis_cluster_ha_with_labels/main.tf @@ -0,0 +1,61 @@ +resource "google_redis_cluster" "cluster-ha-with-labels" { + name = "ha-cluster-${local.name_suffix}" + shard_count = 3 + labels = { + my_key = "my_val" + other_key = "other_val" + } + psc_configs { + network = google_compute_network.consumer_net.id + } + region = "us-central1" + replica_count = 1 + node_type = "REDIS_SHARED_CORE_NANO" + transit_encryption_mode = "TRANSIT_ENCRYPTION_MODE_DISABLED" + authorization_mode = "AUTH_MODE_DISABLED" + redis_configs = { + maxmemory-policy = "volatile-ttl" + } + deletion_protection_enabled = false + + zone_distribution_config { + mode = "MULTI_ZONE" + } + maintenance_policy { + weekly_maintenance_window { + day = "MONDAY" + start_time { + hours = 1 + minutes = 0 
+ seconds = 0 + nanos = 0 + } + } + } + depends_on = [ + google_network_connectivity_service_connection_policy.default + ] +} + +resource "google_network_connectivity_service_connection_policy" "default" { + name = "my-policy-${local.name_suffix}" + location = "us-central1" + service_class = "gcp-memorystore-redis" + description = "my basic service connection policy" + network = google_compute_network.consumer_net.id + psc_config { + subnetworks = [google_compute_subnetwork.consumer_subnet.id] + } +} + +resource "google_compute_subnetwork" "consumer_subnet" { + name = "my-subnet-${local.name_suffix}" + ip_cidr_range = "10.0.0.248/29" + region = "us-central1" + network = google_compute_network.consumer_net.id +} + +resource "google_compute_network" "consumer_net" { + name = "my-network-${local.name_suffix}" + auto_create_subnetworks = false +} diff --git a/redis_cluster_ha_with_labels/motd b/redis_cluster_ha_with_labels/motd new file mode 100644 index 00000000..45a906e8 --- /dev/null +++ b/redis_cluster_ha_with_labels/motd @@ -0,0 +1,7 @@ +=== + +These examples use real resources that will be billed to the +Google Cloud Platform project you use - so make sure that you +run "terraform destroy" before quitting! + +=== diff --git a/redis_cluster_ha_with_labels/tutorial.md b/redis_cluster_ha_with_labels/tutorial.md new file mode 100644 index 00000000..359deaf2 --- /dev/null +++ b/redis_cluster_ha_with_labels/tutorial.md @@ -0,0 +1,79 @@ +# Redis Cluster Ha With Labels - Terraform + +## Setup + + + +Welcome to Terraform in Google Cloud Shell! We need you to let us know what project you'd like to use with Terraform. + + + +Terraform provisions real GCP resources, so anything you create in this session will be billed against this project. + +## Terraforming! + +Let's use {{project-id}} with Terraform! Click the Cloud Shell icon below to copy the command +to your shell, and then run it from the shell by pressing Enter/Return. 
Terraform will pick up +the project name from the environment variable. + +```bash +export GOOGLE_CLOUD_PROJECT={{project-id}} +``` + +After that, let's get Terraform started. Run the following to pull in the providers. + +```bash +terraform init +``` + +With the providers downloaded and a project set, you're ready to use Terraform. Go ahead! + +```bash +terraform apply +``` + +Terraform will show you what it plans to do, and prompt you to accept. Type "yes" to accept the plan. + +```bash +yes +``` + + +## Post-Apply + +### Editing your config + +Now you've provisioned your resources in GCP! If you run a "plan", you should see no changes needed. + +```bash +terraform plan +``` + +So let's make a change! Try editing a number, or appending a value to the name in the editor. Then, +run a 'plan' again. + +```bash +terraform plan +``` + +Afterwards you can run an apply, which implicitly does a plan and shows you the intended changes +at the 'yes' prompt. + +```bash +terraform apply +``` + +```bash +yes +``` + +## Cleanup + +Run the following to remove the resources Terraform provisioned: + +```bash +terraform destroy +``` +```bash +yes +``` diff --git a/storage_insights_dataset_config_excludes/main.tf b/storage_insights_dataset_config_excludes/main.tf index 2a74edb3..0f2b172c 100644 --- a/storage_insights_dataset_config_excludes/main.tf +++ b/storage_insights_dataset_config_excludes/main.tf @@ -2,6 +2,7 @@ resource "google_storage_insights_dataset_config" "config_excludes" { location = "us-central1" dataset_config_id = "my_config_excludes-${local.name_suffix}" retention_period_days = 1 + activity_data_retention_period_days = 2 organization_scope = true identity { type = "IDENTITY_TYPE_PER_PROJECT"