diff --git a/.github/workflows/terraform-test.yml b/.github/workflows/terraform-test.yml new file mode 100644 index 0000000..ba85a3f --- /dev/null +++ b/.github/workflows/terraform-test.yml @@ -0,0 +1,79 @@ +name: terraform test + +# Runs the variable-validation suite under tests/terraform/. +# See tests/terraform/README.md for what's covered and how to run locally. + +on: + pull_request: + paths: + - "**/*.tf" + - "**/*.tftest.hcl" + - "templates/TEMPLATE_terraform.tfvars" + - "scripts/installer/validation/**" + - "scripts/installer/data_external/**" + - "scripts/installer/utils/**" + - "tests/datafiles/**" + - "tests/terraform/**" + - "Makefile" + - ".github/workflows/terraform-test.yml" + push: + branches: + - master + - "release/v*" + - "chore/v26-**" # Temporary: keep visibility while the stacked v26 work is in flight. + workflow_dispatch: + +jobs: + terraform-test: + name: terraform test + runs-on: ubuntu-latest + timeout-minutes: 15 + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_version: 1.14.8 + terraform_wrapper: false + + - name: terraform fmt + run: terraform fmt -check -recursive -diff + + - name: Setup tflint + uses: terraform-linters/setup-tflint@v4 + with: + tflint_version: v0.61.0 + + - name: tflint init + run: tflint --init + + - name: tflint + # TFLINT_CONFIG_FILE is required so child modules walked by --recursive + # use the root .tflint.hcl — they don't auto-discover it. + env: + TFLINT_CONFIG_FILE: ${{ github.workspace }}/.tflint.hcl + run: tflint --recursive + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + # tests/requirements.txt is the pytest-suite fixture; this job only needs + # stdlib Python (generate_testing_secrets.py uses no third-party imports). 
+ + - name: Terraform init (modules only) + run: terraform init -backend=false + + - name: Generate test fixtures + env: + CX_SKIP_SSM: "true" + run: | + mkdir -p tests/logs + make generate_test_data + + - name: Run terraform test + run: make terraform_test diff --git a/.tflint.hcl b/.tflint.hcl new file mode 100644 index 0000000..220c41c --- /dev/null +++ b/.tflint.hcl @@ -0,0 +1,31 @@ +config { + call_module_type = "local" + force = false +} + +plugin "terraform" { + enabled = true + preset = "recommended" +} + +plugin "aws" { + enabled = true + version = "0.42.0" + source = "github.com/terraform-linters/tflint-ruleset-aws" +} + +# Most variables are typed-only flags by convention; description requirement +# would be noise rather than signal here. +rule "terraform_documented_variables" { + enabled = false +} +rule "terraform_documented_outputs" { + enabled = false +} + +# Modules are pinned via folder name (e.g. modules/connection_strings/v1.0.0/), +# not via a `version = ` argument. +rule "terraform_module_version" { + enabled = false +} + diff --git a/000_main.tf b/000_main.tf index daf34bc..d17f53a 100644 --- a/000_main.tf +++ b/000_main.tf @@ -2,13 +2,31 @@ ## Provider & Backend ## ------------------------------------------------------------------------------------ terraform { - required_version = ">= 1.1.0" + # Bumped from 1.1.0 -> 1.7.0: terraform test override_data blocks (used by tests/terraform/) + # are only supported on >= 1.7. + required_version = ">= 1.7.0" required_providers { aws = { source = "hashicorp/aws" version = "~> 5.12.0" } + tls = { + source = "hashicorp/tls" + version = "~> 4.0" + } + null = { + source = "hashicorp/null" + version = "~> 3.2" + } + random = { + source = "hashicorp/random" + version = "~> 3.6" + } + external = { + source = "hashicorp/external" + version = "~> 2.3" + } } backend "local" { @@ -71,15 +89,10 @@ locals { # NLB health checks originate from NLB nodes within the VPC — not from external IPs in sg_ingress_cidrs. 
# Without the VPC CIDR in the EC2 security group, health checks are blocked, the target shows unhealthy, # and the NLB stops forwarding real SSH traffic even though connect-proxy is running correctly. - vpc_cidr_block = var.flag_create_new_vpc == true ? var.vpc_new_cidr_range : data.aws_vpc.preexisting[0].cidr_block + vpc_cidr_block = var.flag_create_new_vpc == true ? var.vpc_new_cidr_range : data.aws_vpc.preexisting[0].cidr_block flag_map_public_ip_on_launch = var.flag_map_public_ip_on_launch == true || var.flag_make_instance_public == true ? true : false - # SSM - # --------------------------------------------------------------------------------------- - # Load bootstrapped secrets and define target for TF-generated SSM values. Magical - don't know why it works but it does. - ssm_root = "/config/${var.app_name}" - tower_secrets = jsondecode(data.aws_ssm_parameter.tower_secrets.value) tower_secret_keys = nonsensitive(toset([for k, v in local.tower_secrets : k])) @@ -135,8 +148,8 @@ locals { # Studios SSH — see 002_security_groups.tf for why two separate rules are needed # (one for direct EC2 access, one for NLB path; NLBs don't have security groups # so both use CIDR-based rules rather than source_security_group_id) - sg_ec2_noalb_ssh = try([module.sg_ec2_noalb_ssh[0].security_group_id], []) - sg_from_nlb_ssh = try([module.sg_from_nlb_ssh[0].security_group_id], []) + sg_ec2_noalb_ssh = try([module.sg_ec2_noalb_ssh[0].security_group_id], []) + sg_from_nlb_ssh = try([module.sg_from_nlb_ssh[0].security_group_id], []) sg_ec2_final = concat( local.sg_ec2_core, @@ -150,7 +163,6 @@ locals { local.sg_from_nlb_ssh, ) - ec2_sg_final_raw = join(",", [for sg in local.sg_ec2_final : jsonencode(sg)]) # Needed? 
# ALB - Determine which CIDR Blocks to attach to allowed ports @@ -209,8 +221,7 @@ locals { # Miscellaneous # --------------------------------------------------------------------------------------- # These are needed to handle templatefile rendering to Bash echoing to file craziness. - dollar = "$" - singlequote = "'" + dollar = "$" # Ansible @@ -244,8 +255,7 @@ module "connection_strings" { source = "./modules/connection_strings/v1.0.0" # Feature Flags - flag_create_load_balancer = var.flag_create_load_balancer - flag_do_not_use_https = var.flag_do_not_use_https + flag_do_not_use_https = var.flag_do_not_use_https flag_create_external_db = var.flag_create_external_db flag_use_existing_external_db = var.flag_use_existing_external_db @@ -270,7 +280,7 @@ module "connection_strings" { # Studios Configuration flag_enable_data_studio = var.flag_enable_data_studio - flag_enable_data_studio_ssh = var.flag_enable_data_studio_ssh + flag_enable_data_studio_ssh = var.flag_enable_data_studio_ssh flag_studio_enable_path_routing = var.flag_studio_enable_path_routing data_studio_path_routing_url = var.flag_studio_enable_path_routing ? 
var.data_studio_path_routing_url : "" diff --git a/002_security_groups.tf b/002_security_groups.tf index 4dec213..2eec13f 100644 --- a/002_security_groups.tf +++ b/002_security_groups.tf @@ -109,7 +109,7 @@ module "sg_ec2_noalb_connect" { to_port = 9090 protocol = "tcp" description = "Connect-Proxy" - cidr_blocks = "${join(",", var.sg_ingress_cidrs)}" + cidr_blocks = join(",", var.sg_ingress_cidrs) } ] # number_of_computed_ingress_with_source_security_group_id = 1 diff --git a/004_iam.tf b/004_iam.tf index ea9084c..ac61fde 100644 --- a/004_iam.tf +++ b/004_iam.tf @@ -14,11 +14,10 @@ locals { tag_key = local.global_prefix, aws_region = var.aws_region, aws_account = var.aws_account, - app_name = var.app_name, ssm_key_arn = data.aws_kms_alias.default_ssm.arn, flag_allow_aws_instance_credentials = var.flag_allow_aws_instance_credentials, - tower_aws_role = local.seqerakit_secrets["TOWER_AWS_ROLE"]["value"], + tower_aws_role = local.seqerakit_secrets["TOWER_AWS_ROLE"]["value"], } ) diff --git a/005_parameter_store.tf b/005_parameter_store.tf index 06f0ed6..619bc87 100644 --- a/005_parameter_store.tf +++ b/005_parameter_store.tf @@ -26,22 +26,22 @@ data "aws_ssm_parameter" "wave_lite_secrets" { # Generate individual SSM Parameters # ------------------------------------------------ resource "aws_ssm_parameter" "client_supplied_secrets_tower" { - for_each = local.tower_secret_keys - name = nonsensitive(local.tower_secrets[each.key]["ssm_key"]) - value = local.tower_secrets[each.key]["value"] - type = "SecureString" - overwrite = var.flag_overwrite_ssm_keys + for_each = local.tower_secret_keys + name = nonsensitive(local.tower_secrets[each.key]["ssm_key"]) + value = local.tower_secrets[each.key]["value"] + type = "SecureString" + overwrite = var.flag_overwrite_ssm_keys } resource "aws_ssm_parameter" "client_supplied_secrets_seqerakit" { # for_each = local.seqerakit_secret_keys - for_each = var.flag_run_seqerakit == true ? 
local.seqerakit_secret_keys : [] - name = nonsensitive(local.seqerakit_secrets[each.key]["ssm_key"]) - value = local.seqerakit_secrets[each.key]["value"] - type = "SecureString" - overwrite = var.flag_overwrite_ssm_keys + for_each = var.flag_run_seqerakit == true ? local.seqerakit_secret_keys : [] + name = nonsensitive(local.seqerakit_secrets[each.key]["ssm_key"]) + value = local.seqerakit_secrets[each.key]["value"] + type = "SecureString" + overwrite = var.flag_overwrite_ssm_keys } @@ -50,19 +50,19 @@ resource "aws_ssm_parameter" "client_supplied_secrets_groundswell" { # count = var.flag_enable_groundswell == true ? 1 : 0 # for_each = local.groundswell_secret_keys - for_each = var.flag_enable_groundswell == true ? local.groundswell_secret_keys : [] - name = nonsensitive(local.groundswell_secrets[each.key]["ssm_key"]) - value = local.groundswell_secrets[each.key]["value"] - type = "SecureString" - overwrite = var.flag_overwrite_ssm_keys + for_each = var.flag_enable_groundswell == true ? local.groundswell_secret_keys : [] + name = nonsensitive(local.groundswell_secrets[each.key]["ssm_key"]) + value = local.groundswell_secrets[each.key]["value"] + type = "SecureString" + overwrite = var.flag_overwrite_ssm_keys } resource "aws_ssm_parameter" "client_supplied_secrets_wave_lite" { - for_each = var.flag_use_wave_lite == true ? local.wave_lite_secret_keys : [] - name = nonsensitive(local.wave_lite_secrets[each.key]["ssm_key"]) - value = local.wave_lite_secrets[each.key]["value"] - type = "SecureString" - overwrite = var.flag_overwrite_ssm_keys + for_each = var.flag_use_wave_lite == true ? 
local.wave_lite_secret_keys : [] + name = nonsensitive(local.wave_lite_secrets[each.key]["ssm_key"]) + value = local.wave_lite_secrets[each.key]["value"] + type = "SecureString" + overwrite = var.flag_overwrite_ssm_keys } diff --git a/008_route53.tf b/008_route53.tf index 0121bbf..316a202 100644 --- a/008_route53.tf +++ b/008_route53.tf @@ -66,7 +66,7 @@ resource "aws_route53_record" "alb_connect" { count = local.dns_create_alb_record == true ? 1 : 0 zone_id = local.dns_zone_id - name = var.flag_studio_enable_path_routing ? module.connection_strings.tower_connect_dns : module.connection_strings.tower_connect_wildcard_dns + name = var.flag_studio_enable_path_routing ? module.connection_strings.tower_connect_dns : module.connection_strings.tower_connect_wildcard_dns type = "A" @@ -82,8 +82,8 @@ resource "aws_route53_record" "ec2_connect" { count = local.dns_create_ec2_record == true ? 1 : 0 zone_id = local.dns_zone_id - name = var.flag_studio_enable_path_routing ? module.connection_strings.tower_connect_dns : module.connection_strings.tower_connect_wildcard_dns - type = "A" + name = var.flag_studio_enable_path_routing ? module.connection_strings.tower_connect_dns : module.connection_strings.tower_connect_wildcard_dns + type = "A" ttl = "5" records = [local.dns_instance_ip] @@ -124,8 +124,8 @@ resource "aws_route53_record" "alb_wave" { count = local.dns_create_alb_record == true && var.flag_use_wave_lite == true ? 
1 : 0 zone_id = local.dns_zone_id - name = module.connection_strings.tower_wave_dns - type = "A" + name = module.connection_strings.tower_wave_dns + type = "A" alias { name = module.alb[0].lb_dns_name diff --git a/009_define_file_templates.tf b/009_define_file_templates.tf index e24b6ff..1f7c2ab 100644 --- a/009_define_file_templates.tf +++ b/009_define_file_templates.tf @@ -44,12 +44,12 @@ locals { flag_limit_data_studio_to_some_workspaces = var.flag_limit_data_studio_to_some_workspaces, data_studio_eligible_workspaces = var.data_studio_eligible_workspaces, - flag_enable_data_studio_ssh = var.flag_enable_data_studio_ssh, - data_studio_ssh_address = module.connection_strings.tower_connect_ssh_url, - flag_limit_data_studio_ssh_to_some_workspaces = var.flag_limit_data_studio_ssh_to_some_workspaces, - data_studio_ssh_eligible_workspaces = var.data_studio_ssh_eligible_workspaces, - connect_ssh_fingerprint = tls_private_key.connect_ssh_host_key.public_key_fingerprint_sha256, - + flag_enable_data_studio_ssh = var.flag_enable_data_studio_ssh, + data_studio_ssh_address = module.connection_strings.tower_connect_ssh_url, + flag_limit_data_studio_ssh_to_some_workspaces = var.flag_limit_data_studio_ssh_to_some_workspaces, + data_studio_ssh_eligible_workspaces = var.data_studio_ssh_eligible_workspaces, + connect_ssh_fingerprint = tls_private_key.connect_ssh_host_key.public_key_fingerprint_sha256, + data_studio_options = var.data_studio_options, flag_studio_enable_path_routing = var.flag_studio_enable_path_routing, @@ -106,7 +106,6 @@ locals { swell_db_user = local.groundswell_secrets["SWELL_DB_USER"]["value"], swell_db_password = local.groundswell_secrets["SWELL_DB_PASSWORD"]["value"], swell_database_name = var.swell_database_name, - db_database_name = var.db_database_name, } ) @@ -128,10 +127,10 @@ locals { data_studios_env = templatefile("assets/src/tower_config/data-studios.env.tpl", { - flag_enable_data_studio = var.flag_enable_data_studio, - tower_server_url = 
module.connection_strings.tower_server_url, - tower_redis_url = module.connection_strings.tower_connect_redis_url, - tower_connect_server_url = module.connection_strings.tower_connect_server_url, + flag_enable_data_studio = var.flag_enable_data_studio, + tower_server_url = module.connection_strings.tower_server_url, + tower_redis_url = module.connection_strings.tower_connect_redis_url, + tower_connect_server_url = module.connection_strings.tower_connect_server_url, flag_enable_data_studio_ssh = var.flag_enable_data_studio_ssh, connect_ssh_key_path = "/data/ssh-host-key", } @@ -422,7 +421,7 @@ resource "tls_private_key" "connect_pem" { } resource "tls_private_key" "connect_ssh_host_key" { - algorithm = "ED25519" + algorithm = "ED25519" } diff --git a/010_prepare_config_files.tf b/010_prepare_config_files.tf index da23b5b..74a1bdc 100644 --- a/010_prepare_config_files.tf +++ b/010_prepare_config_files.tf @@ -5,7 +5,7 @@ ## ------------------------------------------------------------------------------------ resource "null_resource" "generate_independent_config_files" { - triggers = { always_run = "${timestamp()}" } + triggers = { always_run = timestamp() } provisioner "local-exec" { working_dir = path.module @@ -100,7 +100,7 @@ resource "null_resource" "generate_config_files_with_dependencies" { aws_ec2_instance_connect_endpoint.example, null_resource.generate_independent_config_files ] - triggers = { always_run = "${timestamp()}" } + triggers = { always_run = timestamp() } provisioner "local-exec" { working_dir = path.module @@ -140,7 +140,7 @@ resource "null_resource" "generate_config_files_with_dependencies" { resource "null_resource" "aws_batch_manual" { count = var.seqerakit_aws_use_forge == false && var.seqerakit_aws_use_batch == true ? 
1 : 0 - triggers = { always_run = "${timestamp()}" } + triggers = { always_run = timestamp() } depends_on = [ null_resource.generate_config_files_with_dependencies, null_resource.generate_independent_config_files @@ -159,7 +159,7 @@ resource "null_resource" "aws_batch_manual" { resource "null_resource" "aws_batch_forge" { count = var.seqerakit_aws_use_forge == true && var.seqerakit_aws_use_batch == true ? 1 : 0 - triggers = { always_run = "${timestamp()}" } + triggers = { always_run = timestamp() } depends_on = [ null_resource.generate_config_files_with_dependencies, null_resource.generate_independent_config_files @@ -179,7 +179,7 @@ resource "null_resource" "aws_batch_forge" { ## Flag for file transfer to start # ------------------------------------------------------------------------------------- resource "null_resource" "allow_file_copy_to_start" { - triggers = { always_run = "${timestamp()}" } + triggers = { always_run = timestamp() } depends_on = [ null_resource.generate_independent_config_files, diff --git a/011_configure_vm.tf b/011_configure_vm.tf index be026e7..6fb40a2 100644 --- a/011_configure_vm.tf +++ b/011_configure_vm.tf @@ -13,7 +13,7 @@ Accepting repetitive boilerplate in return for finer granularity and more visibi resource "null_resource" "ssh_connectivity_check" { count = var.flag_vm_copy_files_to_instance == true ? 1 : 0 - triggers = { always_run = "${timestamp()}" } + triggers = { always_run = timestamp() } depends_on = [null_resource.allow_file_copy_to_start] provisioner "local-exec" { @@ -41,7 +41,7 @@ resource "null_resource" "ssh_connectivity_check" { resource "null_resource" "file_transfer" { count = var.flag_vm_copy_files_to_instance == true ? 
1 : 0 - triggers = { always_run = "${timestamp()}" } + triggers = { always_run = timestamp() } depends_on = [null_resource.ssh_connectivity_check] provisioner "local-exec" { @@ -66,7 +66,7 @@ resource "null_resource" "file_transfer" { resource "null_resource" "host_configuration" { count = var.flag_vm_copy_files_to_instance == true ? 1 : 0 - triggers = { always_run = "${timestamp()}" } + triggers = { always_run = timestamp() } depends_on = [null_resource.file_transfer] provisioner "local-exec" { @@ -87,7 +87,7 @@ resource "null_resource" "host_configuration" { resource "null_resource" "ansible_setup" { count = var.flag_vm_copy_files_to_instance == true ? 1 : 0 - triggers = { always_run = "${timestamp()}" } + triggers = { always_run = timestamp() } depends_on = [null_resource.host_configuration] provisioner "local-exec" { @@ -108,7 +108,7 @@ resource "null_resource" "ansible_setup" { resource "null_resource" "system_packages" { count = var.flag_vm_copy_files_to_instance == true ? 1 : 0 - triggers = { always_run = "${timestamp()}" } + triggers = { always_run = timestamp() } depends_on = [null_resource.ansible_setup] provisioner "local-exec" { @@ -129,7 +129,7 @@ resource "null_resource" "system_packages" { resource "null_resource" "update_configuration_files" { count = var.flag_vm_copy_files_to_instance == true ? 1 : 0 - triggers = { always_run = "${timestamp()}" } + triggers = { always_run = timestamp() } depends_on = [null_resource.system_packages] provisioner "local-exec" { @@ -150,7 +150,7 @@ resource "null_resource" "update_configuration_files" { resource "null_resource" "pull_containers_run_tower" { count = var.flag_vm_copy_files_to_instance == true ? 
1 : 0 - triggers = { always_run = "${timestamp()}" } + triggers = { always_run = timestamp() } depends_on = [null_resource.update_configuration_files] provisioner "local-exec" { @@ -171,7 +171,7 @@ resource "null_resource" "pull_containers_run_tower" { resource "null_resource" "wait_for_tower" { count = var.flag_vm_copy_files_to_instance == true ? 1 : 0 - triggers = { always_run = "${timestamp()}" } + triggers = { always_run = timestamp() } depends_on = [null_resource.pull_containers_run_tower] provisioner "local-exec" { @@ -192,7 +192,7 @@ resource "null_resource" "wait_for_tower" { resource "null_resource" "patch_groundswell" { count = var.flag_vm_copy_files_to_instance == true ? 1 : 0 - triggers = { always_run = "${timestamp()}" } + triggers = { always_run = timestamp() } depends_on = [null_resource.wait_for_tower] provisioner "local-exec" { @@ -214,7 +214,7 @@ resource "null_resource" "patch_groundswell" { resource "null_resource" "run_seqerkit" { count = var.flag_vm_copy_files_to_instance == true && var.flag_run_seqerakit == true ? 1 : 0 - triggers = { always_run = "${timestamp()}" } + triggers = { always_run = timestamp() } depends_on = [null_resource.patch_groundswell] provisioner "local-exec" { diff --git a/Makefile b/Makefile index 5667d4f..7823312 100644 --- a/Makefile +++ b/Makefile @@ -45,6 +45,19 @@ generate_test_data: run_tests: @pytest -c tests/pytest.ini tests/ +terraform_test: + @echo "Running terraform test (variable validations) — see tests/terraform/README.md" + @if [ ! -f tests/datafiles/terraform.tfvars ]; then \ + echo "tests/datafiles/terraform.tfvars not found. Run 'make generate_test_data' first."; \ + exit 1; \ + fi + @# scripts/installer/data_external/*.py reads terraform.tfvars from the project + @# root by hard-coded path, so we have to materialise it there for the test run. 
+ @cp tests/datafiles/terraform.tfvars terraform.tfvars + @cp tests/datafiles/base-overrides.auto.tfvars base-overrides.auto.tfvars 2>/dev/null || true + @trap "rm -f terraform.tfvars base-overrides.auto.tfvars" EXIT; \ + terraform test -test-directory=tests/terraform + purge_cached_plans: @cd tests/ && rm -rf .plan_cache diff --git a/modules/connection_strings/v1.0.0/main.tf b/modules/connection_strings/v1.0.0/main.tf index 196edeb..fbd0a72 100644 --- a/modules/connection_strings/v1.0.0/main.tf +++ b/modules/connection_strings/v1.0.0/main.tf @@ -1,3 +1,13 @@ +terraform { + required_version = ">= 1.7.0" + required_providers { + external = { + source = "hashicorp/external" + version = "~> 2.3" + } + } +} + # https://medium.com/@leslie.alldridge/terraform-external-data-source-using-custom-python-script-with-example-cea5e618d83e data "external" "generate_db_connection_string" { program = ["python3", "${abspath(path.root)}/scripts/installer/data_external/generate_db_connection_string.py"] @@ -52,7 +62,7 @@ locals { # NOTE: Connect has same logic. I've duplicated to better handle divergence risk. sp_redis_container = var.flag_use_container_redis ? "redis" : "" sp_redis_external_mock = var.flag_create_external_redis && var.use_mocks ? "mock.tower-redis.com" : "" - sp_redis_external_new = var.flag_create_external_redis && !var.use_mocks ? "${var.elasticache_tower.cache_nodes[0].address}" : "" + sp_redis_external_new = var.flag_create_external_redis && !var.use_mocks ? var.elasticache_tower.cache_nodes[0].address : "" tower_redis_dns = join("", [local.sp_redis_container, local.sp_redis_external_mock, local.sp_redis_external_new]) sp_redis_dns_with_port = var.flag_create_external_redis && !var.use_mocks ? 
"${local.tower_redis_dns}:${var.elasticache_tower.cache_nodes[0].port}" : "${local.tower_redis_dns}:6379" @@ -77,8 +87,8 @@ locals { # NOTE: `tower_connect_wildcard_dns` is misleading now since one of the options isn't actually a wildcard, but it means no changes in downstream DNS & ALB rules. connect_enabled = var.flag_enable_data_studio && !var.flag_do_not_use_https ? true : false - ct_dns = var.flag_studio_enable_path_routing ? "${var.data_studio_path_routing_url}" : "connect.${var.tower_server_url}" - ct_wildcard_dns = var.flag_studio_enable_path_routing ? "${var.data_studio_path_routing_url}" : "*.${var.tower_server_url}" + ct_dns = var.flag_studio_enable_path_routing ? var.data_studio_path_routing_url : "connect.${var.tower_server_url}" + ct_wildcard_dns = var.flag_studio_enable_path_routing ? var.data_studio_path_routing_url : "*.${var.tower_server_url}" tower_connect_dns = local.connect_enabled ? local.ct_dns : "N/A" tower_connect_wildcard_dns = local.connect_enabled ? local.ct_wildcard_dns : "N/A" tower_connect_server_url = local.connect_enabled ? "https://${local.tower_connect_dns}" : "N/A" @@ -88,20 +98,20 @@ locals { # Using same mock as tower redis to make tests more realistic. ct_redis_container = var.flag_use_container_redis ? "redis" : "" ct_redis_external_mock = var.flag_create_external_redis && var.use_mocks ? "mock.tower-redis.com" : "" - ct_redis_external_new = var.flag_create_external_redis && !var.use_mocks ? "${var.elasticache_tower.cache_nodes[0].address}" : "" + ct_redis_external_new = var.flag_create_external_redis && !var.use_mocks ? var.elasticache_tower.cache_nodes[0].address : "" ct_redis_dns = var.flag_enable_data_studio ? join("", [local.ct_redis_container, local.ct_redis_external_mock, local.ct_redis_external_new]) : "N/A" tower_connect_redis_dns = local.connect_enabled ? local.ct_redis_dns : "N/A" ct_redis_dns_with_port = var.flag_create_external_redis && !var.use_mocks ? 
"${local.tower_connect_redis_dns}:${var.elasticache_tower.cache_nodes[0].port}" : "${local.tower_connect_redis_dns}:6379" - tower_connect_redis_url = var.flag_enable_data_studio && !var.flag_do_not_use_https ? "${local.ct_redis_dns_with_port}" : "N/A" + tower_connect_redis_url = var.flag_enable_data_studio && !var.flag_do_not_use_https ? local.ct_redis_dns_with_port : "N/A" # CONNECT SSH # --------------------------------------------------------------------------------------- - connect_ssh_enabled = var.flag_enable_data_studio_ssh ? true : false - tower_connect_ssh_dns = local.connect_ssh_enabled ? "connect-ssh.${var.tower_server_url}" : "N/A" - tower_connect_ssh_url = local.connect_ssh_enabled ? "https://${local.tower_connect_ssh_dns}" : "N/A" + connect_ssh_enabled = var.flag_enable_data_studio_ssh ? true : false + tower_connect_ssh_dns = local.connect_ssh_enabled ? "connect-ssh.${var.tower_server_url}" : "N/A" + tower_connect_ssh_url = local.connect_ssh_enabled ? "https://${local.tower_connect_ssh_dns}" : "N/A" + - # WAVE-LITE # --------------------------------------------------------------------------------------- # TODO: June 16/25 -- Consider if `rediss://` hardcode aligns with how config is presented. diff --git a/modules/connection_strings/v1.0.0/variables.tf b/modules/connection_strings/v1.0.0/variables.tf index 6520335..cf75f60 100644 --- a/modules/connection_strings/v1.0.0/variables.tf +++ b/modules/connection_strings/v1.0.0/variables.tf @@ -1,11 +1,6 @@ ## ------------------------------------------------------------------------------------ ## Feature Flags ## ------------------------------------------------------------------------------------ -variable "flag_create_load_balancer" { - description = "Whether to create a load balancer" - type = bool -} - variable "flag_do_not_use_https" { description = "Whether to disable HTTPS" type = bool @@ -40,7 +35,7 @@ variable "flag_enable_data_studio" { description = "Whether to use Studios." 
type = bool } - + variable "flag_enable_data_studio_ssh" { description = "Whether SSH access to Data Studios is enabled." type = bool diff --git a/modules/elasticache/output.tf b/modules/elasticache/output.tf index 5fc9783..ef683b4 100644 --- a/modules/elasticache/output.tf +++ b/modules/elasticache/output.tf @@ -11,8 +11,8 @@ output "dns" { value = ( var.elasticache_instance.clustered.multi_az_enabled == true ? - "${aws_elasticache_replication_group.redis.configuration_endpoint_address}" : - "${aws_elasticache_replication_group.redis.primary_endpoint_address}" + aws_elasticache_replication_group.redis.configuration_endpoint_address : + aws_elasticache_replication_group.redis.primary_endpoint_address ) description = "DNS of the Elasticache instance / cluster." } diff --git a/modules/subnet_collector/v1.0.0/main.tf b/modules/subnet_collector/v1.0.0/main.tf index 1195e44..d7f48f5 100644 --- a/modules/subnet_collector/v1.0.0/main.tf +++ b/modules/subnet_collector/v1.0.0/main.tf @@ -1,3 +1,13 @@ +terraform { + required_version = ">= 1.7.0" + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 5.12.0" + } + } +} + ## DATA ## =============================================================================== /* @@ -24,11 +34,6 @@ data "aws_subnet" "sp_subnets" { locals { - - # Combine real and testing flows - mock_sp_vpc = "vpc-0fd280748c05b375b" - mock_aws_subnets_all = ["subnet-00fad764627895f33", "subnet-0d0e8ba3b03b97a65", "subnet-0e07c9b2edbd84ea4", "subnet-0f18039d5ffcf6cd3"] - # For new VPC: Get subnet details from the VPC module real_new_vpc_subnets = var.create_new_vpc ? 
{ public = { diff --git a/scripts/installer/validation/check_configuration.py b/scripts/installer/validation/check_configuration.py index e65ecf0..d302903 100644 --- a/scripts/installer/validation/check_configuration.py +++ b/scripts/installer/validation/check_configuration.py @@ -31,13 +31,6 @@ def log_error_and_exit(message: str): exit(1) -def only_one_true_set(flags: List) -> None: - """Ensure only 1 entry per flag grouping is `true`. Aggregate values of all specified values and then count.""" - values = [flag for flag in flags] - if values.count(True) != 1: - log_error_and_exit(f"Only one of these flags may be true: {str(flags)}.") - - def subnet_privacy(tfvars_subnets: List, vpc_subnets: List, qualifier: str) -> None: """Compare VPC subnets privacy vs CIDRs defined in tfvars for various components.""" try: @@ -46,43 +39,9 @@ def subnet_privacy(tfvars_subnets: List, vpc_subnets: List, qualifier: str) -> N log_error_and_exit(qualifier) -def ensure_dependency_populated(flag: bool, child: str, qualifier: str) -> None: - """If a flag is set, ensure dependent keys also set.""" - - if flag: - try: - assert "REPLACE_ME" not in child, f"[ERROR]: {qualifier}" - logger.debug(f"[OK]: {qualifier}") - except AssertionError: - log_error_and_exit(qualifier) - else: - logger.debug(f"[SKIP]: {qualifier}") - - # ------------------------------------------------------------------------------- # GROUPING FUNCTIONS # ------------------------------------------------------------------------------- -def verify_only_one_true_set(data: SimpleNamespace): - """Check that related config blocks only have 1 true and * false.""" - only_one_true_set([data.flag_create_new_vpc, data.flag_use_existing_vpc]) - only_one_true_set( - [ - data.flag_create_external_db, - data.flag_use_existing_external_db, - data.flag_use_container_db, - ] - ) - only_one_true_set([data.flag_create_external_redis, data.flag_use_container_redis]) - only_one_true_set( - [ - data.flag_create_load_balancer, - 
data.flag_use_private_cacert, - data.flag_do_not_use_https, - ] - ) - only_one_true_set( - [data.flag_use_aws_ses_iam_integration, data.flag_use_existing_smtp] - ) def verify_sensitive_keys(data: SimpleNamespace, data_dictionary: dict): @@ -110,67 +69,25 @@ def verify_sensitive_keys(data: SimpleNamespace, data_dictionary: dict): ) -def verify_tfvars_config_dependencies(data: SimpleNamespace): - """Ensure dependent keys are a populated if a flag is active.""" - # VPC Dependency checks - ensure_dependency_populated( - data.flag_use_existing_vpc, - data.vpc_existing_id, - "`vpc_existing_id` value is missing.", - ) - ensure_dependency_populated( - data.flag_create_load_balancer, - data.alb_certificate_arn, - "`alb_certificate_arn` value is missing.", - ) - - # DNS dependency checks - ensure_dependency_populated( - data.flag_create_route53_private_zone, - data.new_route53_private_zone_name, - "`new_route53_private_zone_name` value is missing.", - ) - ensure_dependency_populated( - data.flag_use_existing_route53_public_zone, - data.existing_route53_public_zone_name, - "`existing_route53_public_zone_name` value is missing.", - ) - ensure_dependency_populated( - data.flag_use_existing_route53_private_zone, - data.existing_route53_private_zone_name, - "`existing_route53_private_zone_name` value is missing.", - ) - - def verify_tower_server_url(data: SimpleNamespace): - """Verify the tower server url is correctly configured.""" - - if data.tower_server_url.startswith("http"): - log_error_and_exit("Field `tower_server_url` must not have a prefix.") - + """Warn when tower_server_port is non-default, since the docker-compose template assumes 8000.""" if data.tower_server_port != "8000": logger.warning( "Tower instance not using default port (8000). Ensure Docker-Compose file is updated accordingly." 
) -def verify_tower_root_users(data: SimpleNamespace): - """Ensure at least one root user is specified.""" - if data.tower_root_users in ["REPLACE_ME", ""]: +def verify_tower_self_signed_certs(data: SimpleNamespace): + """When private cert mode is on, the bucket prefix must be populated.""" + if ( + data.flag_use_private_cacert + and data.private_cacert_bucket_prefix == "REPLACE_ME" + ): log_error_and_exit( - "Please populate `tower_root_user` with at least one email address." + "When `flag_use_private_cacert = true`, `private_cacert_bucket_prefix` must be set to an s3:// URI." ) -def verify_tower_self_signed_certs(data: SimpleNamespace): - """Check self-signed certificate settings (if necessary).""" - if data.flag_use_private_cacert: - if not data.private_cacert_bucket_prefix.startswith("s3://"): - log_error_and_exit( - " Field `private_cacert_bucket_prefix` must start with `s3://`" - ) - - def verify_docker_daemon_loggin(data: SimpleNamespace): """Check Docker Daemon logging configuration.""" logging_flags = [ @@ -396,39 +313,23 @@ def verify_database_configuration(data: SimpleNamespace): def verify_docker_version(data: SimpleNamespace): - """Make sure MySQL 5.6 is not present""" + """Reject docker-compose templates that pin a MySQL image below 8.x.""" yaml.sort_base_mapping_type_on_output = False + mysql_pin = re.compile(r"mysql:(\d+)") with open("assets/src/docker_compose/docker-compose.yml.tpl") as file: # PYYAML fails with `yaml.scanner.ScannerError` due to Terraform templating. Switching to less elegant alternative. - # dcfile = yaml.safe_load(file) - # image = dcfile['services']['db']['image'] - lines = file.readlines() - - for line in lines: - if "mysql:5.6" in line: + for line in file.readlines(): + match = mysql_pin.search(line) + if match and int(match.group(1)) < 8: log_error_and_exit( - "MySQL 5.6 is obsolete. Please chooses MySQL 5.7 or higher in your docker-compose file." + f"docker-compose template pins MySQL {match.group(1)}.x. 
master supports only MySQL 8 and above." ) - if "5.6" in data.db_engine_version: - log_error_and_exit( - "MySQL 5.6 is obsolete. Please chooses MySQL 5.7 in `db_engine_version`." - ) - def verify_data_studio(data: SimpleNamespace): """Verify fields related to Data Studio.""" - if data.flag_enable_data_studio: - if data.flag_limit_data_studio_to_some_workspaces: - # https://www.geeksforgeeks.org/python-check-whether-string-contains-only-numbers-or-not/ - # if re.match('[0-9]*$', data.data_studio_eligible_workspaces): - if not re.findall(r"[0-9]+,[0-9]+", data.data_studio_eligible_workspaces): - log_error_and_exit( - "`data_studio_eligible_workspaces may only be populated by digits and commas." - ) - if data.flag_use_private_cacert: logger.warning( "Please see documentation to understand how to make private certs work with Studios images." @@ -465,17 +366,6 @@ def verify_data_studio_ssh(data: SimpleNamespace): "Studios SSH requires connect-proxy >= 0.10.0. Please verify your `data_studio_container_version`." ) - if data.flag_limit_data_studio_ssh_to_some_workspaces: - workspaces = data.data_studio_ssh_eligible_workspaces - try: - workspaces = workspaces.split(",") - for wsp in workspaces: - isinstance(int(wsp), int) - except ValueError: - log_error_and_exit( - "Variable `data_studio_ssh_eligible_workspaces` has non-integer values. Fix before deploying." - ) - def verify_alb_settings(data: SimpleNamespace): """Verify that user does not have contradictory settings in case of ALB vs. no ALB.""" @@ -543,25 +433,6 @@ def verify_insecure_platform(data: SimpleNamespace): log_error_and_exit("Wave-Lite requires a secure Seqera Platform endpoint.") -def verify_pipeline_versioning(data: SimpleNamespace): - """Conduct checks if pipeline versioning is active.""" - if data.tower_enable_pipeline_versioning: - # All workspaces eligible. Return. 
- if data.pipeline_versioning_eligible_workspaces == "": - return - - # Only some eligible (via comma-delimited string); verify - workspaces = data.pipeline_versioning_eligible_workspaces - try: - workspaces = workspaces.split(",") - for wsp in workspaces: - isinstance(int(wsp), int) - except ValueError: - log_error_and_exit( - "Variable `pipeline_versioning_eligible_workspaces` has non-integer values. Fix before deploying." - ) - - # ------------------------------------------------------------------------------- # MAIN # ------------------------------------------------------------------------------- @@ -574,30 +445,17 @@ def verify_pipeline_versioning(data: SimpleNamespace): data_dictionary = tf_vars_json_payload data = SimpleNamespace(**data_dictionary) - # Check minimum container version. master supports only the latest Platform major (v26.1.x). - # Bug-fix support for v25-and-below lives on the release/v25 branch — see documentation/branching_policy.md. - if not ((data.tower_container_version).startswith("v")) or ( - data.tower_container_version < "v26.1.0" - ): - log_error_and_exit( - "This branch of the installer supports only Seqera Platform v26.1.0+. " - "For v25.x or earlier, check out the release/v25 branch." 
- ) - # Verify tfvars fields print("\n") logger.info("Verifying TFVARS file") logger.info("-" * 50) - verify_only_one_true_set(data) verify_sensitive_keys(data, data_dictionary) - verify_tfvars_config_dependencies(data) verify_docker_version(data) # Verify Tower application configurations print("\n") logger.info("Verifying Tower configurations") logger.info("-" * 50) - verify_tower_root_users(data) verify_tower_self_signed_certs(data) verify_tower_server_url(data) verify_docker_daemon_loggin(data) @@ -647,12 +505,6 @@ def verify_pipeline_versioning(data: SimpleNamespace): verify_production_deployment(data) verify_insecure_platform(data=data) - # Check pipeline versioning - print("\n") - logger.info("Verifying pipeline versioning") - logger.info("-" * 50) - verify_pipeline_versioning(data) - print("\n") logger.info("Finished tfvars configuration check.") diff --git a/templates/TEMPLATE_terraform.tfvars b/templates/TEMPLATE_terraform.tfvars index 332dd37..209a631 100644 --- a/templates/TEMPLATE_terraform.tfvars +++ b/templates/TEMPLATE_terraform.tfvars @@ -225,8 +225,8 @@ to work. e.g: - mywavelite.example.com */ -flag_use_wave = false -flag_use_wave_lite = false +flag_use_wave = false +flag_use_wave_lite = false # TODO(#332): bump to the wave-lite version paired with the v26.1.x release set. wave_lite_container_version = "v1.29.1" @@ -439,19 +439,19 @@ NOTES: */ # Studios -flag_enable_data_studio = true +flag_enable_data_studio = true # TODO(#332): bump to the data-studio / connect-proxy version paired with the v26.1.x release set. 
-data_studio_container_version = "0.11.0" -flag_limit_data_studio_to_some_workspaces = false -data_studio_eligible_workspaces = "" +data_studio_container_version = "0.11.0" +flag_limit_data_studio_to_some_workspaces = false +data_studio_eligible_workspaces = "" # Studios SSH flag_enable_data_studio_ssh = false flag_limit_data_studio_ssh_to_some_workspaces = false data_studio_ssh_eligible_workspaces = "" -flag_studio_enable_path_routing = false -data_studio_path_routing_url = "REPLACE_ME_IF_NECESSARY" +flag_studio_enable_path_routing = false +data_studio_path_routing_url = "REPLACE_ME_IF_NECESSARY" @@ -500,7 +500,7 @@ data_studio_options = { status = "deprecated" container = "public.cr.seqera.io/platform/data-studio-xpra:6.2.0-r2-1-0.9.0" }, - vscode-1-101-2-0-11-0 = { + vscode-1-101-2-0-11-0 = { qualifier = "VSCODE-1-101-2-0-11-0" icon = "vscode" tool = "vscode" @@ -555,7 +555,7 @@ db_database_name = "tower" ## ------------------------------------------------------------------------------------ This section added to handle new connection string requirements for Tower v24.1.0+ */ -db_container_engine = "mysql" +db_container_engine = "mysql" # TODO(#332): confirm the container DB engine version paired with the v26.1.x release set. db_container_engine_version = "8.0" @@ -580,7 +580,7 @@ WARNING: - You must supply your own backup solution. */ -db_engine = "mysql" +db_engine = "mysql" # TODO(#332): confirm the RDS engine version & matching param group paired with the v26.1.x release set. db_engine_version = "8.0" db_param_group = "mysql8.0" @@ -594,7 +594,7 @@ db_backup_retention_period = 7 db_enable_storage_encrypted = true -wave_lite_db_engine = "postgres" +wave_lite_db_engine = "postgres" # TODO(#332): confirm the wave-lite Postgres engine version paired with the v26.1.x release set. 
wave_lite_db_engine_version = "17.5" wave_lite_db_param_group = "postgres17" diff --git a/tests/datafiles/012_testing_outputs.tf b/tests/datafiles/012_testing_outputs.tf index a36d596..5d7a406 100644 --- a/tests/datafiles/012_testing_outputs.tf +++ b/tests/datafiles/012_testing_outputs.tf @@ -2,6 +2,10 @@ ## Write local values to output for testing purposes ## Keep name of local the same; prefix with 'local_' for easy conversion ## ------------------------------------------------------------------------------------ -output local_wave_enabled { - value = local.wave_enabled -} +terraform { + required_version = ">= 1.7.0" +} + +output "local_wave_enabled" { + value = local.wave_enabled +} diff --git a/tests/datafiles/generate_core_data.sh b/tests/datafiles/generate_core_data.sh index 0979a2a..d71130b 100755 --- a/tests/datafiles/generate_core_data.sh +++ b/tests/datafiles/generate_core_data.sh @@ -44,8 +44,10 @@ aws_account = "128997144437" aws_region = "us-east-1" aws_profile = "development" -# TODO(#332): bump to the v26.1.x GA tag; baselines in tests/datafiles/expected_results/ will need to be regenerated. -tower_container_version = "v25.3.0" +# TODO(#332): replace the v26.1.0 placeholder below with the v26.1.x GA tag once known. +# Baselines in tests/datafiles/expected_results/ will need to be regenerated if the GA tag +# affects rendered template output. +tower_container_version = "v26.1.0" ## ------------------------------------------------------------------------------------ @@ -330,7 +332,15 @@ cat << 'EOF' > 012_testing_outputs.tf ## Write local values to output for testing purposes ## Keep name of local the same; prefix with 'local_' for easy conversion ## ------------------------------------------------------------------------------------ +terraform { + required_version = ">= 1.7.0" +} + output local_wave_enabled { - value = local.wave_enabled -} + value = local.wave_enabled +} EOF + +# Format generated fixtures so the repo-wide fmt-check stays green. 
Soft-fail when +# terraform is not on PATH (some dev shells); CI always has it. +terraform fmt 012_testing_outputs.tf terraform.tfvars base-overrides.auto.tfvars >/dev/null 2>&1 || true diff --git a/tests/terraform/README.md b/tests/terraform/README.md new file mode 100644 index 0000000..a4bb51c --- /dev/null +++ b/tests/terraform/README.md @@ -0,0 +1,47 @@ +# `terraform test` suite + +Native Terraform tests (`.tftest.hcl`) covering variable validations defined in +`variables.tf`. Complements the pytest suite under `tests/unit/` and `tests/integration/`, +which still owns rendered-template-content assertions and Python validator coverage. + +## What's here today + +| File | Asserts | +| --- | --- | +| `version_validation.tftest.hcl` | `tower_container_version` floor (v26.1.0+) and tag-shape regex. | +| `string_shape_validation.tftest.hcl` | `tower_server_url`, `tower_root_users`, `alb_certificate_arn`, `private_cacert_bucket_prefix`, MySQL 8.x floor on both DB engine version variables. | +| `workspace_id_validation.tftest.hcl` | Comma-separated-integer rule on `data_studio_eligible_workspaces`, `data_studio_ssh_eligible_workspaces`, `pipeline_versioning_eligible_workspaces`. | + +Each test uses `command = plan` and `expect_failures = [var.foo]` to assert a specific +variable validation trips. Other variables are sourced from a baseline tfvars file. + +## How to run + +`terraform test` requires a complete tfvars baseline so all required variables resolve. +The same fixture used by pytest works here: + +```sh +make generate_test_data # produces tests/datafiles/terraform.tfvars +make terraform_test # runs `terraform test` against tests/terraform/ +``` + +Or directly: + +```sh +terraform init +terraform test \ + -test-directory=tests/terraform \ + -var-file=tests/datafiles/terraform.tfvars +``` + +## What does NOT live here (yet) + +- **Plan-shape assertions** beyond variable validation (e.g. "this combination of flags + produces N security groups"). 
Those will land as a follow-up under #334 once the shape + of resource-level assertions is settled. +- **Rendered-template-content tests** (e.g. asserting a particular line appears in the + rendered `tower.yml`). Stays in pytest — `terraform test` is awkward for string content. +- **CI**. A workflow that runs both `terraform test` and `pytest` on every PR is a + separate slice under #334. + +Refs #334. diff --git a/tests/terraform/cross_variable_validation.tftest.hcl b/tests/terraform/cross_variable_validation.tftest.hcl new file mode 100644 index 0000000..92a755f --- /dev/null +++ b/tests/terraform/cross_variable_validation.tftest.hcl @@ -0,0 +1,173 @@ +## Cross-variable validation rules: only-one-of-N flag groups and +## conditional-dependency requirements. + +mock_provider "aws" {} + +# Each SSM payload is jsondecode'd by the root locals, which then look up specific +# keys (and an inner `ssm_key` field) — so the stub JSON has to mirror that shape. + +override_data { + target = data.aws_ssm_parameter.tower_secrets + values = { + value = "{\"TOWER_DB_USER\":{\"value\":\"stub\",\"ssm_key\":\"/stub/TOWER_DB_USER\"},\"TOWER_DB_PASSWORD\":{\"value\":\"stub\",\"ssm_key\":\"/stub/TOWER_DB_PASSWORD\"},\"TOWER_DB_MASTER_USER\":{\"value\":\"stub\",\"ssm_key\":\"/stub/TOWER_DB_MASTER_USER\"},\"TOWER_DB_MASTER_PASSWORD\":{\"value\":\"stub\",\"ssm_key\":\"/stub/TOWER_DB_MASTER_PASSWORD\"}}" + } +} + +override_data { + target = data.aws_ssm_parameter.seqerakit_secrets + values = { + value = "{\"TOWER_AWS_ROLE\":{\"value\":\"stub\",\"ssm_key\":\"/stub/TOWER_AWS_ROLE\"}}" + } +} + +override_data { + target = data.aws_ssm_parameter.groundswell_secrets + values = { + value = "{\"SWELL_DB_USER\":{\"value\":\"stub\",\"ssm_key\":\"/stub/SWELL_DB_USER\"},\"SWELL_DB_PASSWORD\":{\"value\":\"stub\",\"ssm_key\":\"/stub/SWELL_DB_PASSWORD\"}}" + } +} + +override_data { + target = data.aws_ssm_parameter.wave_lite_secrets + values = { + value = 
"{\"WAVE_LITE_DB_MASTER_USER\":{\"value\":\"stub\",\"ssm_key\":\"/stub/WAVE_LITE_DB_MASTER_USER\"},\"WAVE_LITE_DB_MASTER_PASSWORD\":{\"value\":\"stub\",\"ssm_key\":\"/stub/WAVE_LITE_DB_MASTER_PASSWORD\"},\"WAVE_LITE_DB_LIMITED_USER\":{\"value\":\"stub\",\"ssm_key\":\"/stub/WAVE_LITE_DB_LIMITED_USER\"},\"WAVE_LITE_DB_LIMITED_PASSWORD\":{\"value\":\"stub\",\"ssm_key\":\"/stub/WAVE_LITE_DB_LIMITED_PASSWORD\"},\"WAVE_LITE_REDIS_AUTH\":{\"value\":\"stub\",\"ssm_key\":\"/stub/WAVE_LITE_REDIS_AUTH\"}}" + } +} + +override_data { + target = module.connection_strings.data.external.generate_db_connection_string + values = { + result = { + status = "0" + value = "?permitMysqlScheme=true" + } + } +} + +run "rejects_two_vpc_sources_true" { + command = plan + + variables { + flag_create_new_vpc = true + flag_use_existing_vpc = true + } + + expect_failures = [var.flag_create_new_vpc] +} + +run "rejects_zero_vpc_sources_true" { + command = plan + + variables { + flag_create_new_vpc = false + flag_use_existing_vpc = false + } + + expect_failures = [var.flag_create_new_vpc] +} + +run "rejects_two_db_sources_true" { + command = plan + + variables { + flag_create_external_db = true + flag_use_existing_external_db = true + flag_use_container_db = false + } + + expect_failures = [var.flag_create_external_db] +} + +run "rejects_two_redis_sources_true" { + command = plan + + variables { + flag_create_external_redis = true + flag_use_container_redis = true + } + + expect_failures = [var.flag_create_external_redis] +} + +run "rejects_two_endpoint_modes_true" { + command = plan + + variables { + flag_create_load_balancer = true + flag_use_private_cacert = true + flag_do_not_use_https = false + } + + expect_failures = [var.flag_create_load_balancer] +} + +run "rejects_both_smtp_modes_true" { + command = plan + + variables { + flag_use_aws_ses_iam_integration = true + flag_use_existing_smtp = true + } + + expect_failures = [var.flag_use_aws_ses_iam_integration] +} + +# ----- 
conditional-dependency rules --------------------------------------------------- + +run "rejects_existing_vpc_without_id" { + command = plan + + variables { + flag_use_existing_vpc = true + flag_create_new_vpc = false + vpc_existing_id = "REPLACE_ME" + } + + expect_failures = [var.vpc_existing_id] +} + +run "rejects_alb_creation_without_cert_arn" { + command = plan + + variables { + flag_create_load_balancer = true + flag_use_private_cacert = false + flag_do_not_use_https = false + alb_certificate_arn = "REPLACE_ME" + } + + expect_failures = [var.alb_certificate_arn] +} + +run "rejects_route53_private_zone_without_name" { + command = plan + + variables { + flag_create_route53_private_zone = true + new_route53_private_zone_name = "REPLACE_ME" + } + + expect_failures = [var.new_route53_private_zone_name] +} + +run "rejects_existing_public_zone_without_name" { + command = plan + + variables { + flag_use_existing_route53_public_zone = true + existing_route53_public_zone_name = "REPLACE_ME" + } + + expect_failures = [var.existing_route53_public_zone_name] +} + +run "rejects_existing_private_zone_without_name" { + command = plan + + variables { + flag_use_existing_route53_private_zone = true + existing_route53_private_zone_name = "REPLACE_ME" + } + + expect_failures = [var.existing_route53_private_zone_name] +} diff --git a/tests/terraform/string_shape_validation.tftest.hcl b/tests/terraform/string_shape_validation.tftest.hcl new file mode 100644 index 0000000..1817b84 --- /dev/null +++ b/tests/terraform/string_shape_validation.tftest.hcl @@ -0,0 +1,134 @@ +## String-shape variable validations: URLs, ARNs, S3 prefix, MySQL 8.x floor. + +mock_provider "aws" {} + +# Each SSM payload is jsondecode'd by the root locals, which then look up specific +# keys (and an inner `ssm_key` field) — so the stub JSON has to mirror that shape. 
+ +override_data { + target = data.aws_ssm_parameter.tower_secrets + values = { + value = "{\"TOWER_DB_USER\":{\"value\":\"stub\",\"ssm_key\":\"/stub/TOWER_DB_USER\"},\"TOWER_DB_PASSWORD\":{\"value\":\"stub\",\"ssm_key\":\"/stub/TOWER_DB_PASSWORD\"},\"TOWER_DB_MASTER_USER\":{\"value\":\"stub\",\"ssm_key\":\"/stub/TOWER_DB_MASTER_USER\"},\"TOWER_DB_MASTER_PASSWORD\":{\"value\":\"stub\",\"ssm_key\":\"/stub/TOWER_DB_MASTER_PASSWORD\"}}" + } +} + +override_data { + target = data.aws_ssm_parameter.seqerakit_secrets + values = { + value = "{\"TOWER_AWS_ROLE\":{\"value\":\"stub\",\"ssm_key\":\"/stub/TOWER_AWS_ROLE\"}}" + } +} + +override_data { + target = data.aws_ssm_parameter.groundswell_secrets + values = { + value = "{\"SWELL_DB_USER\":{\"value\":\"stub\",\"ssm_key\":\"/stub/SWELL_DB_USER\"},\"SWELL_DB_PASSWORD\":{\"value\":\"stub\",\"ssm_key\":\"/stub/SWELL_DB_PASSWORD\"}}" + } +} + +override_data { + target = data.aws_ssm_parameter.wave_lite_secrets + values = { + value = "{\"WAVE_LITE_DB_MASTER_USER\":{\"value\":\"stub\",\"ssm_key\":\"/stub/WAVE_LITE_DB_MASTER_USER\"},\"WAVE_LITE_DB_MASTER_PASSWORD\":{\"value\":\"stub\",\"ssm_key\":\"/stub/WAVE_LITE_DB_MASTER_PASSWORD\"},\"WAVE_LITE_DB_LIMITED_USER\":{\"value\":\"stub\",\"ssm_key\":\"/stub/WAVE_LITE_DB_LIMITED_USER\"},\"WAVE_LITE_DB_LIMITED_PASSWORD\":{\"value\":\"stub\",\"ssm_key\":\"/stub/WAVE_LITE_DB_LIMITED_PASSWORD\"},\"WAVE_LITE_REDIS_AUTH\":{\"value\":\"stub\",\"ssm_key\":\"/stub/WAVE_LITE_REDIS_AUTH\"}}" + } +} + +override_data { + target = module.connection_strings.data.external.generate_db_connection_string + values = { + result = { + status = "0" + value = "?permitMysqlScheme=true" + } + } +} + +run "rejects_http_prefix_on_tower_server_url" { + command = plan + + variables { + tower_server_url = "http://tower.example.com" + } + + expect_failures = [var.tower_server_url] +} + +run "rejects_https_prefix_on_tower_server_url" { + command = plan + + variables { + tower_server_url = "https://tower.example.com" 
+ } + + expect_failures = [var.tower_server_url] +} + +run "rejects_unset_tower_root_users" { + command = plan + + variables { + tower_root_users = "REPLACE_ME" + } + + expect_failures = [var.tower_root_users] +} + +run "rejects_empty_tower_root_users" { + command = plan + + variables { + tower_root_users = " " + } + + expect_failures = [var.tower_root_users] +} + +run "rejects_malformed_alb_certificate_arn" { + command = plan + + variables { + alb_certificate_arn = "not-an-arn" + } + + expect_failures = [var.alb_certificate_arn] +} + +run "rejects_non_s3_private_cacert_bucket_prefix" { + command = plan + + variables { + private_cacert_bucket_prefix = "https://my-bucket.s3.amazonaws.com" + } + + expect_failures = [var.private_cacert_bucket_prefix] +} + +run "rejects_mysql_5_7_db_engine_version" { + command = plan + + variables { + db_engine_version = "5.7" + } + + expect_failures = [var.db_engine_version] +} + +run "rejects_mysql_5_7_db_container_engine_version" { + command = plan + + variables { + db_container_engine_version = "5.7" + } + + expect_failures = [var.db_container_engine_version] +} + +run "rejects_mysql_5_6_db_engine_version" { + command = plan + + variables { + db_engine_version = "5.6" + } + + expect_failures = [var.db_engine_version] +} diff --git a/tests/terraform/version_validation.tftest.hcl b/tests/terraform/version_validation.tftest.hcl new file mode 100644 index 0000000..b9b9c6a --- /dev/null +++ b/tests/terraform/version_validation.tftest.hcl @@ -0,0 +1,97 @@ +## tower_container_version validation rules: floor + tag-shape. +## See tests/terraform/README.md for how to run. + +mock_provider "aws" {} + +# Each SSM payload is jsondecode'd by the root locals, which then look up specific +# keys (and an inner `ssm_key` field) — so the stub JSON has to mirror that shape. 
+ +override_data { + target = data.aws_ssm_parameter.tower_secrets + values = { + value = "{\"TOWER_DB_USER\":{\"value\":\"stub\",\"ssm_key\":\"/stub/TOWER_DB_USER\"},\"TOWER_DB_PASSWORD\":{\"value\":\"stub\",\"ssm_key\":\"/stub/TOWER_DB_PASSWORD\"},\"TOWER_DB_MASTER_USER\":{\"value\":\"stub\",\"ssm_key\":\"/stub/TOWER_DB_MASTER_USER\"},\"TOWER_DB_MASTER_PASSWORD\":{\"value\":\"stub\",\"ssm_key\":\"/stub/TOWER_DB_MASTER_PASSWORD\"}}" + } +} + +override_data { + target = data.aws_ssm_parameter.seqerakit_secrets + values = { + value = "{\"TOWER_AWS_ROLE\":{\"value\":\"stub\",\"ssm_key\":\"/stub/TOWER_AWS_ROLE\"}}" + } +} + +override_data { + target = data.aws_ssm_parameter.groundswell_secrets + values = { + value = "{\"SWELL_DB_USER\":{\"value\":\"stub\",\"ssm_key\":\"/stub/SWELL_DB_USER\"},\"SWELL_DB_PASSWORD\":{\"value\":\"stub\",\"ssm_key\":\"/stub/SWELL_DB_PASSWORD\"}}" + } +} + +override_data { + target = data.aws_ssm_parameter.wave_lite_secrets + values = { + value = "{\"WAVE_LITE_DB_MASTER_USER\":{\"value\":\"stub\",\"ssm_key\":\"/stub/WAVE_LITE_DB_MASTER_USER\"},\"WAVE_LITE_DB_MASTER_PASSWORD\":{\"value\":\"stub\",\"ssm_key\":\"/stub/WAVE_LITE_DB_MASTER_PASSWORD\"},\"WAVE_LITE_DB_LIMITED_USER\":{\"value\":\"stub\",\"ssm_key\":\"/stub/WAVE_LITE_DB_LIMITED_USER\"},\"WAVE_LITE_DB_LIMITED_PASSWORD\":{\"value\":\"stub\",\"ssm_key\":\"/stub/WAVE_LITE_DB_LIMITED_PASSWORD\"},\"WAVE_LITE_REDIS_AUTH\":{\"value\":\"stub\",\"ssm_key\":\"/stub/WAVE_LITE_REDIS_AUTH\"}}" + } +} + +override_data { + target = module.connection_strings.data.external.generate_db_connection_string + values = { + result = { + status = "0" + value = "?permitMysqlScheme=true" + } + } +} + +run "rejects_v25_below_floor" { + command = plan + + variables { + tower_container_version = "v25.3.0" + } + + expect_failures = [var.tower_container_version] +} + +run "rejects_v23_below_floor" { + command = plan + + variables { + tower_container_version = "v23.4.5" + } + + expect_failures = 
[var.tower_container_version] +} + +run "rejects_missing_v_prefix" { + command = plan + + variables { + tower_container_version = "26.1.0" + } + + expect_failures = [var.tower_container_version] +} + +run "rejects_non_semver_tag" { + command = plan + + variables { + tower_container_version = "vlatest" + } + + expect_failures = [var.tower_container_version] +} + +run "accepts_v26_1_0" { + command = plan + + variables { + tower_container_version = "v26.1.0" + } + + # No expect_failures — plan is expected to proceed past variable validation. + # Other validations / cross-variable Python checks may still fail downstream; + # this test only asserts the floor passes. +} diff --git a/tests/terraform/workspace_id_validation.tftest.hcl b/tests/terraform/workspace_id_validation.tftest.hcl new file mode 100644 index 0000000..b2d1bbf --- /dev/null +++ b/tests/terraform/workspace_id_validation.tftest.hcl @@ -0,0 +1,108 @@ +## Comma-separated-integer workspace-ID list variables. + +mock_provider "aws" {} + +# Each SSM payload is jsondecode'd by the root locals, which then look up specific +# keys (and an inner `ssm_key` field) — so the stub JSON has to mirror that shape. 
+ +override_data { + target = data.aws_ssm_parameter.tower_secrets + values = { + value = "{\"TOWER_DB_USER\":{\"value\":\"stub\",\"ssm_key\":\"/stub/TOWER_DB_USER\"},\"TOWER_DB_PASSWORD\":{\"value\":\"stub\",\"ssm_key\":\"/stub/TOWER_DB_PASSWORD\"},\"TOWER_DB_MASTER_USER\":{\"value\":\"stub\",\"ssm_key\":\"/stub/TOWER_DB_MASTER_USER\"},\"TOWER_DB_MASTER_PASSWORD\":{\"value\":\"stub\",\"ssm_key\":\"/stub/TOWER_DB_MASTER_PASSWORD\"}}" + } +} + +override_data { + target = data.aws_ssm_parameter.seqerakit_secrets + values = { + value = "{\"TOWER_AWS_ROLE\":{\"value\":\"stub\",\"ssm_key\":\"/stub/TOWER_AWS_ROLE\"}}" + } +} + +override_data { + target = data.aws_ssm_parameter.groundswell_secrets + values = { + value = "{\"SWELL_DB_USER\":{\"value\":\"stub\",\"ssm_key\":\"/stub/SWELL_DB_USER\"},\"SWELL_DB_PASSWORD\":{\"value\":\"stub\",\"ssm_key\":\"/stub/SWELL_DB_PASSWORD\"}}" + } +} + +override_data { + target = data.aws_ssm_parameter.wave_lite_secrets + values = { + value = "{\"WAVE_LITE_DB_MASTER_USER\":{\"value\":\"stub\",\"ssm_key\":\"/stub/WAVE_LITE_DB_MASTER_USER\"},\"WAVE_LITE_DB_MASTER_PASSWORD\":{\"value\":\"stub\",\"ssm_key\":\"/stub/WAVE_LITE_DB_MASTER_PASSWORD\"},\"WAVE_LITE_DB_LIMITED_USER\":{\"value\":\"stub\",\"ssm_key\":\"/stub/WAVE_LITE_DB_LIMITED_USER\"},\"WAVE_LITE_DB_LIMITED_PASSWORD\":{\"value\":\"stub\",\"ssm_key\":\"/stub/WAVE_LITE_DB_LIMITED_PASSWORD\"},\"WAVE_LITE_REDIS_AUTH\":{\"value\":\"stub\",\"ssm_key\":\"/stub/WAVE_LITE_REDIS_AUTH\"}}" + } +} + +override_data { + target = module.connection_strings.data.external.generate_db_connection_string + values = { + result = { + status = "0" + value = "?permitMysqlScheme=true" + } + } +} + +run "rejects_non_numeric_data_studio_eligible_workspaces" { + command = plan + + variables { + data_studio_eligible_workspaces = "abc,def" + } + + expect_failures = [var.data_studio_eligible_workspaces] +} + +run "rejects_trailing_comma_data_studio_eligible_workspaces" { + command = plan + + variables { + 
data_studio_eligible_workspaces = "123,456," + } + + expect_failures = [var.data_studio_eligible_workspaces] +} + +run "accepts_empty_data_studio_eligible_workspaces" { + command = plan + + variables { + data_studio_eligible_workspaces = "" + } +} + +run "accepts_single_id_data_studio_eligible_workspaces" { + command = plan + + variables { + data_studio_eligible_workspaces = "123" + } +} + +run "accepts_multiple_ids_data_studio_eligible_workspaces" { + command = plan + + variables { + data_studio_eligible_workspaces = "123,456,789" + } +} + +run "rejects_space_separated_data_studio_ssh_eligible_workspaces" { + command = plan + + variables { + data_studio_ssh_eligible_workspaces = "123 456" + } + + expect_failures = [var.data_studio_ssh_eligible_workspaces] +} + +run "rejects_non_numeric_pipeline_versioning_eligible_workspaces" { + command = plan + + variables { + pipeline_versioning_eligible_workspaces = "1,two,3" + } + + expect_failures = [var.pipeline_versioning_eligible_workspaces] +} diff --git a/variables.tf b/variables.tf index f130d60..5dc25bd 100644 --- a/variables.tf +++ b/variables.tf @@ -43,6 +43,24 @@ variable "tower_container_version" { type = string description = "Seqera Platform container version. master supports only v26.1.0+ — earlier majors live on the release/vN branches. See documentation/branching_policy.md." # TODO(#332): once v26.1.x GA is selected, document the exact pinned tag here for reference. + + validation { + condition = can(regex("^v[0-9]+\\.[0-9]+\\.[0-9]+", var.tower_container_version)) + error_message = "tower_container_version must be a Seqera Platform tag like \"v26.1.0\"." + } + + validation { + # The leading !can(...) short-circuits when the tag-shape validation above is already + # failing, so a malformed input produces the shape error rather than a regex crash. 
+ condition = ( + !can(regex("^v[0-9]+\\.[0-9]+\\.[0-9]+", var.tower_container_version)) + || ( + tonumber(regex("^v([0-9]+)\\.", var.tower_container_version)[0]) == 26 + && tonumber(regex("^v[0-9]+\\.([0-9]+)\\.", var.tower_container_version)[0]) >= 1 + ) + ) + error_message = "This branch of the installer supports Seqera Platform v26.1.0 through v26.x. For v25.x or earlier, check out the release/v25 branch." + } } @@ -76,21 +94,55 @@ variable "custom_resource_naming_prefix" { type = string } # Flags - Infrastructure # ------------------------------------------------------------------------------------ -variable "flag_create_new_vpc" { type = bool } +# Cross-variable "exactly one of N flags must be true" rules are attached to the +# first flag in each group, so the validation fires regardless of which flag the +# user toggled. + +variable "flag_create_new_vpc" { + type = bool + validation { + condition = length([for f in [var.flag_create_new_vpc, var.flag_use_existing_vpc] : f if f]) == 1 + error_message = "Exactly one of flag_create_new_vpc / flag_use_existing_vpc must be true." + } +} variable "flag_use_existing_vpc" { type = bool } -variable "flag_create_external_db" { type = bool } +variable "flag_create_external_db" { + type = bool + validation { + condition = length([for f in [var.flag_create_external_db, var.flag_use_existing_external_db, var.flag_use_container_db] : f if f]) == 1 + error_message = "Exactly one of flag_create_external_db / flag_use_existing_external_db / flag_use_container_db must be true." 
+ } +} variable "flag_use_existing_external_db" { type = bool } variable "flag_use_container_db" { type = bool } -variable "flag_create_external_redis" { type = bool } # TO DO +variable "flag_create_external_redis" { + type = bool # TO DO + validation { + condition = length([for f in [var.flag_create_external_redis, var.flag_use_container_redis] : f if f]) == 1 + error_message = "Exactly one of flag_create_external_redis / flag_use_container_redis must be true." + } +} variable "flag_use_container_redis" { type = bool } -variable "flag_create_load_balancer" { type = bool } +variable "flag_create_load_balancer" { + type = bool + validation { + condition = length([for f in [var.flag_create_load_balancer, var.flag_use_private_cacert, var.flag_do_not_use_https] : f if f]) == 1 + error_message = "Exactly one of flag_create_load_balancer / flag_use_private_cacert / flag_do_not_use_https must be true." + } +} variable "flag_use_private_cacert" { type = bool } variable "flag_do_not_use_https" { type = bool } -variable "flag_use_aws_ses_iam_integration" { type = bool } +variable "flag_use_aws_ses_iam_integration" { + type = bool + validation { + condition = length([for f in [var.flag_use_aws_ses_iam_integration, var.flag_use_existing_smtp] : f if f]) == 1 + error_message = "Exactly one of flag_use_aws_ses_iam_integration / flag_use_existing_smtp must be true." 
+ } +} variable "flag_use_existing_smtp" { type = bool } @@ -127,17 +179,42 @@ variable "flag_use_existing_route53_public_zone" { type = bool } variable "flag_use_existing_route53_private_zone" { type = bool } variable "flag_create_hosts_file_entry" { type = bool } -variable "new_route53_private_zone_name" { type = string } +variable "new_route53_private_zone_name" { + type = string + validation { + condition = !var.flag_create_route53_private_zone || (var.new_route53_private_zone_name != "REPLACE_ME" && length(trimspace(var.new_route53_private_zone_name)) > 0) + error_message = "When flag_create_route53_private_zone = true, new_route53_private_zone_name must be set." + } +} + +variable "existing_route53_public_zone_name" { + type = string + validation { + condition = !var.flag_use_existing_route53_public_zone || (var.existing_route53_public_zone_name != "REPLACE_ME" && length(trimspace(var.existing_route53_public_zone_name)) > 0) + error_message = "When flag_use_existing_route53_public_zone = true, existing_route53_public_zone_name must be set." + } +} -variable "existing_route53_public_zone_name" { type = string } -variable "existing_route53_private_zone_name" { type = string } +variable "existing_route53_private_zone_name" { + type = string + validation { + condition = !var.flag_use_existing_route53_private_zone || (var.existing_route53_private_zone_name != "REPLACE_ME" && length(trimspace(var.existing_route53_private_zone_name)) > 0) + error_message = "When flag_use_existing_route53_private_zone = true, existing_route53_private_zone_name must be set." 
+ } +} # ------------------------------------------------------------------------------------ # Custom Private CA # ------------------------------------------------------------------------------------ -variable "private_cacert_bucket_prefix" { type = string } +variable "private_cacert_bucket_prefix" { + type = string + validation { + condition = var.private_cacert_bucket_prefix == "REPLACE_ME" || startswith(var.private_cacert_bucket_prefix, "s3://") + error_message = "private_cacert_bucket_prefix must start with \"s3://\" (or be \"REPLACE_ME\" if private certs are not in use)." + } +} # ------------------------------------------------------------------------------------ @@ -168,7 +245,13 @@ variable "flag_map_public_ip_on_launch" { # VPC (Existing) # ------------------------------------------------------------------------------------ -variable "vpc_existing_id" { type = string } +variable "vpc_existing_id" { + type = string + validation { + condition = !var.flag_use_existing_vpc || (var.vpc_existing_id != "REPLACE_ME" && length(trimspace(var.vpc_existing_id)) > 0) + error_message = "When flag_use_existing_vpc = true, vpc_existing_id must be set." + } +} variable "vpc_existing_ec2_subnets" { type = list(string) } variable "vpc_existing_batch_subnets" { type = list(string) } variable "vpc_existing_db_subnets" { type = list(string) } @@ -230,11 +313,25 @@ variable "data_explorer_disabled_workspaces" { type = string } variable "flag_enable_data_studio" { type = bool } variable "data_studio_container_version" { type = string } variable "flag_limit_data_studio_to_some_workspaces" { type = bool } -variable "data_studio_eligible_workspaces" { type = string } +variable "data_studio_eligible_workspaces" { + type = string + description = "Comma-separated list of numeric workspace IDs eligible for Studios. Empty string allowed when not limiting." 
+  validation { # pattern intentionally rejects whitespace — "123, 456" fails; IDs must be bare integers joined by commas
+    condition     = var.data_studio_eligible_workspaces == "" || can(regex("^[0-9]+(,[0-9]+)*$", var.data_studio_eligible_workspaces))
+    error_message = "data_studio_eligible_workspaces must be a comma-separated list of integers (e.g. \"123,456\") or an empty string."
+  }
+}
 variable "flag_enable_data_studio_ssh" { type = bool }
 variable "flag_limit_data_studio_ssh_to_some_workspaces" { type = bool }
-variable "data_studio_ssh_eligible_workspaces" { type = string }
+variable "data_studio_ssh_eligible_workspaces" {
+  type        = string
+  description = "Comma-separated list of numeric workspace IDs eligible for Studios SSH. Empty string allowed when not limiting."
+  validation { # same pattern and semantics as data_studio_eligible_workspaces above
+    condition     = var.data_studio_ssh_eligible_workspaces == "" || can(regex("^[0-9]+(,[0-9]+)*$", var.data_studio_ssh_eligible_workspaces))
+    error_message = "data_studio_ssh_eligible_workspaces must be a comma-separated list of integers (e.g. \"123,456\") or an empty string."
+  }
+}
 variable "flag_studio_enable_path_routing" { type = bool }
 variable "data_studio_path_routing_url" {
@@ -265,7 +362,13 @@ variable "db_database_name" { type = string }
 # ------------------------------------------------------------------------------------
 variable "db_container_engine" { type = string }
-variable "db_container_engine_version" { type = string }
+variable "db_container_engine_version" {
+  type = string
+  validation { # NOTE(review): also rejects the template default "REPLACE_ME" (it does not start with "8."), making this value effectively mandatory — confirm intended
+    condition     = startswith(var.db_container_engine_version, "8.")
+    error_message = "db_container_engine_version must be MySQL 8.x. master supports only MySQL 8 and above."
+  }
+}
 
 # ------------------------------------------------------------------------------------
@@ -273,7 +376,13 @@ variable "db_container_engine_version" { type = string }
 # ------------------------------------------------------------------------------------
 variable "db_engine" { type = string }
-variable "db_engine_version" { type = string }
+variable "db_engine_version" {
+  type = string
+  validation { # same constraint as db_container_engine_version: MySQL 8 only; "REPLACE_ME" is rejected too, so the value is effectively mandatory
+    condition     = startswith(var.db_engine_version, "8.")
+    error_message = "db_engine_version must be MySQL 8.x. master supports only MySQL 8 and above."
+  }
+}
 variable "db_param_group" { type = string }
 variable "db_instance_class" { type = string }
 variable "db_allocated_storage" { type = number }
@@ -365,15 +474,32 @@ variable "ec2_update_ami_if_available" { type = bool }
 # ALB
 # ------------------------------------------------------------------------------------
-variable "alb_certificate_arn" { type = string }
+variable "alb_certificate_arn" {
+  type = string
+
+  validation { # partition-agnostic: matches arn:aws:, arn:aws-us-gov:, arn:aws-cn:, and any future aws-* partition (the startswith pair missed aws-cn)
+    condition     = var.alb_certificate_arn == "REPLACE_ME" || can(regex("^arn:aws[a-z-]*:acm:", var.alb_certificate_arn))
+    error_message = "alb_certificate_arn must be a full ACM certificate ARN (or \"REPLACE_ME\" if no ALB is being created)."
+  }
+
+  validation { # cross-variable reference — requires Terraform >= 1.9 (root pins only >= 1.7.0)
+    condition     = !var.flag_create_load_balancer || var.alb_certificate_arn != "REPLACE_ME"
+    error_message = "When flag_create_load_balancer = true, alb_certificate_arn must be set to a real ACM ARN."
+  }
+}
 
 # ------------------------------------------------------------------------------------
 # TOWER CONFIGURATION
 # ------------------------------------------------------------------------------------
-variable "tower_server_url" { type = string }
-variable "tower_server_port" { type = string } # TODO: Update SG-generation logic to use this value
+variable "tower_server_url" {
+  type = string
+  validation { # consistent with tower_root_users below: reject empty and the "REPLACE_ME" template default as well as scheme prefixes. NOTE(review): tower_server_port is removed outright above — confirm no tfvars still set it
+    condition     = var.tower_server_url != "REPLACE_ME" && length(trimspace(var.tower_server_url)) > 0 && !startswith(var.tower_server_url, "http://") && !startswith(var.tower_server_url, "https://")
+    error_message = "tower_server_url must be set (not \"REPLACE_ME\") and must not include a scheme prefix (no \"http://\" or \"https://\")."
+  }
+}
 variable "tower_contact_email" { type = string }
 variable "tower_enable_platforms" { type = string }
@@ -391,7 +517,14 @@ variable "tower_smtp_starttls_enable" { type = bool }
 variable "tower_smtp_starttls_required" { type = bool }
 variable "tower_smtp_ssl_protocols" { type = string }
-variable "tower_root_users" { type = string }
+variable "tower_root_users" {
+  type        = string
+  description = "Comma-separated list of email addresses to be granted root-user privileges in Seqera Platform."
+  validation { # checks non-empty / non-placeholder only; address format is not validated here
+    condition     = length(trimspace(var.tower_root_users)) > 0 && var.tower_root_users != "REPLACE_ME"
+    error_message = "tower_root_users must contain at least one email address."
+  }
+}
 variable "tower_email_trusted_orgs" { type = string }
 variable "tower_email_trusted_users" { type = string }
@@ -400,7 +533,14 @@ variable "tower_audit_retention_days" { type = number }
 variable "tower_enable_openapi" { type = bool }
 variable "tower_enable_pipeline_versioning" { type = bool }
-variable "pipeline_versioning_eligible_workspaces" { type = string }
+variable "pipeline_versioning_eligible_workspaces" {
+  type        = string
+  description = "Comma-separated list of numeric workspace IDs eligible for pipeline versioning. Empty string allowed when not limiting."
+  validation { # same integer-list pattern as the Studios workspace allow-lists; rejects spaces and trailing commas
+    condition     = var.pipeline_versioning_eligible_workspaces == "" || can(regex("^[0-9]+(,[0-9]+)*$", var.pipeline_versioning_eligible_workspaces))
+    error_message = "pipeline_versioning_eligible_workspaces must be a comma-separated list of integers (e.g. \"123,456\") or an empty string."
+  }
+}
 
 # ------------------------------------------------------------------------------------
 # TOWER CONFIGURATION - OIDC