diff --git a/.githooks/data_external/example_import_tfvars_via_extractor.py b/.githooks/data_external/example_import_tfvars_via_extractor.py
deleted file mode 100644
index 84874bf6..00000000
--- a/.githooks/data_external/example_import_tfvars_via_extractor.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env python3
-import os
-import json
-import sys
-from types import SimpleNamespace
-from typing import List
-
-sys.dont_write_bytecode = True
-
-# NOTE! This part is a bit convoluted but there's a method to the madness:
-# - Script executes from `/~/cx-field-tools-installer`.
-# - Need to change dir into .githooks (to avoid the `.` import problem), import, then return to project root so the extractor can find tfvars.
-# - Error this is solving: `ImportError: attempted relative import with no known parent package``
-project_root = os.getcwd()
-os.chdir(f"{project_root}/.githooks")
-sys.path.append(".")
-from utils.extractors import get_tfvars_as_json #convert_tfvars_to_dictionary
-
-# Extract tfvars just like we do with the Python validation script
-os.chdir(project_root)
-data_dictionary = get_tfvars_as_json()
-data = SimpleNamespace(**data_dictionary)
-
-# Connection string modifiers
-mysql8_connstring = "allowPublicKeyRetrieval=true&useSSL=false"
-v24plus_connstring = "permitMysqlScheme=true"
-
-
-def return_tf_payload(status: str, value: str):
-    payload = {'status': status, 'value': value}
-    print(json.dumps(payload))
-
-
-def generate_connection_string(mysql8: str, v24plus: str):
-    connection_string = ""
-    add_mysql8 = False
-    add_v24plus = False
-
-    if mysql8.startswith("8."):
-        add_mysql8 = True
-
-    if v24plus >= "v24":
-        add_v24plus = True
-
-    if add_mysql8 and add_v24plus:
-        connection_string = f"?{mysql8_connstring}&{v24plus_connstring}"
-    elif add_mysql8 and not add_v24plus:
-        connection_string = f"?{mysql8_connstring}"
-    elif not add_mysql8 and add_v24plus:
-        connection_string = f"?{v24plus_connstring}"
-
-    return connection_string
-
-
-if data.flag_use_container_db:
-    connection_string = generate_connection_string(data.db_container_engine_version, data.tower_container_version)
-else:
-    connection_string = generate_connection_string(data.db_engine_version, data.tower_container_version)
-
-return_tf_payload("0", connection_string)
-exit(0)
\ No newline at end of file
diff --git a/.githooks/data_external/generate_db_connection_string.py b/.githooks/data_external/generate_db_connection_string.py
index 882e528e..00a8646c 100644
--- a/.githooks/data_external/generate_db_connection_string.py
+++ b/.githooks/data_external/generate_db_connection_string.py
@@ -3,13 +3,35 @@
 import json
 import sys
 from types import SimpleNamespace
-from typing import List
 
 sys.dont_write_bytecode = True
 
+
+## ------------------------------------------------------------------------------------
+## Extract TFvars and get query values
+## ------------------------------------------------------------------------------------
+# NOTE! This part is a bit convoluted but there's a method to the madness:
+# - Script executes from `~/cx-field-tools-installer`.
+# - Change into .githooks (to avoid the `.` import problem), import, then return to project root to extract tfvars.
+# - Error thrown without the workaround: `ImportError: attempted relative import with no known parent package`
+# - Query object receives objects via the TF data call (i.e. resources)
+project_root = os.getcwd()
+os.chdir(f"{project_root}/.githooks")
+sys.path.append(".")
+
+from utils.extractors import get_tfvars_as_json #convert_tfvars_to_dictionary
+from utils.logger import external_logger
+from utils.common_data_external_functions import getDVal, return_tf_payload
+
+os.chdir(project_root)
+data_dictionary = get_tfvars_as_json()
+data = SimpleNamespace(**data_dictionary)
+
 # Much simpler way to get variable passed in (via Terraform sending to stdin)
 query = json.load(sys.stdin)
-data = SimpleNamespace(**query)
+# query = SimpleNamespace(**query)
+## ------------------------------------------------------------------------------------
+
 
 # Get engine_version depending on whether container DB or RDS instance is in play
 engine_version = data.db_container_engine_version if data.flag_use_container_db else data.db_engine_version
@@ -19,21 +41,11 @@
 v24plus_connstring = "permitMysqlScheme=true"
 
 
-def return_tf_payload(status: str, value: str):
-    payload = {'status': status, 'value': value}
-    print(json.dumps(payload))
-
-
 def generate_connection_string(mysql8: str, v24plus: str):
     connection_string = ""
-    add_mysql8 = False
-    add_v24plus = False
-
-    if mysql8.startswith("8."):
-        add_mysql8 = True
-
-    if v24plus >= "v24":
-        add_v24plus = True
+    add_mysql8 = True if mysql8.startswith("8.") else False
+    add_v24plus = True if v24plus >= "v24" else False
 
     if add_mysql8 and add_v24plus:
         connection_string = f"?{mysql8_connstring}&{v24plus_connstring}"
@@ -42,11 +54,13 @@ def generate_connection_string(mysql8: str, v24plus: str):
     elif add_mysql8 and not add_v24plus:
         connection_string = f"?{mysql8_connstring}"
     elif not add_mysql8 and add_v24plus:
         connection_string = f"?{v24plus_connstring}"
 
-    return connection_string
+    values = {
+        "connection_string": connection_string
+    }
+
+    return values
 
 
 if __name__ == '__main__':
-    connection_string = generate_connection_string(engine_version, data.tower_container_version)
-    return_tf_payload("0", connection_string)
-
-    exit(0)
\ No newline at end of file
+    values = generate_connection_string(engine_version, data.tower_container_version)
+    return_tf_payload("0", values)
\ No newline at end of file
diff --git a/.githooks/data_external/generate_dns_values.py b/.githooks/data_external/generate_dns_values.py
new file mode 100644
index 00000000..9dc2b1c8
--- /dev/null
+++ b/.githooks/data_external/generate_dns_values.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python3
+import os
+import json
+import sys
+from types import SimpleNamespace
+
+sys.dont_write_bytecode = True
+
+
+## ------------------------------------------------------------------------------------
+## Extract TFvars and get query values
+## ------------------------------------------------------------------------------------
+# NOTE! This part is a bit convoluted but there's a method to the madness:
+# - Script executes from `~/cx-field-tools-installer`.
+# - Change into .githooks (to avoid the `.` import problem), import, then return to project root to extract tfvars.
+# - Error thrown without the workaround: `ImportError: attempted relative import with no known parent package`
+# - Query object receives objects via the TF data call (i.e. resources)
+project_root = os.getcwd()
+os.chdir(f"{project_root}/.githooks")
+sys.path.append(".")
+
+from utils.extractors import get_tfvars_as_json #convert_tfvars_to_dictionary
+from utils.logger import external_logger
+from utils.common_data_external_functions import getDVal, return_tf_payload
+
+os.chdir(project_root)
+data_dictionary = get_tfvars_as_json()
+data = SimpleNamespace(**data_dictionary)
+
+# Much simpler way to get variable passed in (via Terraform sending to stdin)
+query = json.load(sys.stdin)
+# query = SimpleNamespace(**query)
+## ------------------------------------------------------------------------------------
+
+
+# Vars
+dns_zone_mappings = {
+    "flag_create_route53_private_zone": ("zone_private_new", [0, 'id']),
+    "flag_use_existing_route53_private_zone": ("zone_private_existing", [0, 'id']),
+    "flag_use_existing_route53_public_zone": ("zone_public_existing", [0, 'id']),
+}
+
+# Nested dictionary transformed into a flat dictionary so the value can be passed around.
+# I don't love this approach but can't find a cleaner/simpler way right now.
+dns_instance_ip_mappings = {
+    "flag_make_instance_private": ("tower_host_instance", ['private_ip']),
+    "flag_make_instance_private_behind_public_alb": ("tower_host_instance", ['private_ip']),
+    "flag_private_tower_without_eice": ("tower_host_instance", ['private_ip']),
+    "flag_make_instance_public": ("tower_host_eip", [0, 'public_ip'])
+}
+
+
+def populate_values(query):
+
+    external_logger.debug(f"Query is: {query}")
+
+    # Baseline variables
+    dns_zone_id = ""
+    dns_instance_ip = ""
+
+    for k,v in dns_zone_mappings.items():
+        external_logger.debug(f"k is: {k}; and v is: {v}")
+        tf_obj, dpath = v
+        tf_obj_json = json.loads(query[tf_obj])
+        dns_zone_id = getDVal(tf_obj_json, dpath) if data_dictionary[k] else dns_zone_id
+
+
+    for k,v in dns_instance_ip_mappings.items():
+        '''`aws_instance.ec2.private_ip` vs `aws_eip.towerhost[0].public_ip`'''
+        tf_obj, dpath = v
+        tf_obj_json = json.loads(query[tf_obj])
+        dns_instance_ip = getDVal(tf_obj_json, dpath) if data_dictionary[k] else dns_instance_ip
+
+    values = {
+        "dns_zone_id": dns_zone_id,
+        "dns_instance_ip": dns_instance_ip
+    }
+
+    return values
+
+
+if __name__ == '__main__':
+    values = populate_values(query)
+    return_tf_payload("0", values)
\ No newline at end of file
diff --git a/.githooks/data_external/generate_flags.py b/.githooks/data_external/generate_flags.py
new file mode 100644
index 00000000..f95c6923
--- /dev/null
+++ b/.githooks/data_external/generate_flags.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python3
+import os
+import json
+import sys
+from types import SimpleNamespace
+
+sys.dont_write_bytecode = True
+
+
+## ------------------------------------------------------------------------------------
+## Extract TFvars and get query values
+## ------------------------------------------------------------------------------------
+# NOTE! This part is a bit convoluted but there's a method to the madness:
+# - Script executes from `~/cx-field-tools-installer`.
+# - Change into .githooks (to avoid the `.` import problem), import, then return to project root to extract tfvars.
+# - Error thrown without the workaround: `ImportError: attempted relative import with no known parent package`
+# - Query object receives objects via the TF data call (i.e. resources)
+project_root = os.getcwd()
+os.chdir(f"{project_root}/.githooks")
+sys.path.append(".")
+
+from utils.extractors import get_tfvars_as_json #convert_tfvars_to_dictionary
+from utils.logger import external_logger
+from utils.common_data_external_functions import getDVal, return_tf_payload
+
+os.chdir(project_root)
+data_dictionary = get_tfvars_as_json()
+data = SimpleNamespace(**data_dictionary)
+
+# Much simpler way to get variable passed in (via Terraform sending to stdin)
+query = json.load(sys.stdin)
+# query = SimpleNamespace(**query)
+## ------------------------------------------------------------------------------------
+
+
+def populate_values(query):
+
+    external_logger.debug(f"Query is: {query}")
+
+    # Determine kind of DNS record to create
+    dns_create_alb_record = True if (data.flag_create_load_balancer and not data.flag_create_hosts_file_entry) else False
+    dns_create_ec2_record = True if (not data.flag_create_load_balancer and not data.flag_create_hosts_file_entry) else False
+
+    values = {
+        "dns_create_alb_record": dns_create_alb_record,
+        "dns_create_ec2_record": dns_create_ec2_record,
+    }
+
+    return values
+
+
+if __name__ == '__main__':
+    values = populate_values(query)
+    return_tf_payload("0", values)
\ No newline at end of file
diff --git a/.githooks/data_external/template_get_tfvars_and_query.py b/.githooks/data_external/template_get_tfvars_and_query.py
new file mode 100644
index 00000000..b40fd0a6
--- /dev/null
+++ b/.githooks/data_external/template_get_tfvars_and_query.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python3
+import os
+import json
+import sys
+from types import SimpleNamespace
+
+sys.dont_write_bytecode = True
+
+
+## ------------------------------------------------------------------------------------
+## Extract TFvars and get query values
+## ------------------------------------------------------------------------------------
+# NOTE! This part is a bit convoluted but there's a method to the madness:
+# - Script executes from `~/cx-field-tools-installer`.
+# - Change into .githooks (to avoid the `.` import problem), import, then return to project root to extract tfvars.
+# - Error thrown without the workaround: `ImportError: attempted relative import with no known parent package`
+# - Query object receives objects via the TF data call (i.e. resources)
+project_root = os.getcwd()
+os.chdir(f"{project_root}/.githooks")
+sys.path.append(".")
+
+from utils.extractors import get_tfvars_as_json #convert_tfvars_to_dictionary
+from utils.logger import external_logger
+from utils.common_data_external_functions import getDVal, return_tf_payload
+
+os.chdir(project_root)
+data_dictionary = get_tfvars_as_json()
+data = SimpleNamespace(**data_dictionary)
+
+# Much simpler way to get variable passed in (via Terraform sending to stdin)
+query = json.load(sys.stdin)
+# query = SimpleNamespace(**query)
+## ------------------------------------------------------------------------------------
+
+
+def populate_values(query):
+
+    external_logger.debug(f"Query is: {query}")
+
+    # Add logic here
+
+    values = {
+        "key": "value"
+    }
+
+    return values
+
+
+if __name__ == '__main__':
+    values = populate_values(query)
+    return_tf_payload("0", values)
+
diff --git a/.githooks/utils/common_data_external_functions.py b/.githooks/utils/common_data_external_functions.py
new file mode 100644
index 00000000..569f9b23
--- /dev/null
+++ b/.githooks/utils/common_data_external_functions.py
@@ -0,0 +1,45 @@
+# Centralizes common functions which will be needed across scripts called by TF `data.external`.
+import json
+from typing import Any
+
+from utils.logger import external_logger
+
+
+# https://www.reddit.com/r/learnpython/comments/y02net/is_there_a_better_way_to_store_full_dictionary/
+def getDVal(d : dict, listPath : list) -> Any:
+    '''
+    Walk a nested dictionary object to get to the target nested key.
+    This allows us to specify a TF object and the exact subpath to slice from.
+    Note: The path must be defined as discrete elements in a list.
+    '''
+    for key in listPath:
+        d = d[key]
+    return d
+
+
+def convert_booleans_to_strings(payload: dict) -> dict:
+    '''
+    TF data.external freaks out if JSON true/false values are not returned as strings.
+    Unfortunately, json.dumps will convert True/False to true/false, but not to strings.
+    This function fixes the json.dumps payload so it's acceptable to TF.
+    '''
+    for k,v in payload.items():
+        external_logger.debug(f"k is {k} and v is {v}.")
+        if isinstance(v, bool):
+            payload[k] = "true" if v == True else "false"
+    return payload
+
+
+def return_tf_payload(status: str, values: dict):
+    '''
+    Package the payload to return to TF data.external.
+    Note: Using external_logger due to the stdout/stderr problem.
+    '''
+    external_logger.debug(f"Payload is: {values}")
+
+    payload = {'status': status, **values}
+    payload = convert_booleans_to_strings(payload)
+    print(json.dumps(payload))
+
+    external_logger.error("Flushing.")
+    exit(0)
\ No newline at end of file
diff --git a/.githooks/utils/extractors.py b/.githooks/utils/extractors.py
index 37234e8b..88ae5699 100644
--- a/.githooks/utils/extractors.py
+++ b/.githooks/utils/extractors.py
@@ -65,7 +65,8 @@ def convert_tfvars_to_dictionary(file):
         elif flag_skip_block_comment:
             indices_to_pop.append(i)
 
-    logger.debug(f"Indices to pop: {indices_to_pop}")
+    # This breaks my `external.data` implementation when logging is set to debug. Commenting out.
+    # logger.debug(f"Indices to pop: {indices_to_pop}")
 
     purge_indices_in_reverse(indices_to_pop)
 
diff --git a/.githooks/utils/logger.py b/.githooks/utils/logger.py
index df7efbdd..73feec0a 100644
--- a/.githooks/utils/logger.py
+++ b/.githooks/utils/logger.py
@@ -1,17 +1,52 @@
 import logging
+import logging.handlers
 import sys
 
-file_handler = logging.FileHandler(filename='tmp.log')
-stdout_handler = logging.StreamHandler(stream=sys.stdout)
-handlers = [file_handler, stdout_handler]
 
-logging.basicConfig(
-    level=logging.INFO,
-    # format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s',
-    # https://stackoverflow.com/questions/57925917/python-logging-left-align-with-brackets
-    format='%(asctime)s %(filename)-15s:%(lineno)-4d %(levelname)-12s %(message)s',
-    handlers=handlers
+# WARNING (June 17, 2024)
+#
+# The original logger configuration worked well for the verification logic but broke when using the TF `data.external` mechanism
+# (broke due to how the `data.external` mechanism reads stdout/stderr ... which log events wrote to by default). This was resolved
+# by splitting the logger into two: one to handle verification via stdout/file, the other to handle the TF external mechanism by
+# buffering all log events in memory until a proper payload is returned to TF (with the buffered logs emitted afterwards).
+# https://docs.python.org/3/library/logging.handlers.html
+#
+# Trial-and-error reworking of the logic finally got that working, but allowing logging.basicConfig to remain resulted in
+# double-entry log events. Not sure why and don't care to spend more time right now investigating.
+#
+# Ultimately made things more granular and less DRY, but it works so .....


+formatter = logging.Formatter('%(asctime)s %(filename)-15s:%(lineno)-4d %(levelname)-12s %(message)s')
+
+# Validation Logger
+logger = logging.getLogger('VALIDATION')
+logger.setLevel(logging.DEBUG)
+
+validation_file_handler = logging.FileHandler(filename='verify.log')
+validation_stdout_handler = logging.StreamHandler(stream=sys.stdout)
+validation_file_handler.setFormatter(formatter)
+validation_stdout_handler.setFormatter(formatter)
+logger.addHandler(validation_file_handler)
+logger.addHandler(validation_stdout_handler)
+
+
+# External Logger
+external_logger = logging.getLogger('EXTERNAL')
+external_logger.setLevel(logging.DEBUG)
+
+data_external_file_handler = logging.FileHandler(filename="data_external.log")
+data_external_memory_handler = logging.handlers.MemoryHandler(
+    capacity=1024*100,
+    flushLevel=logging.ERROR,
+    target=data_external_file_handler,
+    # target=logging.FileHandler(filename="data_external.log"),
+    # flushOnClose=False,
 )
+data_external_file_handler.setFormatter(formatter)
+external_logger.addHandler(data_external_memory_handler)
+
+
 
-logger = logging.getLogger('LOGGER_NAME')
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index d425bd48..6d5a264f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,3 +14,5 @@ tmp.log
 **/venv
 **/.terraform.lock.hcl
 **/__pycache__
+verify.log
+**/data_external.log
\ No newline at end of file
diff --git a/000_main.tf b/000_main.tf
index ad37321e..d54b2b43 100644
--- a/000_main.tf
+++ b/000_main.tf
@@ -49,15 +49,42 @@ resource "random_pet" "stackname" {
 
 data "aws_caller_identity" "current" {}
 
+data "aws_vpc" "preexisting" {
+  id = local.vpc_id
+}
+
+# https://stackoverflow.com/questions/67562197/terraform-loop-through-ids-list-and-generate-data-blocks-from-it-and-access-it
+data "aws_subnet" "existing" {
+  # Creates a map with the keys being the CIDRs -- e.g. `data.aws_subnet.public["10.0.0.0/20"].id`
+  for_each   = toset(local.subnets_all)
+  vpc_id     = local.vpc_id
+  cidr_block = each.key
+}
 
 # https://medium.com/@leslie.alldridge/terraform-external-data-source-using-custom-python-script-with-example-cea5e618d83e
 data "external" "generate_db_connection_string" {
   program = ["python3", "${path.module}/.githooks/data_external/generate_db_connection_string.py"]
+  query   = {}
+}
+
+data "external" "generate_flags" {
+  program = ["python3", "${path.module}/.githooks/data_external/generate_flags.py"]
+  query   = {}
+}
+
+data "external" "generate_dns_values" {
+  program = ["python3", "${path.module}/.githooks/data_external/generate_dns_values.py"]
   query = {
-    tower_container_version     = var.tower_container_version
-    flag_use_container_db       = var.flag_use_container_db
-    db_container_engine_version = var.db_container_engine_version
-    db_engine_version           = var.db_engine_version
+    # jsonencode() is necessary for empty objects
+    zone_private_new      = jsonencode(aws_route53_zone.private)
+    zone_private_existing = jsonencode(data.aws_route53_zone.private)
+    zone_public_existing  = jsonencode(data.aws_route53_zone.public)
+
+    tower_host_instance = jsonencode(aws_instance.ec2)
+    tower_host_eip      = jsonencode(aws_eip.towerhost)
+
+    # subnets = data.aws_subnet.existing   # Can't add: these values won't be known until created.
+    # ec2 = jsonencode(aws_instance.ec2)   # Can't add: these values won't be known until created.
   }
 }
@@ -71,56 +98,27 @@ locals {
   # ---------------------------------------------------------------------------------------
   global_prefix = var.flag_use_custom_resource_naming_prefix == true ? var.custom_resource_naming_prefix : "tf-${var.app_name}-${random_pet.stackname.id}"
-
   # Networking
   # ---------------------------------------------------------------------------------------
   vpc_id = var.flag_create_new_vpc == true ? module.vpc[0].vpc_id : var.vpc_existing_id
-  vpc_private_route_table_ids = var.flag_create_new_vpc == true ? module.vpc[0].private_route_table_ids : data.aws_route_tables.preexisting.ids
-
-  # If creating VPC from scratch, map all subnet CIDRS to corresponding subnet ID
-  # zipmap -- turn 2 lists into a dictionary. https://developer.hashicorp.com/terraform/language/functions/zipmap
-  # merge -- join two dictionaries. https://developer.hashicorp.com/terraform/language/functions/merge
-  vpc_new_cidr_block_to_id_public  = var.flag_create_new_vpc == true ? zipmap(module.vpc[0].public_subnets_cidr_blocks, module.vpc[0].public_subnets) : {}
-  vpc_new_cidr_block_to_id_private = var.flag_create_new_vpc == true ? zipmap(module.vpc[0].private_subnets_cidr_blocks, module.vpc[0].private_subnets) : {}
-  vpc_new_cidr_block_to_id_unified = var.flag_create_new_vpc == true ? merge(local.vpc_new_cidr_block_to_id_public, local.vpc_new_cidr_block_to_id_private) : {}
-
-  # Regardless of whether we build a new VPC or use existing, assign the subnet CIDRs to a common variable for subsequent subnet ID lookup.
-  # concat -- join lists of strings. https://developer.hashicorp.com/terraform/language/functions/concat
-  subnets_ec2   = var.flag_create_new_vpc == true ? var.vpc_new_ec2_subnets : var.vpc_existing_ec2_subnets
-  subnets_batch = var.flag_create_new_vpc == true ? var.vpc_new_batch_subnets : var.vpc_existing_batch_subnets
-  subnets_db    = var.flag_create_new_vpc == true ? var.vpc_new_db_subnets : var.vpc_existing_db_subnets
-  subnets_redis = var.flag_create_new_vpc == true ? var.vpc_new_redis_subnets : var.vpc_existing_redis_subnets
-  subnets_alb   = var.flag_create_new_vpc == true ? var.vpc_new_alb_subnets : var.vpc_existing_alb_subnets
-  subnets_all   = concat(local.subnets_ec2, local.subnets_batch, local.subnets_db, local.subnets_redis, local.subnets_alb)
+  vpc_private_route_table_ids = data.aws_route_tables.preexisting.ids
 
-  # If using existing VPC, get subnet IDs by querying datasources with subnet CIDR.
-  # If building new VPC, make dictionary from cidr_block and subnet id (two different list outputs from VPC module).
-  subnet_ids_ec2 = (var.flag_create_new_vpc == true ?
-    [for cidr in local.subnets_ec2 : lookup(local.vpc_new_cidr_block_to_id_unified, cidr)] :
-    [for cidr in local.subnets_ec2 : data.aws_subnet.existing[cidr].id]
-  )
-
-  subnet_ids_batch = (var.flag_create_new_vpc == true ?
-    [for cidr in local.subnets_batch : lookup(local.vpc_new_cidr_block_to_id_unified, cidr)] :
-    [for cidr in local.subnets_batch : data.aws_subnet.existing[cidr].id]
-  )
-
-  subnet_ids_db = (var.flag_create_new_vpc == true ?
-    [for cidr in local.subnets_db : lookup(local.vpc_new_cidr_block_to_id_unified, cidr)] :
-    [for cidr in local.subnets_db : data.aws_subnet.existing[cidr].id]
-  )
-
-  subnet_ids_redis = (var.flag_create_new_vpc == true ?
-    [for cidr in local.subnets_redis : lookup(local.vpc_new_cidr_block_to_id_unified, cidr)] :
-    [for cidr in local.subnets_redis : data.aws_subnet.existing[cidr].id]
-  )
-
-  subnet_ids_alb = (
-    var.flag_create_load_balancer == true && var.flag_create_new_vpc == true ?
-    [for cidr in var.vpc_new_alb_subnets : lookup(local.vpc_new_cidr_block_to_id_unified, cidr)] :
-    var.flag_create_load_balancer == true && var.flag_use_existing_vpc == true ?
-    [for cidr in var.vpc_existing_alb_subnets : data.aws_subnet.existing[cidr].id] : []
+  # Map CIDR blocks to subnet IDs (depending on the tf resource, one or the other is needed).
+  # Can't delegate this to Python because multiple data calls would be required.
+  subnets_ec2   = var.flag_create_new_vpc == true ? var.vpc_new_ec2_subnets : var.vpc_existing_ec2_subnets
+  subnets_batch = var.flag_create_new_vpc == true ? var.vpc_new_batch_subnets : var.vpc_existing_batch_subnets
+  subnets_db    = var.flag_create_new_vpc == true ? var.vpc_new_db_subnets : var.vpc_existing_db_subnets
+  subnets_redis = var.flag_create_new_vpc == true ? var.vpc_new_redis_subnets : var.vpc_existing_redis_subnets
+  subnets_alb   = var.flag_create_new_vpc == true ? var.vpc_new_alb_subnets : var.vpc_existing_alb_subnets
+  subnets_all   = concat(local.subnets_ec2, local.subnets_batch, local.subnets_db, local.subnets_redis, local.subnets_alb)
+
+  subnet_ids_ec2   = [for cidr in local.subnets_ec2 : data.aws_subnet.existing[cidr].id]
+  subnet_ids_batch = [for cidr in local.subnets_batch : data.aws_subnet.existing[cidr].id]
+  subnet_ids_db    = [for cidr in local.subnets_db : data.aws_subnet.existing[cidr].id]
+  subnet_ids_redis = [for cidr in local.subnets_redis : data.aws_subnet.existing[cidr].id]
+  subnet_ids_alb = (var.flag_create_load_balancer == true ?
+    [for cidr in local.subnets_alb : data.aws_subnet.existing[cidr].id] : []
   )
@@ -148,23 +146,12 @@ locals {
   # ---------------------------------------------------------------------------------------
   # All values here refer to Route53 in same AWS account as Tower instance.
   # If R53 record not generated, will create entry in EC2 hosts file.
-  dns_create_alb_record = var.flag_create_load_balancer == true && var.flag_create_hosts_file_entry == false ? true : false
-  dns_create_ec2_record = var.flag_create_load_balancer == false && var.flag_create_hosts_file_entry == false ? true : false
-
-  dns_zone_id = (
-    var.flag_create_route53_private_zone == true ? aws_route53_zone.private[0].id :
-    var.flag_use_existing_route53_public_zone == true ? data.aws_route53_zone.public[0].id :
-    var.flag_use_existing_route53_private_zone == true ? data.aws_route53_zone.private[0].id :
-    "No_Match_Found"
-  )
+  dns_create_alb_record = jsondecode(data.external.generate_flags.result.dns_create_alb_record)
+  dns_create_ec2_record = jsondecode(data.external.generate_flags.result.dns_create_ec2_record)
+
+  dns_zone_id     = data.external.generate_dns_values.result.dns_zone_id
+  dns_instance_ip = data.external.generate_dns_values.result.dns_instance_ip
 
-  dns_instance_ip = (
-    var.flag_make_instance_private == true ? aws_instance.ec2.private_ip :
-    var.flag_make_instance_private_behind_public_alb == true ? aws_instance.ec2.private_ip :
-    var.flag_private_tower_without_eice == true ? aws_instance.ec2.private_ip :
-    var.flag_make_instance_public == true ? aws_eip.towerhost[0].public_ip :
-    "No_Match_Found"
-  )
 
   # If no HTTPS and no load-balancer, use `http` prefix and expose port in URL. Otherwise, use `https` prefix and no port.
   tower_server_url = (
@@ -212,7 +199,7 @@ locals {
   # tower_db_url = var.flag_create_external_db == true ? module.rds[0].db_instance_address : var.tower_db_url
   tower_db_root = ( var.flag_use_container_db == true? var.tower_db_url : module.rds[0].db_instance_address )
-  tower_db_url  = "${local.tower_db_root}/${var.db_database_name}${data.external.generate_db_connection_string.result.value}"
+  tower_db_url  = "${local.tower_db_root}/${var.db_database_name}${data.external.generate_db_connection_string.result.connection_string}"
 
 
   # Redis
diff --git a/001_vpc.tf b/001_vpc.tf
index a5ae23b9..c1c2806d 100644
--- a/001_vpc.tf
+++ b/001_vpc.tf
@@ -35,22 +35,6 @@ module "vpc" {
 }
 
 
-# https://stackoverflow.com/questions/67562197/terraform-loop-through-ids-list-and-generate-data-blocks-from-it-and-access-it
-data "aws_subnet" "existing" {
-  # Creates a map with the keys being the CIDRs -- e.g. `data.aws_subnet.public["10.0.0.0/20"].id
-  # Only make a data query if we are using an existing VPC
-  for_each = var.flag_use_existing_vpc == true ? toset(local.subnets_all) : []
-
-  vpc_id     = local.vpc_id
-  cidr_block = each.key
-}
-
-
-# Needed to add this to get existing CIDR range to limit ALB listeners
-data "aws_vpc" "preexisting" {
-  id = local.vpc_id
-}
-
 # Needed to grab route tables from pre-existing VPC to create VPC endpoints.
 data "aws_route_tables" "preexisting" {
   vpc_id = local.vpc_id
diff --git a/012_outputs.tf b/012_outputs.tf
index 568e1753..2ff3bed9 100644
--- a/012_outputs.tf
+++ b/012_outputs.tf
@@ -27,5 +27,5 @@ output "redis_endpoint" {
 
 # Example for how to get values dynamically generated by `data.external`
 output "database_connection_string" {
   description = "Dynamically generated db connectino string based on tfvars selections."
-  value       = data.external.generate_db_connection_string.result.value
+  value       = data.external.generate_db_connection_string.result.connection_string
 }
\ No newline at end of file
diff --git a/assets/src/tower_config/tower.env.tpl b/assets/src/tower_config/tower.env.tpl
index 874df340..80c4001f 100755
--- a/assets/src/tower_config/tower.env.tpl
+++ b/assets/src/tower_config/tower.env.tpl
@@ -2,6 +2,7 @@
 # Generic Tower configuration values
 # ------------------------------------------------
 TOWER_ENABLE_AWS_SSM=true
+TOWER_ENABLE_ARM64=true
 
 LICENSE_SERVER_URL=https://licenses.seqera.io
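
Note on the shared pattern: every `data_external` script in this changeset follows Terraform's `data.external` protocol — the provider writes the `query` map to the program's stdin as a JSON object, and expects a single flat JSON object whose values are all strings on stdout. A minimal sketch of that contract (a hypothetical standalone example for illustration, not part of the diff; the key names are made up):

#!/usr/bin/env python3
# Hypothetical minimal data.external program: read the query map from stdin,
# emit a flat map of string values on stdout. Booleans/numbers must be stringified,
# which is what convert_booleans_to_strings() handles for the real scripts above.
import json
import sys

query = json.load(sys.stdin)                          # e.g. {"some_key": "some_value"}
values = {"status": "0", "key_count": str(len(query))}
print(json.dumps(values))                             # every value must be a string

Anything printed to stdout other than this single JSON object (e.g. stray log lines) breaks the call, which is why the scripts buffer their log events in memory and write them to a file instead.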