diff --git a/docs/CONFIG-VARS.md b/docs/CONFIG-VARS.md
index 1e542933..09fe78a7 100644
--- a/docs/CONFIG-VARS.md
+++ b/docs/CONFIG-VARS.md
@@ -127,8 +127,8 @@ By default, two custom IAM policies and two custom IAM roles (with instance prof
| Name | Description | Type | Default | Notes |
| :--- | :--- | :--- | :--- | :--- |
-| cluster_iam_role_name | Name of existing IAM role for the EKS cluster | string | "" | |
-| workers_iam_role_name | Name of existing IAM role for the cluster node VMs | string | "" | |
+| cluster_iam_role_arn | ARN of the pre-existing IAM role for the EKS cluster | string | null | If an existing EKS cluster IAM role is being used, the IAM role's 'ARN' is required. |
+| workers_iam_role_arn | ARN of the pre-existing IAM role for the cluster node VMs | string | null | If an existing EKS node IAM role is being used, the IAM role's 'ARN' is required. |
The cluster IAM role must include three AWS-managed policies and one custom policy.
@@ -274,6 +274,18 @@ When `storage_type=ha`, the [AWS Elastic File System](https://aws.amazon.com/efs
| Name | Description | Type | Default | Notes |
| :--- | :--- | :--- | :--- | :--- |
| efs_performance_mode | EFS performance mode | string | generalPurpose | Supported values are `generalPurpose` or `maxIO` |
+| enable_efs_encryption | Enable encryption on EFS file systems | bool | false | When set to 'true', the EFS file systems will be encrypted. |
+
+### AWS Elastic Block Store (EBS)
+
+[AWS Elastic Block Store](https://aws.amazon.com/ebs/) is a block-level storage service provided by AWS for use with EC2 instances. EBS provides persistent storage for EC2 instances, allowing data to persist even after an EC2 instance is stopped or terminated. EBS volumes can be used as the root device for an EC2 instance, or as additional storage volumes. They can be attached and detached from instances as needed and can also be encrypted for increased security.
+
+To encrypt EBS volumes the following variable is applicable:
+
+
+| Name | Description | Type | Default | Notes |
+| :--- | :--- | :--- | :--- | :--- |
+| enable_ebs_encryption | Enable encryption on EBS volumes | bool | false | When set to 'true', the EBS volumes will be encrypted. |
## PostgreSQL Server
diff --git a/examples/sample-input-byo.tfvars b/examples/sample-input-byo.tfvars
index dc523de8..98f872b0 100644
--- a/examples/sample-input-byo.tfvars
+++ b/examples/sample-input-byo.tfvars
@@ -4,30 +4,30 @@
# **************** REQUIRED VARIABLES ****************
# These required variables' values MUST be provided by the User
-prefix = ""
-location = "" # e.g., "us-east-1"
+prefix = ""
+location = "" # e.g., "us-east-1"
# **************** REQUIRED VARIABLES ****************
# Bring your own existing resources
-vpc_id = "" # only needed if using pre-existing VPC
-subnet_ids = { # only needed if using pre-existing subnets
+vpc_id = "" # only needed if using pre-existing VPC
+subnet_ids = { # only needed if using pre-existing subnets
"public" : ["existing-public-subnet-id1", "existing-public-subnet-id2"],
"private" : ["existing-private-subnet-id1", "existing-private-subnet-id2"],
"database" : ["existing-database-subnet-id1", "existing-database-subnet-id2"] # only when 'create_postgres=true'
}
-nat_id = ""
+nat_id = ""
security_group_id = "" # only needed if using pre-existing Security Group
# !NOTE! - Without specifying your CIDR block access rules, ingress traffic
# to your cluster will be blocked by default.
# ************** RECOMMENDED VARIABLES ***************
-default_public_access_cidrs = [] # e.g., ["123.45.6.89/32"]
+default_public_access_cidrs = [] # e.g., ["123.45.6.89/32"]
ssh_public_key = "~/.ssh/id_rsa.pub"
# ************** RECOMMENDED VARIABLES ***************
# Tags for all tagable items in your cluster.
-tags = { } # e.g., { "key1" = "value1", "key2" = "value2" }
+tags = {} # e.g., { "key1" = "value1", "key2" = "value2" }
# Postgres config - By having this entry a database server is created. If you do not
# need an external database server remove the 'postgres_servers'
@@ -37,82 +37,82 @@ postgres_servers = {
}
## Cluster config
-kubernetes_version = "1.23"
-default_nodepool_node_count = 2
-default_nodepool_vm_type = "m5.2xlarge"
-default_nodepool_custom_data = ""
+kubernetes_version = "1.23"
+default_nodepool_node_count = 2
+default_nodepool_vm_type = "m5.2xlarge"
+default_nodepool_custom_data = ""
## General
-efs_performance_mode = "maxIO"
-storage_type = "standard"
+efs_performance_mode = "maxIO"
+storage_type = "standard"
## Cluster Node Pools config
node_pools = {
cas = {
- "vm_type" = "m5.2xlarge"
- "cpu_type" = "AL2_x86_64"
+ "vm_type" = "m5.2xlarge"
+ "cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
"os_disk_size" = 200
"os_disk_iops" = 0
- "min_nodes" = 1
- "max_nodes" = 5
- "node_taints" = ["workload.sas.com/class=cas:NoSchedule"]
+ "min_nodes" = 1
+ "max_nodes" = 5
+ "node_taints" = ["workload.sas.com/class=cas:NoSchedule"]
"node_labels" = {
"workload.sas.com/class" = "cas"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
},
compute = {
- "vm_type" = "m5.8xlarge"
- "cpu_type" = "AL2_x86_64"
+ "vm_type" = "m5.8xlarge"
+ "cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
"os_disk_size" = 200
"os_disk_iops" = 0
- "min_nodes" = 1
- "max_nodes" = 5
- "node_taints" = ["workload.sas.com/class=compute:NoSchedule"]
+ "min_nodes" = 1
+ "max_nodes" = 5
+ "node_taints" = ["workload.sas.com/class=compute:NoSchedule"]
"node_labels" = {
"workload.sas.com/class" = "compute"
"launcher.sas.com/prepullImage" = "sas-programming-environment"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
},
stateless = {
- "vm_type" = "m5.4xlarge"
- "cpu_type" = "AL2_x86_64"
+ "vm_type" = "m5.4xlarge"
+ "cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
"os_disk_size" = 200
"os_disk_iops" = 0
- "min_nodes" = 1
- "max_nodes" = 5
- "node_taints" = ["workload.sas.com/class=stateless:NoSchedule"]
+ "min_nodes" = 1
+ "max_nodes" = 5
+ "node_taints" = ["workload.sas.com/class=stateless:NoSchedule"]
"node_labels" = {
"workload.sas.com/class" = "stateless"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
},
stateful = {
- "vm_type" = "m5.4xlarge"
- "cpu_type" = "AL2_x86_64"
+ "vm_type" = "m5.4xlarge"
+ "cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
"os_disk_size" = 200
"os_disk_iops" = 0
- "min_nodes" = 1
- "max_nodes" = 3
- "node_taints" = ["workload.sas.com/class=stateful:NoSchedule"]
+ "min_nodes" = 1
+ "max_nodes" = 3
+ "node_taints" = ["workload.sas.com/class=stateful:NoSchedule"]
"node_labels" = {
"workload.sas.com/class" = "stateful"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
@@ -120,4 +120,4 @@ node_pools = {
}
# Jump Server
-create_jump_vm = true
+create_jump_vm = true
diff --git a/examples/sample-input-connect.tfvars b/examples/sample-input-connect.tfvars
index ea9b7e76..28485cbb 100644
--- a/examples/sample-input-connect.tfvars
+++ b/examples/sample-input-connect.tfvars
@@ -4,20 +4,20 @@
# **************** REQUIRED VARIABLES ****************
# These required variables' values MUST be provided by the User
-prefix = ""
-location = "" # e.g., "us-east-1"
+prefix = ""
+location = "" # e.g., "us-east-1"
# **************** REQUIRED VARIABLES ****************
# !NOTE! - Without specifying your CIDR block access rules, ingress traffic
# to your cluster will be blocked by default.
# ************** RECOMMENDED VARIABLES ***************
-default_public_access_cidrs = [] # e.g., ["123.45.6.89/32"]
+default_public_access_cidrs = [] # e.g., ["123.45.6.89/32"]
ssh_public_key = "~/.ssh/id_rsa.pub"
# ************** RECOMMENDED VARIABLES ***************
# Tags for all tagable items in your cluster.
-tags = { } # e.g., { "key1" = "value1", "key2" = "value2" }
+tags = {} # e.g., { "key1" = "value1", "key2" = "value2" }
# Postgres config - By having this entry a database server is created. If you do not
# need an external database server remove the 'postgres_servers'
@@ -27,100 +27,100 @@ postgres_servers = {
}
## Cluster config
-kubernetes_version = "1.23"
-default_nodepool_node_count = 2
-default_nodepool_vm_type = "m5.2xlarge"
-default_nodepool_custom_data = ""
+kubernetes_version = "1.23"
+default_nodepool_node_count = 2
+default_nodepool_vm_type = "m5.2xlarge"
+default_nodepool_custom_data = ""
## General
-efs_performance_mode = "maxIO"
-storage_type = "standard"
+efs_performance_mode = "maxIO"
+storage_type = "standard"
## Cluster Node Pools config
node_pools = {
cas = {
- "vm_type" = "m5.2xlarge"
- "cpu_type" = "AL2_x86_64"
+ "vm_type" = "m5.2xlarge"
+ "cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
"os_disk_size" = 200
"os_disk_iops" = 0
- "min_nodes" = 1
- "max_nodes" = 5
- "node_taints" = ["workload.sas.com/class=cas:NoSchedule"]
+ "min_nodes" = 1
+ "max_nodes" = 5
+ "node_taints" = ["workload.sas.com/class=cas:NoSchedule"]
"node_labels" = {
"workload.sas.com/class" = "cas"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
},
compute = {
- "vm_type" = "m5.8xlarge"
- "cpu_type" = "AL2_x86_64"
+ "vm_type" = "m5.8xlarge"
+ "cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
"os_disk_size" = 200
"os_disk_iops" = 0
- "min_nodes" = 1
- "max_nodes" = 5
- "node_taints" = ["workload.sas.com/class=compute:NoSchedule"]
+ "min_nodes" = 1
+ "max_nodes" = 5
+ "node_taints" = ["workload.sas.com/class=compute:NoSchedule"]
"node_labels" = {
"workload.sas.com/class" = "compute"
"launcher.sas.com/prepullImage" = "sas-programming-environment"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
},
connect = {
- "vm_type" = "m5.8xlarge"
- "cpu_type" = "AL2_x86_64"
+ "vm_type" = "m5.8xlarge"
+ "cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
"os_disk_size" = 200
"os_disk_iops" = 0
- "min_nodes" = 1
- "max_nodes" = 5
- "node_taints" = ["workload.sas.com/class=connect:NoSchedule"]
+ "min_nodes" = 1
+ "max_nodes" = 5
+ "node_taints" = ["workload.sas.com/class=connect:NoSchedule"]
"node_labels" = {
"workload.sas.com/class" = "connect"
"launcher.sas.com/prepullImage" = "sas-programming-environment"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
},
stateless = {
- "vm_type" = "m5.4xlarge"
- "cpu_type" = "AL2_x86_64"
+ "vm_type" = "m5.4xlarge"
+ "cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
"os_disk_size" = 200
"os_disk_iops" = 0
- "min_nodes" = 1
- "max_nodes" = 5
- "node_taints" = ["workload.sas.com/class=stateless:NoSchedule"]
+ "min_nodes" = 1
+ "max_nodes" = 5
+ "node_taints" = ["workload.sas.com/class=stateless:NoSchedule"]
"node_labels" = {
"workload.sas.com/class" = "stateless"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
},
stateful = {
- "vm_type" = "m5.4xlarge"
- "cpu_type" = "AL2_x86_64"
+ "vm_type" = "m5.4xlarge"
+ "cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
"os_disk_size" = 200
"os_disk_iops" = 0
- "min_nodes" = 1
- "max_nodes" = 3
- "node_taints" = ["workload.sas.com/class=stateful:NoSchedule"]
+ "min_nodes" = 1
+ "max_nodes" = 3
+ "node_taints" = ["workload.sas.com/class=stateful:NoSchedule"]
"node_labels" = {
"workload.sas.com/class" = "stateful"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
@@ -128,4 +128,4 @@ node_pools = {
}
# Jump Server
-create_jump_vm = true
+create_jump_vm = true
diff --git a/examples/sample-input-custom-data.tfvars b/examples/sample-input-custom-data.tfvars
index 3b03b855..2cbefc5d 100644
--- a/examples/sample-input-custom-data.tfvars
+++ b/examples/sample-input-custom-data.tfvars
@@ -4,20 +4,20 @@
# **************** REQUIRED VARIABLES ****************
# These required variables' values MUST be provided by the User
-prefix = ""
-location = "" # e.g., "us-east-1"
+prefix = ""
+location = "" # e.g., "us-east-1"
# **************** REQUIRED VARIABLES ****************
# !NOTE! - Without specifying your CIDR block access rules, ingress traffic
# to your cluster will be blocked by default.
# ************** RECOMMENDED VARIABLES ***************
-default_public_access_cidrs = [] # e.g., ["123.45.6.89/32"]
+default_public_access_cidrs = [] # e.g., ["123.45.6.89/32"]
ssh_public_key = "~/.ssh/id_rsa.pub"
# ************** RECOMMENDED VARIABLES ***************
# Tags for all tagable items in your cluster.
-tags = { } # e.g., { "key1" = "value1", "key2" = "value2" }
+tags = {} # e.g., { "key1" = "value1", "key2" = "value2" }
# Postgres config - By having this entry a database server is created. If you do not
# need an external database server remove the 'postgres_servers'
@@ -27,82 +27,82 @@ postgres_servers = {
}
## Cluster config
-kubernetes_version = "1.23"
-default_nodepool_node_count = 2
-default_nodepool_vm_type = "m5.2xlarge"
-default_nodepool_custom_data = ""
+kubernetes_version = "1.23"
+default_nodepool_node_count = 2
+default_nodepool_vm_type = "m5.2xlarge"
+default_nodepool_custom_data = ""
## General
-efs_performance_mode = "maxIO"
-storage_type = "standard"
+efs_performance_mode = "maxIO"
+storage_type = "standard"
## Cluster Node Pools config
node_pools = {
cas = {
- "vm_type" = "i3.8xlarge"
- "cpu_type" = "AL2_x86_64"
+ "vm_type" = "i3.8xlarge"
+ "cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
"os_disk_size" = 200
"os_disk_iops" = 0
- "min_nodes" = 1
- "max_nodes" = 5
- "node_taints" = ["workload.sas.com/class=cas:NoSchedule"]
+ "min_nodes" = 1
+ "max_nodes" = 5
+ "node_taints" = ["workload.sas.com/class=cas:NoSchedule"]
"node_labels" = {
"workload.sas.com/class" = "cas"
}
- "custom_data" = "./files/custom-data/additional_userdata.sh"
+ "custom_data" = "./files/custom-data/additional_userdata.sh"
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
},
compute = {
- "vm_type" = "m5.8xlarge"
- "cpu_type" = "AL2_x86_64"
+ "vm_type" = "m5.8xlarge"
+ "cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
"os_disk_size" = 200
"os_disk_iops" = 0
- "min_nodes" = 1
- "max_nodes" = 5
- "node_taints" = ["workload.sas.com/class=compute:NoSchedule"]
+ "min_nodes" = 1
+ "max_nodes" = 5
+ "node_taints" = ["workload.sas.com/class=compute:NoSchedule"]
"node_labels" = {
"workload.sas.com/class" = "compute"
"launcher.sas.com/prepullImage" = "sas-programming-environment"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
},
stateless = {
- "vm_type" = "m5.4xlarge"
- "cpu_type" = "AL2_x86_64"
+ "vm_type" = "m5.4xlarge"
+ "cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
"os_disk_size" = 200
"os_disk_iops" = 0
- "min_nodes" = 1
- "max_nodes" = 5
- "node_taints" = ["workload.sas.com/class=stateless:NoSchedule"]
+ "min_nodes" = 1
+ "max_nodes" = 5
+ "node_taints" = ["workload.sas.com/class=stateless:NoSchedule"]
"node_labels" = {
"workload.sas.com/class" = "stateless"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
},
stateful = {
- "vm_type" = "m5.4xlarge"
- "cpu_type" = "AL2_x86_64"
+ "vm_type" = "m5.4xlarge"
+ "cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
"os_disk_size" = 200
"os_disk_iops" = 0
- "min_nodes" = 1
- "max_nodes" = 3
- "node_taints" = ["workload.sas.com/class=stateful:NoSchedule"]
+ "min_nodes" = 1
+ "max_nodes" = 3
+ "node_taints" = ["workload.sas.com/class=stateful:NoSchedule"]
"node_labels" = {
"workload.sas.com/class" = "stateful"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
@@ -110,4 +110,4 @@ node_pools = {
}
# Jump Server
-create_jump_vm = true
+create_jump_vm = true
diff --git a/examples/sample-input-defaults.tfvars b/examples/sample-input-defaults.tfvars
index c848ef58..69a6c5e1 100644
--- a/examples/sample-input-defaults.tfvars
+++ b/examples/sample-input-defaults.tfvars
@@ -4,17 +4,17 @@
# **************** REQUIRED VARIABLES ****************
# These required variables' values MUST be provided by the User
-prefix = ""
-location = "" # e.g., "us-east-1"
+prefix = ""
+location = "" # e.g., "us-east-1"
# **************** REQUIRED VARIABLES ****************
# !NOTE! - Without specifying your CIDR block access rules, ingress traffic
# to your cluster will be blocked by default.
# ************** RECOMMENDED VARIABLES ***************
-default_public_access_cidrs = [] # e.g., ["123.45.6.89/32"]
+default_public_access_cidrs = [] # e.g., ["123.45.6.89/32"]
ssh_public_key = "~/.ssh/id_rsa.pub"
# ************** RECOMMENDED VARIABLES ***************
# Tags for all tagable items in your cluster.
-tags = { } # e.g., { "key1" = "value1", "key2" = "value2" }
+tags = {} # e.g., { "key1" = "value1", "key2" = "value2" }
diff --git a/examples/sample-input-gpu.tfvars b/examples/sample-input-gpu.tfvars
index 9668d9fb..297adb3b 100644
--- a/examples/sample-input-gpu.tfvars
+++ b/examples/sample-input-gpu.tfvars
@@ -4,20 +4,20 @@
# **************** REQUIRED VARIABLES ****************
# These required variables' values MUST be provided by the User
-prefix = ""
-location = "" # e.g., "us-east-1"
+prefix = ""
+location = "" # e.g., "us-east-1"
# **************** REQUIRED VARIABLES ****************
# !NOTE! - Without specifying your CIDR block access rules, ingress traffic
# to your cluster will be blocked by default.
# ************** RECOMMENDED VARIABLES ***************
-default_public_access_cidrs = [] # e.g., ["123.45.6.89/32"]
+default_public_access_cidrs = [] # e.g., ["123.45.6.89/32"]
ssh_public_key = "~/.ssh/id_rsa.pub"
# ************** RECOMMENDED VARIABLES ***************
# Tags for all tagable items in your cluster.
-tags = { } # e.g., { "key1" = "value1", "key2" = "value2" }
+tags = {} # e.g., { "key1" = "value1", "key2" = "value2" }
# Postgres config - By having this entry a database server is created. If you do not
# need an external database server remove the 'postgres_servers'
@@ -27,99 +27,99 @@ postgres_servers = {
}
## Cluster config
-kubernetes_version = "1.23"
-default_nodepool_node_count = 2
-default_nodepool_vm_type = "m5.2xlarge"
-default_nodepool_custom_data = ""
+kubernetes_version = "1.23"
+default_nodepool_node_count = 2
+default_nodepool_vm_type = "m5.2xlarge"
+default_nodepool_custom_data = ""
## General
-efs_performance_mode = "maxIO"
-storage_type = "standard"
+efs_performance_mode = "maxIO"
+storage_type = "standard"
## Cluster Node Pools config
node_pools = {
cas = {
- "vm_type" = "m5.2xlarge"
+ "vm_type" = "m5.2xlarge"
"cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
"os_disk_size" = 200
"os_disk_iops" = 0
- "min_nodes" = 1
- "max_nodes" = 5
- "node_taints" = ["workload.sas.com/class=cas:NoSchedule"]
+ "min_nodes" = 1
+ "max_nodes" = 5
+ "node_taints" = ["workload.sas.com/class=cas:NoSchedule"]
"node_labels" = {
"workload.sas.com/class" = "cas"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
},
gpu_cas = {
- "vm_type" = "p2.8xlarge"
+ "vm_type" = "p2.8xlarge"
"cpu_type" = "AL2_x86_64_GPU"
"os_disk_type" = "gp2"
"os_disk_size" = 200
"os_disk_iops" = 0
- "min_nodes" = 1
- "max_nodes" = 5
- "node_taints" = ["nvidia.com/gpu=present:NoSchedule"]
+ "min_nodes" = 1
+ "max_nodes" = 5
+ "node_taints" = ["nvidia.com/gpu=present:NoSchedule"]
"node_labels" = {
"workload.sas.com/class" = "cas"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
},
compute = {
- "vm_type" = "m5.8xlarge"
+ "vm_type" = "m5.8xlarge"
"cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
"os_disk_size" = 200
"os_disk_iops" = 0
- "min_nodes" = 1
- "max_nodes" = 5
- "node_taints" = ["workload.sas.com/class=compute:NoSchedule"]
+ "min_nodes" = 1
+ "max_nodes" = 5
+ "node_taints" = ["workload.sas.com/class=compute:NoSchedule"]
"node_labels" = {
"workload.sas.com/class" = "compute"
"launcher.sas.com/prepullImage" = "sas-programming-environment"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
},
stateless = {
- "vm_type" = "m5.4xlarge"
+ "vm_type" = "m5.4xlarge"
"cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
"os_disk_size" = 200
"os_disk_iops" = 0
- "min_nodes" = 1
- "max_nodes" = 5
- "node_taints" = ["workload.sas.com/class=stateless:NoSchedule"]
+ "min_nodes" = 1
+ "max_nodes" = 5
+ "node_taints" = ["workload.sas.com/class=stateless:NoSchedule"]
"node_labels" = {
"workload.sas.com/class" = "stateless"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
},
stateful = {
- "vm_type" = "m5.4xlarge"
+ "vm_type" = "m5.4xlarge"
"cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
"os_disk_size" = 200
"os_disk_iops" = 0
- "min_nodes" = 1
- "max_nodes" = 3
- "node_taints" = ["workload.sas.com/class=stateful:NoSchedule"]
+ "min_nodes" = 1
+ "max_nodes" = 3
+ "node_taints" = ["workload.sas.com/class=stateful:NoSchedule"]
"node_labels" = {
"workload.sas.com/class" = "stateful"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
@@ -127,4 +127,4 @@ node_pools = {
}
# Jump Server
-create_jump_vm = true
+create_jump_vm = true
diff --git a/examples/sample-input-ha.tfvars b/examples/sample-input-ha.tfvars
index 4b50514a..e5b9c83e 100644
--- a/examples/sample-input-ha.tfvars
+++ b/examples/sample-input-ha.tfvars
@@ -4,20 +4,20 @@
# **************** REQUIRED VARIABLES ****************
# These required variables' values MUST be provided by the User
-prefix = ""
-location = "" # e.g., "us-east-1"
+prefix = ""
+location = "" # e.g., "us-east-1"
# **************** REQUIRED VARIABLES ****************
# !NOTE! - Without specifying your CIDR block access rules, ingress traffic
# to your cluster will be blocked by default.
# ************** RECOMMENDED VARIABLES ***************
-default_public_access_cidrs = [] # e.g., ["123.45.6.89/32"]
-ssh_public_key = "~/.ssh/id_rsa.pub"
+default_public_access_cidrs = [] # e.g., ["123.45.6.89/32"]
+ssh_public_key = "~/.ssh/id_rsa.pub" # SSH public key for VMs
# ************** RECOMMENDED VARIABLES ***************
# Tags for all tagable items in your cluster.
-tags = { } # e.g., { "key1" = "value1", "key2" = "value2" }
+tags = {} # e.g., { "key1" = "value1", "key2" = "value2" }
# Postgres config - By having this entry a database server is created. If you do not
# need an external database server remove the 'postgres_servers'
@@ -26,86 +26,83 @@ postgres_servers = {
default = {},
}
-# SSH public key for VMs
-ssh_public_key = "~/.ssh/id_rsa.pub"
-
## Cluster config
-kubernetes_version = "1.23"
-default_nodepool_node_count = 2
-default_nodepool_vm_type = "m5.2xlarge"
-default_nodepool_custom_data = ""
+kubernetes_version = "1.23"
+default_nodepool_node_count = 2
+default_nodepool_vm_type = "m5.2xlarge"
+default_nodepool_custom_data = ""
## General
-efs_performance_mode = "maxIO"
-storage_type = "ha"
+efs_performance_mode = "maxIO"
+storage_type = "ha"
## Cluster Node Pools config
node_pools = {
cas = {
- "vm_type" = "i3.8xlarge"
- "cpu_type" = "AL2_x86_64"
+ "vm_type" = "i3.8xlarge"
+ "cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
"os_disk_size" = 200
"os_disk_iops" = 0
- "min_nodes" = 1
- "max_nodes" = 5
- "node_taints" = ["workload.sas.com/class=cas:NoSchedule"]
+ "min_nodes" = 1
+ "max_nodes" = 5
+ "node_taints" = ["workload.sas.com/class=cas:NoSchedule"]
"node_labels" = {
"workload.sas.com/class" = "cas"
}
- "custom_data" = "./files/custom-data/additional_userdata.sh"
+ "custom_data" = "./files/custom-data/additional_userdata.sh"
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
},
compute = {
- "vm_type" = "m5.8xlarge"
- "cpu_type" = "AL2_x86_64"
+ "vm_type" = "m5.8xlarge"
+ "cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
"os_disk_size" = 200
"os_disk_iops" = 0
- "min_nodes" = 1
- "max_nodes" = 5
- "node_taints" = ["workload.sas.com/class=compute:NoSchedule"]
+ "min_nodes" = 1
+ "max_nodes" = 5
+ "node_taints" = ["workload.sas.com/class=compute:NoSchedule"]
"node_labels" = {
"workload.sas.com/class" = "compute"
"launcher.sas.com/prepullImage" = "sas-programming-environment"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
},
stateless = {
- "vm_type" = "m5.4xlarge"
- "cpu_type" = "AL2_x86_64"
+ "vm_type" = "m5.4xlarge"
+ "cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
"os_disk_size" = 200
"os_disk_iops" = 0
- "min_nodes" = 1
- "max_nodes" = 5
- "node_taints" = ["workload.sas.com/class=stateless:NoSchedule"]
+ "min_nodes" = 1
+ "max_nodes" = 5
+ "node_taints" = ["workload.sas.com/class=stateless:NoSchedule"]
"node_labels" = {
"workload.sas.com/class" = "stateless"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
},
stateful = {
- "vm_type" = "m5.4xlarge"
- "cpu_type" = "AL2_x86_64"
+ "vm_type" = "m5.4xlarge"
+ "cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
"os_disk_size" = 200
"os_disk_iops" = 0
- "min_nodes" = 1
- "max_nodes" = 3
- "node_taints" = ["workload.sas.com/class=stateful:NoSchedule"]
+ "min_nodes" = 1
+ "max_nodes" = 3
+ "node_taints" = ["workload.sas.com/class=stateful:NoSchedule"]
"node_labels" = {
"workload.sas.com/class" = "stateful"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
@@ -113,4 +110,4 @@ node_pools = {
}
# Jump Server
-create_jump_vm = true
+create_jump_vm = true
diff --git a/examples/sample-input-minimal.tfvars b/examples/sample-input-minimal.tfvars
index 4c5aebf4..97a50a06 100644
--- a/examples/sample-input-minimal.tfvars
+++ b/examples/sample-input-minimal.tfvars
@@ -4,20 +4,20 @@
# **************** REQUIRED VARIABLES ****************
# These required variables' values MUST be provided by the User
-prefix = ""
-location = "" # e.g., "us-east-1"
+prefix = ""
+location = "" # e.g., "us-east-1"
# **************** REQUIRED VARIABLES ****************
# !NOTE! - Without specifying your CIDR block access rules, ingress traffic
# to your cluster will be blocked by default.
# ************** RECOMMENDED VARIABLES ***************
-default_public_access_cidrs = [] # e.g., ["123.45.6.89/32"]
+default_public_access_cidrs = [] # e.g., ["123.45.6.89/32"]
ssh_public_key = "~/.ssh/id_rsa.pub"
# ************** RECOMMENDED VARIABLES ***************
# Tags for all tagable items in your cluster.
-tags = { } # e.g., { "key1" = "value1", "key2" = "value2" }
+tags = {} # e.g., { "key1" = "value1", "key2" = "value2" }
# Postgres config - By having this entry a database server is created. If you do not
# need an external database server remove the 'postgres_servers'
@@ -27,49 +27,49 @@ tags = { } # e.g., { "key1" = "value1", "key2
# }
## Cluster config
-kubernetes_version = "1.23"
-default_nodepool_node_count = 1
-default_nodepool_vm_type = "m5.large"
-default_nodepool_custom_data = ""
+kubernetes_version = "1.23"
+default_nodepool_node_count = 1
+default_nodepool_vm_type = "m5.large"
+default_nodepool_custom_data = ""
## General
-efs_performance_mode = "maxIO"
-storage_type = "standard"
+efs_performance_mode = "maxIO"
+storage_type = "standard"
## Cluster Node Pools config - minimal
-cluster_node_pool_mode = "minimal"
+cluster_node_pool_mode = "minimal"
node_pools = {
cas = {
- "vm_type" = "r5.xlarge"
- "cpu_type" = "AL2_x86_64"
- "os_disk_type" = "gp2"
- "os_disk_size" = 200
- "os_disk_iops" = 0
- "min_nodes" = 0
- "max_nodes" = 5
- "node_taints" = ["workload.sas.com/class=cas:NoSchedule"]
+ "vm_type" = "r5.xlarge"
+ "cpu_type" = "AL2_x86_64"
+ "os_disk_type" = "gp2"
+ "os_disk_size" = 200
+ "os_disk_iops" = 0
+ "min_nodes" = 0
+ "max_nodes" = 5
+ "node_taints" = ["workload.sas.com/class=cas:NoSchedule"]
"node_labels" = {
"workload.sas.com/class" = "cas"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
},
generic = {
- "vm_type" = "m5.2xlarge"
- "cpu_type" = "AL2_x86_64"
- "os_disk_type" = "gp2"
- "os_disk_size" = 200
- "os_disk_iops" = 0
- "min_nodes" = 0
- "max_nodes" = 5
- "node_taints" = []
+ "vm_type" = "m5.2xlarge"
+ "cpu_type" = "AL2_x86_64"
+ "os_disk_type" = "gp2"
+ "os_disk_size" = 200
+ "os_disk_iops" = 0
+ "min_nodes" = 0
+ "max_nodes" = 5
+ "node_taints" = []
"node_labels" = {
"workload.sas.com/class" = "compute"
"launcher.sas.com/prepullImage" = "sas-programming-environment"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
@@ -77,12 +77,12 @@ node_pools = {
}
# Jump Server
-create_jump_vm = true
-jump_vm_admin = "jumpuser"
-jump_vm_type = "t3.medium"
+create_jump_vm = true
+jump_vm_admin = "jumpuser"
+jump_vm_type = "t3.medium"
# NFS Server
# required ONLY when storage_type is "standard" to create NFS Server VM
-create_nfs_public_ip = false
-nfs_vm_admin = "nfsuser"
-nfs_vm_type = "m5.xlarge"
+create_nfs_public_ip = false
+nfs_vm_admin = "nfsuser"
+nfs_vm_type = "m5.xlarge"
diff --git a/examples/sample-input-singlestore.tfvars b/examples/sample-input-singlestore.tfvars
new file mode 100644
index 00000000..dcb4e072
--- /dev/null
+++ b/examples/sample-input-singlestore.tfvars
@@ -0,0 +1,130 @@
+# !NOTE! - These are only a subset of the variables in CONFIG-VARS.md provided
+# as examples. Customize this file to add any variables from CONFIG-VARS.md whose
+# default values you want to change.
+
+# **************** REQUIRED VARIABLES ****************
+# These required variables' values MUST be provided by the User
+prefix = ""
+location = "" # e.g., "us-east-1"
+# **************** REQUIRED VARIABLES ****************
+
+# !NOTE! - Without specifying your CIDR block access rules, ingress traffic
+# to your cluster will be blocked by default.
+
+# ************** RECOMMENDED VARIABLES ***************
+default_public_access_cidrs = [] # e.g., ["123.45.6.89/32"]
+ssh_public_key = "~/.ssh/id_rsa.pub"
+# ************** RECOMMENDED VARIABLES ***************
+
+# Tags for all taggable items in your cluster.
+tags = {} # e.g., { "key1" = "value1", "key2" = "value2" }
+
+# Postgres config - By having this entry a database server is created. If you do not
+# need an external database server, remove the 'postgres_servers'
+# block below.
+postgres_servers = {
+ default = {},
+}
+
+## Cluster config
+kubernetes_version = "1.23"
+default_nodepool_node_count = 2
+default_nodepool_vm_type = "m5.2xlarge"
+default_nodepool_custom_data = ""
+
+## General
+efs_performance_mode = "maxIO"
+storage_type = "standard"
+
+## Cluster Node Pools config
+node_pools = {
+ cas = {
+ "vm_type" = "m5.2xlarge"
+ "cpu_type" = "AL2_x86_64"
+ "os_disk_type" = "gp2"
+ "os_disk_size" = 200
+ "os_disk_iops" = 0
+ "min_nodes" = 1
+ "max_nodes" = 5
+ "node_taints" = ["workload.sas.com/class=cas:NoSchedule"]
+ "node_labels" = {
+ "workload.sas.com/class" = "cas"
+ }
+ "custom_data" = ""
+ "metadata_http_endpoint" = "enabled"
+ "metadata_http_tokens" = "required"
+ "metadata_http_put_response_hop_limit" = 1
+ },
+ compute = {
+ "vm_type" = "m5.8xlarge"
+ "cpu_type" = "AL2_x86_64"
+ "os_disk_type" = "gp2"
+ "os_disk_size" = 200
+ "os_disk_iops" = 0
+ "min_nodes" = 1
+ "max_nodes" = 5
+ "node_taints" = ["workload.sas.com/class=compute:NoSchedule"]
+ "node_labels" = {
+ "workload.sas.com/class" = "compute"
+ "launcher.sas.com/prepullImage" = "sas-programming-environment"
+ }
+ "custom_data" = ""
+ "metadata_http_endpoint" = "enabled"
+ "metadata_http_tokens" = "required"
+ "metadata_http_put_response_hop_limit" = 1
+ },
+ stateless = {
+ "vm_type" = "m5.4xlarge"
+ "cpu_type" = "AL2_x86_64"
+ "os_disk_type" = "gp2"
+ "os_disk_size" = 200
+ "os_disk_iops" = 0
+ "min_nodes" = 1
+ "max_nodes" = 5
+ "node_taints" = ["workload.sas.com/class=stateless:NoSchedule"]
+ "node_labels" = {
+ "workload.sas.com/class" = "stateless"
+ }
+ "custom_data" = ""
+ "metadata_http_endpoint" = "enabled"
+ "metadata_http_tokens" = "required"
+ "metadata_http_put_response_hop_limit" = 1
+ },
+ stateful = {
+ "vm_type" = "m5.4xlarge"
+ "cpu_type" = "AL2_x86_64"
+ "os_disk_type" = "gp2"
+ "os_disk_size" = 200
+ "os_disk_iops" = 0
+ "min_nodes" = 1
+ "max_nodes" = 3
+ "node_taints" = ["workload.sas.com/class=stateful:NoSchedule"]
+ "node_labels" = {
+ "workload.sas.com/class" = "stateful"
+ }
+ "custom_data" = ""
+ "metadata_http_endpoint" = "enabled"
+ "metadata_http_tokens" = "required"
+ "metadata_http_put_response_hop_limit" = 1
+ },
+ singlestore = {
+ "vm_type" = "r4.4xlarge"
+ "cpu_type" = "AL2_x86_64"
+ "os_disk_type" = "gp2"
+ "os_disk_size" = 200
+ "os_disk_iops" = 0
+ "min_nodes" = 0
+ "max_nodes" = 7
+ "node_taints" = ["workload.sas.com/class=singlestore:NoSchedule"]
+ "node_labels" = {
+ "workload.sas.com/class" = "singlestore"
+ }
+ "custom_data" = ""
+ "metadata_http_endpoint" = "enabled"
+ "metadata_http_tokens" = "required"
+ "metadata_http_put_response_hop_limit" = 1
+ }
+}
+
+# Jump Server
+create_jump_vm = true
diff --git a/examples/sample-input.tfvars b/examples/sample-input.tfvars
index e4e24a81..f39c9460 100644
--- a/examples/sample-input.tfvars
+++ b/examples/sample-input.tfvars
@@ -4,20 +4,20 @@
# **************** REQUIRED VARIABLES ****************
# These required variables' values MUST be provided by the User
-prefix = ""
-location = "" # e.g., "us-east-1"
+prefix = ""
+location = "" # e.g., "us-east-1"
# **************** REQUIRED VARIABLES ****************
# !NOTE! - Without specifying your CIDR block access rules, ingress traffic
# to your cluster will be blocked by default.
# ************** RECOMMENDED VARIABLES ***************
-default_public_access_cidrs = [] # e.g., ["123.45.6.89/32"]
+default_public_access_cidrs = [] # e.g., ["123.45.6.89/32"]
ssh_public_key = "~/.ssh/id_rsa.pub"
# ************** RECOMMENDED VARIABLES ***************
# Tags for all tagable items in your cluster.
-tags = { } # e.g., { "key1" = "value1", "key2" = "value2" }
+tags = {} # e.g., { "key1" = "value1", "key2" = "value2" }
# Postgres config - By having this entry a database server is created. If you do not
# need an external database server remove the 'postgres_servers'
@@ -27,82 +27,82 @@ postgres_servers = {
}
## Cluster config
-kubernetes_version = "1.23"
-default_nodepool_node_count = 2
-default_nodepool_vm_type = "m5.2xlarge"
-default_nodepool_custom_data = ""
+kubernetes_version = "1.23"
+default_nodepool_node_count = 2
+default_nodepool_vm_type = "m5.2xlarge"
+default_nodepool_custom_data = ""
## General
-efs_performance_mode = "maxIO"
-storage_type = "standard"
+efs_performance_mode = "maxIO"
+storage_type = "standard"
## Cluster Node Pools config
node_pools = {
cas = {
- "vm_type" = "m5.2xlarge"
+ "vm_type" = "m5.2xlarge"
"cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
"os_disk_size" = 200
"os_disk_iops" = 0
- "min_nodes" = 1
- "max_nodes" = 5
- "node_taints" = ["workload.sas.com/class=cas:NoSchedule"]
+ "min_nodes" = 1
+ "max_nodes" = 5
+ "node_taints" = ["workload.sas.com/class=cas:NoSchedule"]
"node_labels" = {
"workload.sas.com/class" = "cas"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
},
compute = {
- "vm_type" = "m5.8xlarge"
+ "vm_type" = "m5.8xlarge"
"cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
"os_disk_size" = 200
"os_disk_iops" = 0
- "min_nodes" = 1
- "max_nodes" = 5
- "node_taints" = ["workload.sas.com/class=compute:NoSchedule"]
+ "min_nodes" = 1
+ "max_nodes" = 5
+ "node_taints" = ["workload.sas.com/class=compute:NoSchedule"]
"node_labels" = {
"workload.sas.com/class" = "compute"
"launcher.sas.com/prepullImage" = "sas-programming-environment"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
},
stateless = {
- "vm_type" = "m5.4xlarge"
+ "vm_type" = "m5.4xlarge"
"cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
"os_disk_size" = 200
"os_disk_iops" = 0
- "min_nodes" = 1
- "max_nodes" = 5
- "node_taints" = ["workload.sas.com/class=stateless:NoSchedule"]
+ "min_nodes" = 1
+ "max_nodes" = 5
+ "node_taints" = ["workload.sas.com/class=stateless:NoSchedule"]
"node_labels" = {
"workload.sas.com/class" = "stateless"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
},
stateful = {
- "vm_type" = "m5.4xlarge"
+ "vm_type" = "m5.4xlarge"
"cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
"os_disk_size" = 200
"os_disk_iops" = 0
- "min_nodes" = 1
- "max_nodes" = 3
- "node_taints" = ["workload.sas.com/class=stateful:NoSchedule"]
+ "min_nodes" = 1
+ "max_nodes" = 3
+ "node_taints" = ["workload.sas.com/class=stateful:NoSchedule"]
"node_labels" = {
"workload.sas.com/class" = "stateful"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
@@ -110,4 +110,4 @@ node_pools = {
}
# Jump Server
-create_jump_vm = true
+create_jump_vm = true
diff --git a/locals.tf b/locals.tf
index 1065d4bc..a6ddf040 100755
--- a/locals.tf
+++ b/locals.tf
@@ -2,10 +2,10 @@
locals {
# General
- security_group_id = var.security_group_id == null ? aws_security_group.sg[0].id : data.aws_security_group.sg[0].id
- cluster_security_group_id = var.cluster_security_group_id == null ? aws_security_group.cluster_security_group.0.id : var.cluster_security_group_id
- workers_security_group_id = var.workers_security_group_id == null ? aws_security_group.workers_security_group.0.id : var.workers_security_group_id
- cluster_name = "${var.prefix}-eks"
+ security_group_id = var.security_group_id == null ? aws_security_group.sg[0].id : data.aws_security_group.sg[0].id
+ cluster_security_group_id = var.cluster_security_group_id == null ? aws_security_group.cluster_security_group.0.id : var.cluster_security_group_id
+ workers_security_group_id = var.workers_security_group_id == null ? aws_security_group.workers_security_group.0.id : var.workers_security_group_id
+ cluster_name = "${var.prefix}-eks"
# CIDRs
default_public_access_cidrs = var.default_public_access_cidrs == null ? [] : var.default_public_access_cidrs
@@ -15,124 +15,126 @@ locals {
postgres_public_access_cidrs = var.postgres_public_access_cidrs == null ? local.default_public_access_cidrs : var.postgres_public_access_cidrs
# Subnets
- jump_vm_subnet = var.create_jump_public_ip ? module.vpc.public_subnets[0] : module.vpc.private_subnets[0]
- nfs_vm_subnet = var.create_nfs_public_ip ? module.vpc.public_subnets[0] : module.vpc.private_subnets[0]
- nfs_vm_subnet_az = var.create_nfs_public_ip ? module.vpc.public_subnet_azs[0] : module.vpc.private_subnet_azs[0]
+ jump_vm_subnet = var.create_jump_public_ip ? module.vpc.public_subnets[0] : module.vpc.private_subnets[0]
+ nfs_vm_subnet = var.create_nfs_public_ip ? module.vpc.public_subnets[0] : module.vpc.private_subnets[0]
+ nfs_vm_subnet_az = var.create_nfs_public_ip ? module.vpc.public_subnet_azs[0] : module.vpc.private_subnet_azs[0]
- ssh_public_key = ( var.create_jump_vm || var.storage_type == "standard"
- ? file(var.ssh_public_key)
- : null
- )
+ ssh_public_key = (var.create_jump_vm || var.storage_type == "standard"
+ ? file(var.ssh_public_key)
+ : null
+ )
# Kubernetes
- kubeconfig_filename = "${local.cluster_name}-kubeconfig.conf"
- kubeconfig_path = var.iac_tooling == "docker" ? "/workspace/${local.kubeconfig_filename}" : local.kubeconfig_filename
- kubeconfig_ca_cert = data.aws_eks_cluster.cluster.certificate_authority.0.data
+ kubeconfig_filename = "${local.cluster_name}-kubeconfig.conf"
+ kubeconfig_path = var.iac_tooling == "docker" ? "/workspace/${local.kubeconfig_filename}" : local.kubeconfig_filename
+ kubeconfig_ca_cert = data.aws_eks_cluster.cluster.certificate_authority.0.data
# Mapping node_pools to node_groups
default_node_pool = {
default = {
- name = "default"
- instance_types = [var.default_nodepool_vm_type]
- block_device_mappings = {
+ name = "default"
+ instance_types = [var.default_nodepool_vm_type]
+ block_device_mappings = {
xvda = {
device_name = "/dev/xvda"
ebs = {
- volume_type = var.default_nodepool_os_disk_type
- volume_size = var.default_nodepool_os_disk_size
- iops = var.default_nodepool_os_disk_iops
+ volume_type = var.default_nodepool_os_disk_type
+ volume_size = var.default_nodepool_os_disk_size
+ iops = var.default_nodepool_os_disk_iops
+ encrypted = var.enable_ebs_encryption
}
}
}
- desired_size = var.default_nodepool_node_count
- min_size = var.default_nodepool_min_nodes
- max_size = var.default_nodepool_max_nodes
- taints = { for i, taint in var.default_nodepool_taints : "default-${i}"=> {
- "key" = split("=", taint)[0],
- "value"= split(":", split("=", taint)[1])[0],
- "effect"=length(regexall(":No", taint)) > 0 ? upper(replace(split(":", split("=", taint)[1])[1], "No", "NO_")) : upper(replace(split(":", split("=", taint)[1])[1], "No", "_NO_"))
- }
- }
- labels = var.default_nodepool_labels
+ desired_size = var.default_nodepool_node_count
+ min_size = var.default_nodepool_min_nodes
+ max_size = var.default_nodepool_max_nodes
+ taints = { for i, taint in var.default_nodepool_taints : "default-${i}" => {
+ "key" = split("=", taint)[0],
+ "value" = split(":", split("=", taint)[1])[0],
+ "effect" = length(regexall(":No", taint)) > 0 ? upper(replace(split(":", split("=", taint)[1])[1], "No", "NO_")) : upper(replace(split(":", split("=", taint)[1])[1], "No", "_NO_"))
+ }
+ }
+ labels = var.default_nodepool_labels
# User data
- bootstrap_extra_args = "--kubelet-extra-args '--node-labels=${replace(replace(jsonencode(var.default_nodepool_labels), "/[\"\\{\\}]/", ""), ":", "=")} --register-with-taints=${join(",", var.default_nodepool_taints)} ' "
- pre_bootstrap_user_data = (var.default_nodepool_custom_data != "" ? file(var.default_nodepool_custom_data) : "")
- metadata_options = {
- http_endpoint = var.default_nodepool_metadata_http_endpoint
- http_tokens = var.default_nodepool_metadata_http_tokens
- http_put_response_hop_limit = var.default_nodepool_metadata_http_put_response_hop_limit
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=${replace(replace(jsonencode(var.default_nodepool_labels), "/[\"\\{\\}]/", ""), ":", "=")} --register-with-taints=${join(",", var.default_nodepool_taints)} ' "
+ pre_bootstrap_user_data = (var.default_nodepool_custom_data != "" ? file(var.default_nodepool_custom_data) : "")
+ metadata_options = {
+ http_endpoint = var.default_nodepool_metadata_http_endpoint
+ http_tokens = var.default_nodepool_metadata_http_tokens
+ http_put_response_hop_limit = var.default_nodepool_metadata_http_put_response_hop_limit
}
# Launch Template
create_launch_template = true
launch_template_name = "${local.cluster_name}-default-lt"
launch_template_use_name_prefix = true
launch_template_tags = { Name = "${local.cluster_name}-default" }
- tags = var.autoscaling_enabled ? merge(var.tags, { key = "k8s.io/cluster-autoscaler/${local.cluster_name}", value = "owned", propagate_at_launch = true }, { key = "k8s.io/cluster-autoscaler/enabled", value = "true", propagate_at_launch = true}) : var.tags
+ tags = var.autoscaling_enabled ? merge(var.tags, { key = "k8s.io/cluster-autoscaler/${local.cluster_name}", value = "owned", propagate_at_launch = true }, { key = "k8s.io/cluster-autoscaler/enabled", value = "true", propagate_at_launch = true }) : var.tags
}
}
user_node_pool = {
for key, np_value in var.node_pools :
- key => {
- name = key
- instance_types = [np_value.vm_type]
- ami_type = np_value.cpu_type
- disk_size = np_value.os_disk_size
- block_device_mappings = {
- xvda = {
- device_name = "/dev/xvda"
- ebs = {
- volume_type = np_value.os_disk_type
- volume_size = np_value.os_disk_size
- iops = np_value.os_disk_iops
- }
+ key => {
+ name = key
+ instance_types = [np_value.vm_type]
+ ami_type = np_value.cpu_type
+ disk_size = np_value.os_disk_size
+ block_device_mappings = {
+ xvda = {
+ device_name = "/dev/xvda"
+ ebs = {
+ volume_type = np_value.os_disk_type
+ volume_size = np_value.os_disk_size
+ iops = np_value.os_disk_iops
+ encrypted = var.enable_ebs_encryption
}
}
- desired_size = var.autoscaling_enabled ? np_value.min_nodes == 0 ? 1 : np_value.min_nodes : np_value.min_nodes # TODO - Remove when moving to managed nodes
- min_size = np_value.min_nodes
- max_size = np_value.max_nodes
- # AWS EKS Taints - https://docs.aws.amazon.com/eks/latest/userguide/node-taints-managed-node-groups.html
- taints ={ for i, taint in np_value.node_taints: "${key}-${i}"=> { # to handle multiple taints, add index i to key for uniqueness
- "key" = split("=", taint)[0],
- "value"= split(":", split("=", taint)[1])[0],
- "effect"=length(regexall(":No", taint)) > 0 ? upper(replace(split(":", split("=", taint)[1])[1], "No", "NO_")) : upper(replace(split(":", split("=", taint)[1])[1], "No", "_NO_"))
- }
- }
- labels = np_value.node_labels
- # User data
- bootstrap_extra_args = "--kubelet-extra-args '--node-labels=${replace(replace(jsonencode(np_value.node_labels), "/[\"\\{\\}]/", ""), ":", "=")} --register-with-taints=${join(",", np_value.node_taints)}' "
- pre_bootstrap_user_data = (np_value.custom_data != "" ? file(np_value.custom_data) : "")
- metadata_options = {
- http_endpoint = var.default_nodepool_metadata_http_endpoint
- http_tokens = var.default_nodepool_metadata_http_tokens
- http_put_response_hop_limit = var.default_nodepool_metadata_http_put_response_hop_limit
+ }
+ desired_size = var.autoscaling_enabled ? np_value.min_nodes == 0 ? 1 : np_value.min_nodes : np_value.min_nodes # TODO - Remove when moving to managed nodes
+ min_size = np_value.min_nodes
+ max_size = np_value.max_nodes
+ # AWS EKS Taints - https://docs.aws.amazon.com/eks/latest/userguide/node-taints-managed-node-groups.html
+ taints = { for i, taint in np_value.node_taints : "${key}-${i}" => { # to handle multiple taints, add index i to key for uniqueness
+ "key" = split("=", taint)[0],
+ "value" = split(":", split("=", taint)[1])[0],
+ "effect" = length(regexall(":No", taint)) > 0 ? upper(replace(split(":", split("=", taint)[1])[1], "No", "NO_")) : upper(replace(split(":", split("=", taint)[1])[1], "No", "_NO_"))
}
- # Launch Template
- create_launch_template = true
- launch_template_name = "${local.cluster_name}-${key}-lt"
- launch_template_use_name_prefix = true
- launch_template_tags = { Name = "${local.cluster_name}-${key}" }
- tags = var.autoscaling_enabled ? merge(var.tags, { key = "k8s.io/cluster-autoscaler/${local.cluster_name}", value = "owned", propagate_at_launch = true }, { key = "k8s.io/cluster-autoscaler/enabled", value = "true", propagate_at_launch = true}) : var.tags
}
+ labels = np_value.node_labels
+ # User data
+ bootstrap_extra_args = "--kubelet-extra-args '--node-labels=${replace(replace(jsonencode(np_value.node_labels), "/[\"\\{\\}]/", ""), ":", "=")} --register-with-taints=${join(",", np_value.node_taints)}' "
+ pre_bootstrap_user_data = (np_value.custom_data != "" ? file(np_value.custom_data) : "")
+ metadata_options = {
+ http_endpoint = var.default_nodepool_metadata_http_endpoint
+ http_tokens = var.default_nodepool_metadata_http_tokens
+ http_put_response_hop_limit = var.default_nodepool_metadata_http_put_response_hop_limit
+ }
+ # Launch Template
+ create_launch_template = true
+ launch_template_name = "${local.cluster_name}-${key}-lt"
+ launch_template_use_name_prefix = true
+ launch_template_tags = { Name = "${local.cluster_name}-${key}" }
+ tags = var.autoscaling_enabled ? merge(var.tags, { key = "k8s.io/cluster-autoscaler/${local.cluster_name}", value = "owned", propagate_at_launch = true }, { key = "k8s.io/cluster-autoscaler/enabled", value = "true", propagate_at_launch = true }) : var.tags
+ }
}
# Merging the default_node_pool into the work_groups node pools
node_groups = merge(local.default_node_pool, local.user_node_pool)
# PostgreSQL
- postgres_servers = var.postgres_servers == null ? {} : { for k, v in var.postgres_servers : k => merge( var.postgres_server_defaults, v, )}
- postgres_sgr_ports = var.postgres_servers != null ? length(local.postgres_servers) != 0 ? [ for k,v in local.postgres_servers :
- v.server_port
+ postgres_servers = var.postgres_servers == null ? {} : { for k, v in var.postgres_servers : k => merge(var.postgres_server_defaults, v, ) }
+ postgres_sgr_ports = var.postgres_servers != null ? length(local.postgres_servers) != 0 ? [for k, v in local.postgres_servers :
+ v.server_port
] : [] : null
- postgres_outputs = length(module.postgresql) != 0 ? { for k,v in module.postgresql :
+ postgres_outputs = length(module.postgresql) != 0 ? { for k, v in module.postgresql :
k => {
"server_name" : module.postgresql[k].db_instance_id,
"fqdn" : module.postgresql[k].db_instance_address,
"admin" : module.postgresql[k].db_instance_username,
"password" : module.postgresql[k].db_instance_password,
- "server_port" : module.postgresql[k].db_instance_port
+ "server_port" : module.postgresql[k].db_instance_port
"ssl_enforcement_enabled" : local.postgres_servers[k].ssl_enforcement_enabled,
"internal" : false
}
diff --git a/main.tf b/main.tf
index 17a233ea..9f4a17fc 100755
--- a/main.tf
+++ b/main.tf
@@ -5,12 +5,12 @@
#
provider "aws" {
- region = var.location
- profile = var.aws_profile
- shared_credentials_file = var.aws_shared_credentials_file
- access_key = var.aws_access_key_id
- secret_key = var.aws_secret_access_key
- token = var.aws_session_token
+ region = var.location
+ profile = var.aws_profile
+ shared_credentials_file = var.aws_shared_credentials_file
+ access_key = var.aws_access_key_id
+ secret_key = var.aws_secret_access_key
+ token = var.aws_session_token
}
data "aws_eks_cluster" "cluster" {
@@ -76,32 +76,32 @@ module "vpc" {
subnets = var.subnets
existing_nat_id = var.nat_id
- tags = var.tags
+ tags = var.tags
public_subnet_tags = merge(var.tags, { "kubernetes.io/role/elb" = "1" }, { "kubernetes.io/cluster/${local.cluster_name}" = "shared" })
private_subnet_tags = merge(var.tags, { "kubernetes.io/role/internal-elb" = "1" }, { "kubernetes.io/cluster/${local.cluster_name}" = "shared" })
}
# EKS Setup - https://github.com/terraform-aws-modules/terraform-aws-eks
module "eks" {
- source = "terraform-aws-modules/eks/aws"
- version = "18.7.1"
- cluster_name = local.cluster_name
- cluster_version = var.kubernetes_version
- cluster_enabled_log_types = [] # disable cluster control plan logging
- create_cloudwatch_log_group = false
- cluster_endpoint_private_access = true
- cluster_endpoint_public_access = var.cluster_api_mode == "public" ? true : false
- cluster_endpoint_public_access_cidrs = local.cluster_endpoint_public_access_cidrs
-
- subnet_ids = module.vpc.private_subnets
- vpc_id = module.vpc.vpc_id
- tags = var.tags
- enable_irsa = var.autoscaling_enabled
+ source = "terraform-aws-modules/eks/aws"
+ version = "18.7.1"
+ cluster_name = local.cluster_name
+ cluster_version = var.kubernetes_version
+  cluster_enabled_log_types            = [] # disable cluster control plane logging
+ create_cloudwatch_log_group = false
+ cluster_endpoint_private_access = true
+ cluster_endpoint_public_access = var.cluster_api_mode == "public" ? true : false
+ cluster_endpoint_public_access_cidrs = local.cluster_endpoint_public_access_cidrs
+
+ subnet_ids = module.vpc.private_subnets
+ vpc_id = module.vpc.vpc_id
+ tags = var.tags
+ enable_irsa = var.autoscaling_enabled
################################################################################
# Cluster Security Group
################################################################################
- create_cluster_security_group = false # v17: cluster_create_security_group
- cluster_security_group_id = local.cluster_security_group_id
+ create_cluster_security_group = false
+ cluster_security_group_id = local.cluster_security_group_id
# Extend cluster security group rules
cluster_security_group_additional_rules = {
egress_nodes_ephemeral_ports_tcp = {
@@ -113,12 +113,12 @@ module "eks" {
source_node_security_group = true
}
}
-
+
################################################################################
# Node Security Group
################################################################################
- create_node_security_group = false #v17: worker_create_security_group
- node_security_group_id = local.workers_security_group_id #v17: worker_security_group_id
+ create_node_security_group = false
+ node_security_group_id = local.workers_security_group_id
# Extend node-to-node security group rules
node_security_group_additional_rules = {
ingress_self_all = {
@@ -141,27 +141,33 @@ module "eks" {
}
################################################################################
- # Handle BYO IAM policy
+ # Handle BYO IAM Roles & Policies
################################################################################
- create_iam_role = var.cluster_iam_role_name == null ? true : false # v17: manage_cluster_iam_resources
- iam_role_name = var.cluster_iam_role_name # v17: cluster_iam_role_name
+ # BYO - EKS Cluster IAM Role
+ create_iam_role = var.cluster_iam_role_arn == null ? true : false
+ iam_role_arn = var.cluster_iam_role_arn
+
iam_role_additional_policies = [
"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
]
## Use this to define any values that are common and applicable to all Node Groups
eks_managed_node_group_defaults = {
- create_security_group = false
- vpc_security_group_ids = [local.workers_security_group_id]
+ create_security_group = false
+ vpc_security_group_ids = [local.workers_security_group_id]
+
+ # BYO - EKS Workers IAM Role
+ create_iam_role = var.workers_iam_role_arn == null ? true : false
+ iam_role_arn = var.workers_iam_role_arn
}
-
+
## Any individual Node Group customizations should go here
- eks_managed_node_groups = local.node_groups
+ eks_managed_node_groups = local.node_groups
}
module "autoscaling" {
- source = "./modules/aws_autoscaling"
- count = var.autoscaling_enabled ? 1 : 0
+ source = "./modules/aws_autoscaling"
+ count = var.autoscaling_enabled ? 1 : 0
prefix = var.prefix
cluster_name = local.cluster_name
@@ -170,7 +176,7 @@ module "autoscaling" {
}
module "ebs" {
- source = "./modules/aws_ebs_csi"
+ source = "./modules/aws_ebs_csi"
prefix = var.prefix
cluster_name = local.cluster_name
@@ -185,10 +191,10 @@ module "kubeconfig" {
path = local.kubeconfig_path
namespace = "kube-system"
- cluster_name = local.cluster_name
- region = var.location
- endpoint = module.eks.cluster_endpoint
- ca_crt = local.kubeconfig_ca_cert
+ cluster_name = local.cluster_name
+ region = var.location
+ endpoint = module.eks.cluster_endpoint
+ ca_crt = local.kubeconfig_ca_cert
depends_on = [module.eks.cluster_id] # The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready.
}
@@ -198,7 +204,7 @@ module "postgresql" {
source = "terraform-aws-modules/rds/aws"
version = "3.3.0"
- for_each = local.postgres_servers != null ? length(local.postgres_servers) != 0 ? local.postgres_servers : {} : {}
+ for_each = local.postgres_servers != null ? length(local.postgres_servers) != 0 ? local.postgres_servers : {} : {}
identifier = lower("${var.prefix}-${each.key}-pgsql")
@@ -240,7 +246,7 @@ module "postgresql" {
multi_az = each.value.multi_az
- parameters = each.value.ssl_enforcement_enabled ? concat(each.value.parameters, [{ "apply_method": "immediate", "name": "rds.force_ssl", "value": "1" }]) : concat(each.value.parameters, [{ "apply_method": "immediate", "name": "rds.force_ssl", "value": "0" }])
+ parameters = each.value.ssl_enforcement_enabled ? concat(each.value.parameters, [{ "apply_method" : "immediate", "name" : "rds.force_ssl", "value" : "1" }]) : concat(each.value.parameters, [{ "apply_method" : "immediate", "name" : "rds.force_ssl", "value" : "0" }])
options = each.value.options
# Flags for module to flag if postgres should be created or not.
diff --git a/modules/aws_autoscaling/main.tf b/modules/aws_autoscaling/main.tf
index 62c5999f..c6de6979 100644
--- a/modules/aws_autoscaling/main.tf
+++ b/modules/aws_autoscaling/main.tf
@@ -51,10 +51,10 @@ module "iam_assumable_role_with_oidc" {
source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc"
version = "4.1.0"
- create_role = true
- role_name = "${var.prefix}-cluster-autoscaler"
- provider_url = replace(var.oidc_url, "https://", "")
- role_policy_arns = [aws_iam_policy.worker_autoscaling.arn]
+ create_role = true
+ role_name = "${var.prefix}-cluster-autoscaler"
+ provider_url = replace(var.oidc_url, "https://", "")
+ role_policy_arns = [aws_iam_policy.worker_autoscaling.arn]
oidc_fully_qualified_subjects = ["system:serviceaccount:kube-system:cluster-autoscaler"]
tags = {
diff --git a/modules/aws_autoscaling/output.tf b/modules/aws_autoscaling/output.tf
index 88ace2b9..9efe2c7c 100644
--- a/modules/aws_autoscaling/output.tf
+++ b/modules/aws_autoscaling/output.tf
@@ -1,3 +1,3 @@
output "autoscaler_account" {
- value = module.iam_assumable_role_with_oidc.iam_role_arn
+ value = module.iam_assumable_role_with_oidc.iam_role_arn
}
diff --git a/modules/aws_ebs_csi/main.tf b/modules/aws_ebs_csi/main.tf
index afcfdd3d..61db3c0f 100644
--- a/modules/aws_ebs_csi/main.tf
+++ b/modules/aws_ebs_csi/main.tf
@@ -156,12 +156,12 @@ module "iam_assumable_role_with_oidc" {
source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc"
version = "4.12.0"
- create_role = true
- role_name = "${var.prefix}-ebs-csi-role"
- provider_url = replace(var.oidc_url, "https://", "")
- role_policy_arns = [aws_iam_policy.ebs_csi.arn]
+ create_role = true
+ role_name = "${var.prefix}-ebs-csi-role"
+ provider_url = replace(var.oidc_url, "https://", "")
+ role_policy_arns = [aws_iam_policy.ebs_csi.arn]
oidc_fully_qualified_audiences = ["sts.amazonaws.com"]
- oidc_fully_qualified_subjects = ["system:serviceaccount:kube-system:ebs-csi-controller-sa"]
+ oidc_fully_qualified_subjects = ["system:serviceaccount:kube-system:ebs-csi-controller-sa"]
tags = {
Role = "${var.prefix}-ebs-csi-role"
diff --git a/modules/aws_ebs_csi/outputs.tf b/modules/aws_ebs_csi/outputs.tf
index c792ce7b..f8680d01 100644
--- a/modules/aws_ebs_csi/outputs.tf
+++ b/modules/aws_ebs_csi/outputs.tf
@@ -1,3 +1,3 @@
output "ebs_csi_account" {
- value = module.iam_assumable_role_with_oidc.iam_role_arn
+ value = module.iam_assumable_role_with_oidc.iam_role_arn
}
diff --git a/modules/aws_vm/main.tf b/modules/aws_vm/main.tf
index 049a7bec..4cf78c05 100644
--- a/modules/aws_vm/main.tf
+++ b/modules/aws_vm/main.tf
@@ -62,12 +62,12 @@ resource "aws_key_pair" "admin" {
}
resource "aws_instance" "vm" {
- ami = data.aws_ami.ubuntu.id
- instance_type = var.vm_type
- user_data = (var.cloud_init != "" ? var.cloud_init : null)
- key_name = aws_key_pair.admin.key_name
+ ami = data.aws_ami.ubuntu.id
+ instance_type = var.vm_type
+ user_data = (var.cloud_init != "" ? var.cloud_init : null)
+ key_name = aws_key_pair.admin.key_name
availability_zone = var.data_disk_availability_zone
-
+
vpc_security_group_ids = var.security_group_ids
subnet_id = var.subnet_id
associate_public_ip_address = var.create_public_ip
@@ -77,17 +77,18 @@ resource "aws_instance" "vm" {
volume_size = var.os_disk_size
delete_on_termination = var.os_disk_delete_on_termination
iops = var.os_disk_iops
+ encrypted = var.enable_ebs_encryption
}
- tags = merge(var.tags, tomap({ Name: "${var.name}-vm" }))
+ tags = merge(var.tags, tomap({ Name : "${var.name}-vm" }))
}
resource "aws_eip" "eip" {
- count = var.create_public_ip ? 1 : 0
- vpc = true
+ count = var.create_public_ip ? 1 : 0
+ vpc = true
instance = aws_instance.vm.id
- tags = merge(var.tags, tomap({ Name: "${var.name}-eip" }))
+ tags = merge(var.tags, tomap({ Name : "${var.name}-eip" }))
}
resource "aws_volume_attachment" "data-volume-attachment" {
@@ -103,5 +104,6 @@ resource "aws_ebs_volume" "raid_disk" {
size = var.data_disk_size
type = var.data_disk_type
iops = var.data_disk_iops
- tags = merge(var.tags, tomap({ Name: "${var.name}-vm" }))
+ tags = merge(var.tags, tomap({ Name : "${var.name}-vm" }))
+ encrypted = var.enable_ebs_encryption
}
diff --git a/modules/aws_vm/outputs.tf b/modules/aws_vm/outputs.tf
index 549906f3..8df65885 100644
--- a/modules/aws_vm/outputs.tf
+++ b/modules/aws_vm/outputs.tf
@@ -3,7 +3,7 @@ output "private_ip_address" {
}
output "public_ip_address" {
- value = var.create_public_ip ? coalesce(aws_eip.eip.0.public_ip,aws_instance.vm.public_ip) : null
+ value = var.create_public_ip ? coalesce(aws_eip.eip.0.public_ip, aws_instance.vm.public_ip) : null
}
output "admin_username" {
@@ -15,5 +15,5 @@ output "private_dns" {
}
output "public_dns" {
- value = var.create_public_ip ? coalesce(aws_eip.eip.0.public_dns,aws_instance.vm.public_dns) : null
+ value = var.create_public_ip ? coalesce(aws_eip.eip.0.public_dns, aws_instance.vm.public_dns) : null
}
diff --git a/modules/aws_vm/variables.tf b/modules/aws_vm/variables.tf
index c7c8473f..e45a9678 100644
--- a/modules/aws_vm/variables.tf
+++ b/modules/aws_vm/variables.tf
@@ -4,7 +4,7 @@ variable "name" {
variable "tags" {
description = "Map of common tags to be placed on the Resources"
- type = map
+ type = map(any)
default = { project_name = "viya401", cost_center = "rnd", environment = "dev" }
}
@@ -78,3 +78,8 @@ variable "os_disk_iops" {
variable "subnet_id" {
type = string
}
+
+variable "enable_ebs_encryption" {
+ description = "Enable encryption on EBS volumes."
+ default = false
+}
diff --git a/modules/aws_vpc/main.tf b/modules/aws_vpc/main.tf
index 1c2eef76..a24463d8 100644
--- a/modules/aws_vpc/main.tf
+++ b/modules/aws_vpc/main.tf
@@ -4,8 +4,8 @@ locals {
vpc_id = var.vpc_id == null ? aws_vpc.vpc[0].id : data.aws_vpc.vpc[0].id
existing_subnets = length(var.existing_subnet_ids) > 0 ? true : false
- existing_public_subnets = local.existing_subnets && contains(keys(var.existing_subnet_ids), "public") ? (length(var.existing_subnet_ids["public"]) > 0 ? true : false) : false
- existing_private_subnets = local.existing_subnets && contains(keys(var.existing_subnet_ids), "private") ? (length(var.existing_subnet_ids["private"]) > 0 ? true : false) : false
+ existing_public_subnets = local.existing_subnets && contains(keys(var.existing_subnet_ids), "public") ? (length(var.existing_subnet_ids["public"]) > 0 ? true : false) : false
+ existing_private_subnets = local.existing_subnets && contains(keys(var.existing_subnet_ids), "private") ? (length(var.existing_subnet_ids["private"]) > 0 ? true : false) : false
existing_database_subnets = local.existing_subnets && contains(keys(var.existing_subnet_ids), "database") ? (length(var.existing_subnet_ids["database"]) > 0 ? true : false) : false
public_subnets = local.existing_public_subnets ? data.aws_subnet.public : aws_subnet.public
@@ -39,7 +39,7 @@ resource "aws_vpc_endpoint" "private_endpoints" {
vpc_id = local.vpc_id
service_name = "com.amazonaws.${var.region}.${var.vpc_private_endpoints[count.index]}"
vpc_endpoint_type = "Interface"
- security_group_ids = [ var.security_group_id ]
+ security_group_ids = [var.security_group_id]
tags = merge(
{
@@ -48,7 +48,7 @@ resource "aws_vpc_endpoint" "private_endpoints" {
var.tags,
)
- subnet_ids = [
+ subnet_ids = [
for subnet in local.private_subnets : subnet.id
]
}
@@ -96,7 +96,7 @@ resource "aws_subnet" "public" {
# Internet Gateway
###################
resource "aws_internet_gateway" "this" {
- count = var.existing_nat_id == null ? 1 : 0
+ count = var.existing_nat_id == null ? 1 : 0
vpc_id = local.vpc_id
@@ -112,7 +112,7 @@ resource "aws_internet_gateway" "this" {
# Public routes
################
resource "aws_route_table" "public" {
- count = local.existing_public_subnets ? 0 : 1
+ count = local.existing_public_subnets ? 0 : 1
vpc_id = local.vpc_id
tags = merge(
@@ -150,7 +150,7 @@ resource "aws_route_table_association" "private" {
}
resource "aws_route_table_association" "public" {
- count = local.existing_public_subnets ? 0 :length(var.subnets["public"])
+ count = local.existing_public_subnets ? 0 : length(var.subnets["public"])
subnet_id = element(aws_subnet.public.*.id, count.index)
route_table_id = element(aws_route_table.public.*.id, 0)
@@ -263,14 +263,14 @@ resource "aws_eip" "nat" {
data "aws_nat_gateway" "nat_gateway" {
count = var.existing_nat_id != null ? 1 : 0
- id = var.existing_nat_id # alt. support vpc_id or subnet_id where NAT
+ id = var.existing_nat_id # alt. support vpc_id or subnet_id where NAT
}
resource "aws_nat_gateway" "nat_gateway" {
count = var.existing_nat_id == null ? 1 : 0
allocation_id = element(aws_eip.nat.*.id, 0)
- subnet_id = local.existing_public_subnets ? element(data.aws_subnet.public.*.id, 0) : element(aws_subnet.public.*.id,0)
+ subnet_id = local.existing_public_subnets ? element(data.aws_subnet.public.*.id, 0) : element(aws_subnet.public.*.id, 0)
tags = merge(
{
diff --git a/modules/aws_vpc/variables.tf b/modules/aws_vpc/variables.tf
index 2a78705b..87aa777f 100644
--- a/modules/aws_vpc/variables.tf
+++ b/modules/aws_vpc/variables.tf
@@ -4,7 +4,7 @@ variable "azs" {
default = []
}
-variable vpc_id {
+variable "vpc_id" {
description = "Existing vpc id"
default = null
}
@@ -20,7 +20,7 @@ variable "cidr" {
}
variable "subnets" {
- type = map
+ type = map(any)
description = "Map of list of subnet cidr_blocks"
}
@@ -31,8 +31,8 @@ variable "existing_subnet_ids" {
}
variable "existing_nat_id" {
- type = string
- default = null
+ type = string
+ default = null
description = "Pre-existing VPC NAT Gateway id"
}
@@ -104,9 +104,9 @@ variable "map_public_ip_on_launch" {
}
variable "vpc_private_endpoints" {
- description = "Endpoints needed for private cluster"
- type = list(string)
- default = [ "ec2", "ecr.api", "ecr.dkr", "s3", "logs", "sts", "elasticloadbalancing", "autoscaling" ]
+ description = "Endpoints needed for private cluster"
+ type = list(string)
+ default = ["ec2", "ecr.api", "ecr.dkr", "s3", "logs", "sts", "elasticloadbalancing", "autoscaling"]
}
variable "region" {
diff --git a/modules/kubeconfig/main.tf b/modules/kubeconfig/main.tf
index 9ad61c71..b773f01c 100644
--- a/modules/kubeconfig/main.tf
+++ b/modules/kubeconfig/main.tf
@@ -6,7 +6,7 @@ locals {
# Provider based kube config data/template/resources
data "template_file" "kubeconfig_provider" {
- count = var.create_static_kubeconfig ? 0 : 1
+ count = var.create_static_kubeconfig ? 0 : 1
template = file("${path.module}/templates/kubeconfig-provider.tmpl")
vars = {
@@ -28,15 +28,15 @@ data "kubernetes_secret" "sa_secret" {
}
data "template_file" "kubeconfig_sa" {
- count = var.create_static_kubeconfig ? 1 : 0
+ count = var.create_static_kubeconfig ? 1 : 0
template = file("${path.module}/templates/kubeconfig-sa.tmpl")
vars = {
cluster_name = var.cluster_name
endpoint = var.endpoint
name = local.service_account_name
- ca_crt = base64encode(lookup(data.kubernetes_secret.sa_secret.0.data,"ca.crt", ""))
- token = lookup(data.kubernetes_secret.sa_secret.0.data,"token", "")
+ ca_crt = base64encode(lookup(data.kubernetes_secret.sa_secret.0.data, "ca.crt", ""))
+ token = lookup(data.kubernetes_secret.sa_secret.0.data, "token", "")
namespace = var.namespace
}
depends_on = [data.kubernetes_secret.sa_secret]
@@ -52,7 +52,7 @@ resource "kubernetes_secret" "sa_secret" {
"kubernetes.io/service-account.name" = local.service_account_name
}
}
- type = "kubernetes.io/service-account-token"
+ type = "kubernetes.io/service-account-token"
depends_on = [kubernetes_service_account.kubernetes_sa]
}
@@ -69,7 +69,7 @@ resource "kubernetes_service_account" "kubernetes_sa" {
resource "kubernetes_cluster_role_binding" "kubernetes_crb" {
count = var.create_static_kubeconfig ? 1 : 0
metadata {
- name = local.cluster_role_binding_name
+ name = local.cluster_role_binding_name
}
role_ref {
api_group = "rbac.authorization.k8s.io"
diff --git a/outputs.tf b/outputs.tf
index 76d7b9d6..2bb005dd 100755
--- a/outputs.tf
+++ b/outputs.tf
@@ -9,81 +9,90 @@ output "kube_config" {
}
output "cluster_iam_role_arn" {
- value = module.eks.cluster_iam_role_arn
+ description = "The ARN of the IAM role for the EKS cluster."
+  value = (var.cluster_iam_role_arn == null
+    ? module.eks.cluster_iam_role_arn
+    : var.cluster_iam_role_arn
+  )
+}
+
+output "workers_iam_role_arn" {
+ description = "The ARN of the IAM role for the Node VMs."
+ value = var.workers_iam_role_arn
}
output "rwx_filestore_id" {
- value = var.storage_type == "ha" ? aws_efs_file_system.efs-fs.0.id : null
+ value = var.storage_type == "ha" ? aws_efs_file_system.efs-fs[0].id : null
}
output "rwx_filestore_endpoint" {
- value = ( var.storage_type == "none"
- ? null
- : var.storage_type == "ha" ? aws_efs_file_system.efs-fs.0.dns_name : module.nfs.0.private_dns
- )
+ value = (var.storage_type == "none"
+ ? null
+ : var.storage_type == "ha" ? aws_efs_file_system.efs-fs[0].dns_name : module.nfs[0].private_dns
+ )
}
output "rwx_filestore_path" {
- value = ( var.storage_type == "none"
- ? null
- : var.storage_type == "ha" ? "/" : "/export"
- )
+ value = (var.storage_type == "none"
+ ? null
+ : var.storage_type == "ha" ? "/" : "/export"
+ )
}
output "efs_arn" {
- value = var.storage_type == "ha" ? aws_efs_file_system.efs-fs.0.arn : null
+ value = var.storage_type == "ha" ? aws_efs_file_system.efs-fs[0].arn : null
}
output "jump_private_ip" {
- value = var.create_jump_vm ? module.jump.0.private_ip_address : null
+ value = var.create_jump_vm ? module.jump[0].private_ip_address : null
}
output "jump_public_ip" {
- value = var.create_jump_vm ? module.jump.0.public_ip_address : null
+ value = var.create_jump_vm ? module.jump[0].public_ip_address : null
}
-output jump_admin_username {
- value = var.create_jump_vm ? module.jump.0.admin_username : null
+output "jump_admin_username" {
+ value = var.create_jump_vm ? module.jump[0].admin_username : null
}
output "jump_private_dns" {
- value = var.create_jump_vm ? module.jump.0.private_dns : null
+ value = var.create_jump_vm ? module.jump[0].private_dns : null
}
output "jump_public_dns" {
- value = var.create_jump_vm ? module.jump.0.public_dns : null
+ value = var.create_jump_vm ? module.jump[0].public_dns : null
}
-output jump_rwx_filestore_path {
- value = ( var.storage_type != "none"
- ? var.create_jump_vm ? var.jump_rwx_filestore_path : null
- : null
- )
+output "jump_rwx_filestore_path" {
+ value = (var.storage_type != "none"
+ ? var.create_jump_vm ? var.jump_rwx_filestore_path : null
+ : null
+ )
}
output "nfs_private_ip" {
- value = var.storage_type == "standard" ? module.nfs.0.private_ip_address : null
+ value = var.storage_type == "standard" ? module.nfs[0].private_ip_address : null
}
output "nfs_public_ip" {
- value = var.storage_type == "standard" ? module.nfs.0.public_ip_address : null
+ value = var.storage_type == "standard" ? module.nfs[0].public_ip_address : null
}
output "nfs_admin_username" {
- value = var.storage_type == "standard" ? module.nfs.0.admin_username : null
+ value = var.storage_type == "standard" ? module.nfs[0].admin_username : null
}
output "nfs_private_dns" {
- value = var.storage_type == "standard" ? module.nfs.0.private_dns : null
+ value = var.storage_type == "standard" ? module.nfs[0].private_dns : null
}
output "nfs_public_dns" {
- value = var.storage_type == "standard" ? module.nfs.0.public_dns : null
+ value = var.storage_type == "standard" ? module.nfs[0].public_dns : null
}
#postgres
output "postgres_servers" {
- value = length(module.postgresql) != 0 ? local.postgres_outputs : null
+ value = length(module.postgresql) != 0 ? local.postgres_outputs : null
sensitive = true
}
@@ -117,7 +126,7 @@ output "cluster_node_pool_mode" {
}
output "autoscaler_account" {
- value = var.autoscaling_enabled ? module.autoscaling.0.autoscaler_account : null
+ value = var.autoscaling_enabled ? module.autoscaling[0].autoscaler_account : null
}
output "cluster_api_mode" {
diff --git a/security.tf b/security.tf
index 4954f519..cc2a661c 100644
--- a/security.tf
+++ b/security.tf
@@ -1,11 +1,11 @@
-data "aws_security_group" sg {
+data "aws_security_group" "sg" {
count = var.security_group_id == null ? 0 : 1
- id = var.security_group_id
+ id = var.security_group_id
}
# Security Groups - https://www.terraform.io/docs/providers/aws/r/security_group.html
resource "aws_security_group" "sg" {
- count = var.security_group_id == null ? 1 : 0
+ count = var.security_group_id == null ? 1 : 0
name = "${var.prefix}-sg"
vpc_id = module.vpc.vpc_id
@@ -16,17 +16,17 @@ resource "aws_security_group" "sg" {
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
- tags = merge(var.tags, { "Name": "${var.prefix}-sg" })
+ tags = merge(var.tags, { "Name" : "${var.prefix}-sg" })
}
resource "aws_security_group_rule" "vms" {
- count = ( length(local.vm_public_access_cidrs) > 0
- && var.security_group_id == null
- && ( (var.create_jump_public_ip && var.create_jump_vm )
- || (var.create_nfs_public_ip && var.storage_type == "standard")
- )
- ? 1 : 0
- )
+ count = (length(local.vm_public_access_cidrs) > 0
+ && var.security_group_id == null
+ && ((var.create_jump_public_ip && var.create_jump_vm)
+ || (var.create_nfs_public_ip && var.storage_type == "standard")
+ )
+ ? 1 : 0
+ )
type = "ingress"
description = "Allow SSH from source"
from_port = 22
@@ -59,12 +59,12 @@ resource "aws_security_group_rule" "postgres_internal" {
}
resource "aws_security_group_rule" "postgres_external" {
- for_each = ( length(local.postgres_public_access_cidrs) > 0
- ? local.postgres_sgr_ports != null
- ? toset(local.postgres_sgr_ports)
- : toset([])
- : toset([])
- )
+ for_each = (length(local.postgres_public_access_cidrs) > 0
+ ? local.postgres_sgr_ports != null
+ ? toset(local.postgres_sgr_ports)
+ : toset([])
+ : toset([])
+ )
type = "ingress"
description = "Allow Postgres from source"
from_port = each.key
@@ -80,7 +80,7 @@ resource "aws_security_group" "cluster_security_group" {
vpc_id = module.vpc.vpc_id
tags = merge(var.tags, { "Name" : "${var.prefix}-eks_cluster_sg" })
- count = var.cluster_security_group_id == null ? 1 : 0
+ count = var.cluster_security_group_id == null ? 1 : 0
description = "EKS cluster security group."
egress {
@@ -95,27 +95,27 @@ resource "aws_security_group" "cluster_security_group" {
resource "aws_security_group_rule" "cluster_ingress" {
- count = var.cluster_security_group_id == null ? 1 : 0
+ count = var.cluster_security_group_id == null ? 1 : 0
- type = "ingress"
- description = "Allow pods to communicate with the EKS cluster API."
- from_port = 443
- to_port = 443
- protocol = "tcp"
- source_security_group_id = aws_security_group.workers_security_group.0.id
- security_group_id = local.cluster_security_group_id
- }
+ type = "ingress"
+ description = "Allow pods to communicate with the EKS cluster API."
+ from_port = 443
+ to_port = 443
+ protocol = "tcp"
+ source_security_group_id = aws_security_group.workers_security_group.0.id
+ security_group_id = local.cluster_security_group_id
+}
resource "aws_security_group" "workers_security_group" {
name = "${var.prefix}-eks_worker_sg"
vpc_id = module.vpc.vpc_id
- tags = merge(var.tags,
- { "Name" : "${var.prefix}-eks_worker_sg" },
- { "kubernetes.io/cluster/${local.cluster_name}" : "owned" }
- )
+ tags = merge(var.tags,
+ { "Name" : "${var.prefix}-eks_worker_sg" },
+ { "kubernetes.io/cluster/${local.cluster_name}" : "owned" }
+ )
- count = var.workers_security_group_id == null ? 1 : 0
+ count = var.workers_security_group_id == null ? 1 : 0
description = "Security group for all nodes in the cluster."
egress = [
@@ -135,10 +135,10 @@ resource "aws_security_group" "workers_security_group" {
]
}
-
+
resource "aws_security_group_rule" "worker_self" {
- count = var.workers_security_group_id == null ? 1 : 0
+ count = var.workers_security_group_id == null ? 1 : 0
type = "ingress"
description = "Allow node to comunicate with each other."
@@ -151,7 +151,7 @@ resource "aws_security_group_rule" "worker_self" {
resource "aws_security_group_rule" "worker_cluster_api" {
- count = var.workers_security_group_id == null ? 1 : 0
+ count = var.workers_security_group_id == null ? 1 : 0
type = "ingress"
description = "Allow workers pods to receive communication from the cluster control plane."
@@ -164,7 +164,7 @@ resource "aws_security_group_rule" "worker_cluster_api" {
resource "aws_security_group_rule" "worker_cluster_api_443" {
- count = var.workers_security_group_id == null ? 1 : 0
+ count = var.workers_security_group_id == null ? 1 : 0
type = "ingress"
description = "Allow pods running extension API servers on port 443 to receive communication from cluster control plane."
@@ -173,7 +173,7 @@ resource "aws_security_group_rule" "worker_cluster_api_443" {
source_security_group_id = local.cluster_security_group_id
to_port = 443
security_group_id = aws_security_group.workers_security_group.0.id
-}
+}
diff --git a/variables.tf b/variables.tf
index d4fde62c..8c96f712 100644
--- a/variables.tf
+++ b/variables.tf
@@ -11,118 +11,126 @@ variable "prefix" {
## Provider
variable "location" {
- description = "AWS Region to provision all resources in this script"
+ description = "AWS Region to provision all resources in this script."
+ type = string
default = "us-east-1"
}
variable "aws_profile" {
- description = "Name of Profile in the credentials file"
+ description = "Name of Profile in the credentials file."
type = string
default = ""
}
variable "aws_shared_credentials_file" {
- description = "Name of credentials file, if using non-default location"
+ description = "Name of credentials file, if using non-default location."
type = string
default = ""
}
variable "aws_session_token" {
- description = "Session token for temporary credentials"
+ description = "Session token for temporary credentials."
type = string
default = ""
}
variable "aws_access_key_id" {
- description = "Static credential key"
+ description = "Static credential key."
type = string
default = ""
}
variable "aws_secret_access_key" {
- description = "Static credential secret"
+ description = "Static credential secret."
type = string
default = ""
}
variable "iac_tooling" {
- description = "Value used to identify the tooling used to generate this provider's infrastructure"
+ description = "Value used to identify the tooling used to generate this provider's infrastructure."
type = string
default = "terraform"
}
## Public Access
variable "default_public_access_cidrs" {
- description = "List of CIDRs to access created resources"
+ description = "List of CIDRs to access created resources."
type = list(string)
default = null
}
variable "cluster_endpoint_public_access_cidrs" {
- description = "List of CIDRs to access Kubernetes cluster - Public"
+ description = "List of CIDRs to access Kubernetes cluster - Public."
type = list(string)
default = null
}
variable "cluster_endpoint_private_access_cidrs" {
- description = "List of CIDRs to access Kubernetes cluster - Private"
+ description = "List of CIDRs to access Kubernetes cluster - Private."
type = list(string)
default = null
}
variable "vm_public_access_cidrs" {
- description = "List of CIDRs to access jump VM or NFS VM"
+ description = "List of CIDRs to access jump VM or NFS VM."
type = list(string)
default = null
}
variable "postgres_public_access_cidrs" {
- description = "List of CIDRs to access PostgreSQL server"
+ description = "List of CIDRs to access PostgreSQL server."
type = list(string)
default = null
}
## Provider Specific
variable "ssh_public_key" {
- description = "SSH public key used to access VMs"
- default = "~/.ssh/id_rsa.pub"
+ description = "SSH public key used to access VMs."
+ type = string
+ default = "~/.ssh/id_rsa.pub"
}
-variable efs_performance_mode {
- default = "generalPurpose"
+variable "efs_performance_mode" {
+ description = "EFS performance mode. Supported values are `generalPurpose` or `maxIO`."
+ type = string
+ default = "generalPurpose"
}
## Kubernetes
variable "kubernetes_version" {
- description = "The EKS cluster Kubernetes version"
+ description = "The EKS cluster Kubernetes version."
+ type = string
default = "1.23"
}
variable "tags" {
- description = "Map of common tags to be placed on the resources"
- type = map
+ description = "Map of common tags to be placed on the resources."
+ type = map(any)
default = { project_name = "viya" }
validation {
- condition = length(var.tags) > 0
+ condition = length(var.tags) > 0
error_message = "ERROR: You must provide at last one tag."
}
}
## Default node pool config
-variable "create_default_nodepool" {
- description = "Create Default Node Pool"
+variable "create_default_nodepool" { # tflint-ignore: terraform_unused_declarations
+ description = "Create Default Node Pool."
type = bool
default = true
}
variable "default_nodepool_vm_type" {
- default = "m5.2xlarge"
+ description = "Type of the default node pool VMs."
+ type = string
+ default = "m5.2xlarge"
}
variable "default_nodepool_os_disk_type" {
- type = string
- default = "gp2"
+ description = "Disk type for default node pool VMs."
+ type = string
+ default = "gp2"
validation {
condition = contains(["gp2", "io1"], lower(var.default_nodepool_os_disk_type))
@@ -131,56 +139,76 @@ variable "default_nodepool_os_disk_type" {
}
variable "default_nodepool_os_disk_size" {
- default = 200
+ description = "Disk size for default node pool VMs."
+ type = number
+ default = 200
}
variable "default_nodepool_os_disk_iops" {
- default = 0
+ description = "Disk IOPS for default node pool VMs."
+ type = number
+ default = 0
}
variable "default_nodepool_node_count" {
- default = 1
+ description = "Initial number of nodes in the default node pool."
+ type = number
+ default = 1
}
variable "default_nodepool_max_nodes" {
- default = 5
+ description = "Maximum number of nodes in the default node pool."
+ type = number
+ default = 5
}
variable "default_nodepool_min_nodes" {
- default = 1
+ description = "Minimum and initial number of nodes for the node pool."
+ type = number
+ default = 1
}
variable "default_nodepool_taints" {
- type = list
- default = []
+ description = "Taints for the default node pool VMs."
+ type = list(any)
+ default = []
}
variable "default_nodepool_labels" {
- type = map
+ description = "Labels to add to the default node pool."
+ type = map(any)
default = {
"kubernetes.azure.com/mode" = "system"
}
}
variable "default_nodepool_custom_data" {
- default = ""
+ description = "Additional user data that will be appended to the default user data."
+ type = string
+ default = ""
}
variable "default_nodepool_metadata_http_endpoint" {
- default = "enabled"
+ description = "The state of the default node pool's metadata service."
+ type = string
+ default = "enabled"
}
variable "default_nodepool_metadata_http_tokens" {
- default = "required"
+ description = "The state of the session tokens for the default node pool."
+ type = string
+ default = "required"
}
variable "default_nodepool_metadata_http_put_response_hop_limit" {
- default = 1
+ description = "The desired HTTP PUT response hop limit for instance metadata requests for the default node pool."
+ type = number
+ default = 1
}
## Dynamic node pool config
-variable node_pools {
- description = "Node pool definitions"
+variable "node_pools" {
+ description = "Node Pool Definitions."
type = map(object({
vm_type = string
cpu_type = string
@@ -210,7 +238,7 @@ variable node_pools {
"node_labels" = {
"workload.sas.com/class" = "cas"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
@@ -228,7 +256,7 @@ variable node_pools {
"workload.sas.com/class" = "compute"
"launcher.sas.com/prepullImage" = "sas-programming-environment"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
@@ -245,7 +273,7 @@ variable node_pools {
"node_labels" = {
"workload.sas.com/class" = "stateless"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
@@ -262,7 +290,7 @@ variable node_pools {
"node_labels" = {
"workload.sas.com/class" = "stateful"
}
- "custom_data" = ""
+ "custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
@@ -272,15 +300,15 @@ variable node_pools {
# Networking
variable "vpc_id" {
- type = string
- default = null
- description = "Pre-exising VPC id. Leave blank to have one created"
+  description = "Pre-existing VPC id. Leave blank to have one created."
+ type = string
+ default = null
}
variable "subnet_ids" {
- type = map(list(string))
+ description = "Map subnet usage roles to list of existing subnet ids."
+ type = map(list(string))
default = {}
- description = "Map subnet usage roles to list of existing subnet ids"
# Example:
# subnet_ids = { # only needed if using pre-existing subnets
# "public" : ["existing-public-subnet-id1", "existing-public-subnet-id2"],
@@ -290,166 +318,186 @@ variable "subnet_ids" {
}
variable "vpc_cidr" {
- description = "VPC CIDR - NOTE: Subnets below must fall into this range"
+ description = "VPC CIDR - NOTE: Subnets below must fall into this range."
+ type = string
default = "192.168.0.0/16"
}
-variable subnets {
- type = map
- description = "value"
+variable "subnets" {
+ description = "Subnets to be created and their settings - This variable is ignored when `subnet_ids` is set (AKA bring your own subnets)."
+ type = map(list(string))
default = {
"private" : ["192.168.0.0/18", "192.168.64.0/18"],
"public" : ["192.168.129.0/25", "192.168.129.128/25"],
"database" : ["192.168.128.0/25", "192.168.128.128/25"]
- }
+ }
}
variable "security_group_id" {
- type = string
- default = null
- description = "Pre-existing Security Group id. Leave blank to have one created"
-
+ description = "Pre-existing Security Group id. Leave blank to have one created."
+ type = string
+ default = null
}
variable "cluster_security_group_id" {
- type = string
- default = null
- description = "Pre-existing Security Group id for the EKS Cluster. Leave blank to have one created"
+ description = "Pre-existing Security Group id for the EKS Cluster. Leave blank to have one created."
+ type = string
+ default = null
}
variable "workers_security_group_id" {
- type = string
- default = null
- description = "Pre-existing Security Group id for the Cluster Node VM. Leave blank to have one created"
+ description = "Pre-existing Security Group id for the Cluster Node VM. Leave blank to have one created."
+ type = string
+ default = null
}
variable "nat_id" {
- type = string
- default = null
- description = "Pre-existing NAT Gateway id"
+ description = "Pre-existing NAT Gateway id."
+ type = string
+ default = null
}
-variable "cluster_iam_role_name" {
- type = string
- default = null
- description = "Pre-existing IAM Role for the EKS cluster"
+variable "cluster_iam_role_arn" {
+ description = "ARN of the pre-existing IAM Role for the EKS cluster."
+ type = string
+ default = null
}
-variable "workers_iam_role_name" {
- type = string
- default = null
- description = "Pre-existing IAM Role for the Node VMs"
+variable "workers_iam_role_arn" {
+ description = "ARN of the pre-existing IAM Role for the cluster node VMs."
+ type = string
+ default = null
}
-
variable "create_jump_vm" {
- description = "Create bastion host VM"
- default = true
+  description = "Create bastion host VM."
+ type = bool
+ default = true
}
variable "create_jump_public_ip" {
- type = bool
- default = true
+ description = "Add public IP address to Jump VM."
+ type = bool
+ default = true
}
variable "jump_vm_admin" {
- description = "OS Admin User for Jump VM"
+ description = "OS Admin User for Jump VM."
+ type = string
default = "jumpuser"
}
variable "jump_vm_type" {
- description = "Jump VM type"
+ description = "Jump VM type."
+ type = string
default = "m5.4xlarge"
}
variable "jump_rwx_filestore_path" {
- description = "OS path used in cloud-init for NFS integration"
+ description = "OS path used in cloud-init for NFS integration."
+ type = string
default = "/viya-share"
}
variable "nfs_raid_disk_size" {
- description = "Size in GB for each disk of the RAID0 cluster, when storage_type=standard"
+ description = "Size in GB for each disk of the RAID0 cluster, when storage_type=standard."
+ type = number
default = 128
}
variable "nfs_raid_disk_type" {
- default = "gp2"
+ description = "Disk type for the NFS server EBS volumes."
+ type = string
+ default = "gp2"
}
variable "nfs_raid_disk_iops" {
- default = 0
+  description = "IOPS for the NFS server EBS volumes."
+ type = number
+ default = 0
}
variable "create_nfs_public_ip" {
- type = bool
- default = false
+ description = "Add public IP address to the NFS server VM."
+ type = bool
+ default = false
}
variable "nfs_vm_admin" {
- description = "OS Admin User for NFS VM, when storage_type=standard"
+ description = "OS Admin User for NFS VM, when storage_type=standard."
+ type = string
default = "nfsuser"
}
variable "nfs_vm_type" {
- description = "NFS VM type"
- default = "m5.4xlarge"
+ description = "NFS VM type."
+ type = string
+ default = "m5.4xlarge"
}
variable "os_disk_size" {
- default = 64
+ description = "Disk size for default node pool VMs in GB."
+ type = number
+ default = 64
}
variable "os_disk_type" {
- default = "standard"
+ description = "Disk type for default node pool VMs."
+ type = string
+ default = "standard"
}
variable "os_disk_delete_on_termination" {
- default = true
+ description = "Delete Disk on termination."
+ type = bool
+ default = true
}
variable "os_disk_iops" {
- default = 0
+ description = "Disk IOPS for default node pool VMs."
+ type = number
+ default = 0
}
## PostgresSQL
# Defaults
variable "postgres_server_defaults" {
- description = ""
+  description = "Map of PostgreSQL server default objects."
type = any
default = {
- instance_type = "db.m5.xlarge"
- storage_size = 50
- storage_encrypted = false
- backup_retention_days = 7
- multi_az = false
- deletion_protection = false
- administrator_login = "pgadmin"
- administrator_password = "my$up3rS3cretPassw0rd"
- server_version = "13"
- server_port = "5432"
- ssl_enforcement_enabled = true
- parameters = []
- options = []
+ instance_type = "db.m5.xlarge"
+ storage_size = 50
+ storage_encrypted = false
+ backup_retention_days = 7
+ multi_az = false
+ deletion_protection = false
+ administrator_login = "pgadmin"
+ administrator_password = "my$up3rS3cretPassw0rd"
+ server_version = "13"
+ server_port = "5432"
+ ssl_enforcement_enabled = true
+ parameters = []
+ options = []
}
}
# User inputs
variable "postgres_servers" {
- description = "Map of PostgreSQL server objects"
+ description = "Map of PostgreSQL server objects provided by the user."
type = any
default = null
# Checking for user provided "default" server
validation {
- condition = var.postgres_servers != null ? length(var.postgres_servers) != 0 ? contains(keys(var.postgres_servers), "default") : false : true
+ condition = var.postgres_servers != null ? length(var.postgres_servers) != 0 ? contains(keys(var.postgres_servers), "default") : false : true
error_message = "ERROR: The provided map of PostgreSQL server objects does not contain the required 'default' key."
}
# Checking server name
validation {
condition = var.postgres_servers != null ? length(var.postgres_servers) != 0 ? alltrue([
- for k,v in var.postgres_servers : alltrue([
+ for k, v in var.postgres_servers : alltrue([
length(k) > 0,
length(k) < 61,
can(regex("^[a-zA-Z]+[a-zA-Z0-9-]*[a-zA-Z0-9]$", k)),
@@ -461,12 +509,12 @@ variable "postgres_servers" {
# Checking user provided login
validation {
condition = var.postgres_servers != null ? length(var.postgres_servers) != 0 ? alltrue([
- for k,v in var.postgres_servers : contains(keys(v),"administrator_login") ? alltrue([
+ for k, v in var.postgres_servers : contains(keys(v), "administrator_login") ? alltrue([
v.administrator_login != "admin",
length(v.administrator_login) > 0,
length(v.administrator_login) < 17,
can(regex("^[a-zA-Z][a-zA-Z0-9_]+$", v.administrator_login)),
- ]) : true
+ ]) : true
]) : false : true
error_message = "ERROR: The admin login name can not be 'admin', must start with a letter, and must be between 1-16 characters in length, and can only contain underscores, letters, and numbers."
}
@@ -474,7 +522,7 @@ variable "postgres_servers" {
# Checking user provided password
validation {
condition = var.postgres_servers != null ? length(var.postgres_servers) != 0 ? alltrue([
- for k,v in var.postgres_servers : contains(keys(v),"administrator_password") ? alltrue([
+ for k, v in var.postgres_servers : contains(keys(v), "administrator_password") ? alltrue([
length(v.administrator_password) > 7,
can(regex("^[^/'\"@]+$", v.administrator_password)),
]) : true
@@ -484,8 +532,9 @@ variable "postgres_servers" {
}
variable "storage_type" {
- type = string
- default = "standard"
+ description = "Type of Storage. A value of 'standard' creates an NFS server VM; a value of 'ha' creates an AWS EFS mountpoint."
+ type = string
+ default = "standard"
# NOTE: storage_type=none is for internal use only
validation {
condition = contains(["standard", "ha", "none"], lower(var.storage_type))
@@ -494,13 +543,13 @@ variable "storage_type" {
}
variable "create_static_kubeconfig" {
- description = "Allows the user to create a provider- or service account-based kubeconfig file"
+ description = "Allows the user to create a provider- or service account-based kubeconfig file."
type = bool
default = true
}
variable "cluster_api_mode" {
- description = "Use Public or Private IP address for the cluster API endpoint"
+ description = "Use Public or Private IP address for the cluster API endpoint."
type = string
default = "public"
@@ -510,10 +559,10 @@ variable "cluster_api_mode" {
}
}
-variable "vpc_private_endpoints" {
- description = "Endpoints needed for private cluster"
- type = list(string)
- default = [ "ec2", "ecr.api", "ecr.dkr", "s3", "logs", "sts", "elasticloadbalancing", "autoscaling" ]
+variable "vpc_private_endpoints" { # tflint-ignore: terraform_unused_declarations
+ description = "Endpoints needed for private cluster."
+ type = list(string)
+ default = ["ec2", "ecr.api", "ecr.dkr", "s3", "logs", "sts", "elasticloadbalancing", "autoscaling"]
}
variable "cluster_node_pool_mode" {
@@ -524,7 +573,19 @@ variable "cluster_node_pool_mode" {
}
variable "autoscaling_enabled" {
- description = "Enable autoscaling for your AWS cluster."
- type = bool
- default = true
+ description = "Enable autoscaling for your AWS cluster."
+ type = bool
+ default = true
+}
+
+variable "enable_ebs_encryption" {
+ description = "Enable encryption on EBS volumes."
+ type = bool
+ default = false
+}
+
+variable "enable_efs_encryption" {
+ description = "Enable encryption on EFS file systems."
+ type = bool
+ default = false
}
diff --git a/vms.tf b/vms.tf
index 1b8f4c6d..697807a8 100644
--- a/vms.tf
+++ b/vms.tf
@@ -1,12 +1,12 @@
locals {
- rwx_filestore_endpoint = ( var.storage_type == "none"
- ? ""
- : var.storage_type == "ha" ? aws_efs_file_system.efs-fs.0.dns_name : module.nfs.0.private_ip_address
- )
- rwx_filestore_path = ( var.storage_type == "none"
- ? ""
- : var.storage_type == "ha" ? "/" : "/export"
- )
+ rwx_filestore_endpoint = (var.storage_type == "none"
+ ? ""
+ : var.storage_type == "ha" ? aws_efs_file_system.efs-fs.0.dns_name : module.nfs.0.private_ip_address
+ )
+ rwx_filestore_path = (var.storage_type == "none"
+ ? ""
+ : var.storage_type == "ha" ? "/" : "/export"
+ )
}
# EFS File System - https://www.terraform.io/docs/providers/aws/r/efs_file_system.html
@@ -14,7 +14,8 @@ resource "aws_efs_file_system" "efs-fs" {
count = var.storage_type == "ha" ? 1 : 0
creation_token = "${var.prefix}-efs"
performance_mode = var.efs_performance_mode
- tags = merge(var.tags, { "Name": "${var.prefix}-efs" })
+ tags = merge(var.tags, { "Name" : "${var.prefix}-efs" })
+ encrypted = var.enable_efs_encryption
}
# EFS Mount Target - https://www.terraform.io/docs/providers/aws/r/efs_mount_target.html
@@ -31,18 +32,18 @@ resource "aws_efs_mount_target" "efs-mt" {
data "template_file" "jump-cloudconfig" {
count = var.create_jump_vm ? 1 : 0
template = file("${path.module}/files/cloud-init/jump/cloud-config")
- vars = {
- mounts = ( var.storage_type == "none"
- ? "[]"
- : jsonencode(
- [ "${local.rwx_filestore_endpoint}:${local.rwx_filestore_path}",
- "${var.jump_rwx_filestore_path}",
- "nfs",
- "rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport",
- "0",
- "0"
- ])
- )
+ vars = {
+ mounts = (var.storage_type == "none"
+ ? "[]"
+ : jsonencode(
+ ["${local.rwx_filestore_endpoint}:${local.rwx_filestore_path}",
+ "${var.jump_rwx_filestore_path}",
+ "nfs",
+ "rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport",
+ "0",
+ "0"
+ ])
+ )
rwx_filestore_endpoint = local.rwx_filestore_endpoint
rwx_filestore_path = local.rwx_filestore_path
@@ -79,9 +80,10 @@ module "jump" {
os_disk_delete_on_termination = var.os_disk_delete_on_termination
os_disk_iops = var.os_disk_iops
- vm_type = var.jump_vm_type
- vm_admin = var.jump_vm_admin
- ssh_public_key = local.ssh_public_key
+ vm_type = var.jump_vm_type
+ vm_admin = var.jump_vm_admin
+ ssh_public_key = local.ssh_public_key
+ enable_ebs_encryption = var.enable_ebs_encryption
cloud_init = data.template_cloudinit_config.jump.0.rendered
@@ -94,7 +96,7 @@ data "template_file" "nfs-cloudconfig" {
count = var.storage_type == "standard" ? 1 : 0
vars = {
- vm_admin = var.nfs_vm_admin
+ vm_admin = var.nfs_vm_admin
public_subnet_cidrs = join(" ", module.vpc.public_subnet_cidrs)
private_subnet_cidrs = join(" ", module.vpc.private_subnet_cidrs)
}
@@ -135,9 +137,10 @@ module "nfs" {
data_disk_iops = var.nfs_raid_disk_iops
data_disk_availability_zone = local.nfs_vm_subnet_az
- vm_type = var.nfs_vm_type
- vm_admin = var.nfs_vm_admin
- ssh_public_key = local.ssh_public_key
+ vm_type = var.nfs_vm_type
+ vm_admin = var.nfs_vm_admin
+ ssh_public_key = local.ssh_public_key
+ enable_ebs_encryption = var.enable_ebs_encryption
cloud_init = data.template_cloudinit_config.nfs.0.rendered
}