diff --git a/Dockerfile b/Dockerfile
index 9d39bfb4..1033a6f3 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -3,7 +3,7 @@ ARG AWS_CLI_VERSION=2.1.29
FROM hashicorp/terraform:$TERRAFORM_VERSION as terraform
FROM amazon/aws-cli:$AWS_CLI_VERSION
-ARG KUBECTL_VERSION=1.21.7
+ARG KUBECTL_VERSION=1.22.10
WORKDIR /viya4-iac-aws
@@ -17,7 +17,7 @@ RUN yum -y install git openssh jq which \
&& chmod g=u -R /etc/passwd /etc/group /viya4-iac-aws \
&& git config --system --add safe.directory /viya4-iac-aws \
&& terraform init
-
+
ENV TF_VAR_iac_tooling=docker
ENTRYPOINT ["/viya4-iac-aws/docker-entrypoint.sh"]
VOLUME ["/workspace"]
diff --git a/README.md b/README.md
index 041915e2..1711aa40 100644
--- a/README.md
+++ b/README.md
@@ -17,7 +17,7 @@ This project contains Terraform scripts to provision the AWS cloud infrastructur
This project helps you to automate the cluster-provisioning phase of SAS Viya deployment. To learn about all phases and options of the
SAS Viya deployment process, see [Getting Started with SAS Viya and Azure Kubernetes Service](https://go.documentation.sas.com/doc/en/itopscdc/default/itopscon/n1d7qc4nfr3s5zn103a1qy0kj4l1.htm) in _SAS® Viya® Operations_.
-Once the cloud resources are provisioned, use the [viya4-deployment](https://github.com/sassoftware/viya4-deployment) project to deploy
+Once the cloud resources are provisioned, use the [viya4-deployment](https://github.com/sassoftware/viya4-deployment) project to deploy
SAS Viya 4 in your cloud environment. For more information about SAS Viya 4 requirements and documentation for the deployment
process, refer to the [SAS Viya 4 Operations Guide](https://go.documentation.sas.com/doc/en/itopscdc/default/itopswlcm/home.htm).
@@ -35,22 +35,22 @@ Use of these tools requires operational knowledge of the following technologies:
This project supports two options for running Terraform scripts:
- Terraform installed on your local machine
- Using a Docker container to run Terraform (Docker is required)
-
+
For more information, see [Docker Usage](./docs/user/DockerUsage.md). Using Docker to run the Terraform scripts is recommended.
-
+
The following are also required:
- Access to an **AWS account** with a user that is associated with the applied [IAM Policy](./files/policies/devops-iac-eks-policy.json)
- Subscription to [Ubuntu 20.04 LTS - Focal](https://aws.amazon.com/marketplace/pp/prodview-iftkyuwv2sjxi)
-
+
#### Terraform Requirements:
- [Terraform](https://www.terraform.io/downloads.html) v1.0.0
-- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) - v1.21.7
+- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) - v1.22.10
- [jq](https://stedolan.github.io/jq/) v1.6
- [AWS CLI](https://aws.amazon.com/cli) (optional; useful as an alternative to the AWS Web Console) v2.1.29
-
+
#### Docker Requirements:
-
+
- [Docker](https://docs.docker.com/get-docker/)
## Getting Started
@@ -75,12 +75,12 @@ In order to create and destroy AWS resources on your behalf, Terraform needs an
### Customize Input Values
Terraform scripts require variable definitions as input. Review and modify default values to meet your requirements. Create a file named
-`terraform.tfvars` to customize any input variable value documented in the [CONFIG-VARS.md](docs/CONFIG-VARS.md) file.
+`terraform.tfvars` to customize any input variable value documented in the [CONFIG-VARS.md](docs/CONFIG-VARS.md) file.
To get started, you can copy one of the example variable definition files provided in the [examples](./examples) folder. For more information about the
variables that are declared in each file, refer to the [CONFIG-VARS.md](docs/CONFIG-VARS.md) file.
-**NOTE:** You will need to update the `cidr_blocks` in the [variables.tf](variables.tf) file to allow traffic from your current network. Without these rules,
+**NOTE:** You will need to update the `cidr_blocks` in the [variables.tf](variables.tf) file to allow traffic from your current network. Without these rules,
access to the cluster will only be allowed via the AWS Console.
You have the option to specify variable definitions that are not included in `terraform.tfvars` or to use a variable definition file other than
@@ -88,7 +88,7 @@ You have the option to specify variable definitions that are not included in `te
## Create and Manage Cloud Resources
-Create and manage the required cloud resources. Perform one of the following steps, based on whether you are using Docker:
+Create and manage the required cloud resources. Perform one of the following steps, based on whether you are using Docker:
- run [Terraform](docs/user/TerraformUsage.md) directly on your workstation
- run the [Docker container](docs/user/DockerUsage.md) (recommended)
diff --git a/docs/CONFIG-VARS.md b/docs/CONFIG-VARS.md
index 1b9d7576..72fcbdbf 100644
--- a/docs/CONFIG-VARS.md
+++ b/docs/CONFIG-VARS.md
@@ -197,7 +197,7 @@ Custom policy:
|
Name
| Description
| Type
| Default
| Notes
|
| :--- | :--- | :--- | :--- | :--- |
| create_static_kubeconfig | Allows the user to create a provider- or service account-based kubeconfig file | bool | false | A value of `false` defaults to using the cloud provider's mechanism for generating the kubeconfig file. A value of `true` creates a static kubeconfig that uses a service account and cluster role binding to provide credentials. |
-| kubernetes_version | The EKS cluster Kubernetes version | string | "1.21" | |
+| kubernetes_version | The EKS cluster Kubernetes version | string | "1.22" | |
| create_jump_vm | Create bastion host (jump VM) | bool | true| |
| create_jump_public_ip | Add public IP address to jump VM | bool | true | |
| jump_vm_admin | OS admin user for the jump VM | string | "jumpuser" | |
@@ -298,7 +298,7 @@ Each server element, like `foo = {}`, can contain none, some, or all of the para
| Name
| Description
| Type
| Default
| Notes
|
| :--- | :--- | :--- | :--- | :--- |
-| server_version | The version of the PostgreSQL server | string | "11" | Changing this value trigger resource recreation |
+| server_version | The version of the PostgreSQL server | string | "13" | Refer to the [Viya 4 Administration Guide](https://go.documentation.sas.com/doc/en/sasadmincdc/default/itopssr/p05lfgkwib3zxbn1t6nyihexp12n.htm?fromDefault=#p1wq8ouke3c6ixn1la636df9oa1u) for the supported versions of PostgreSQL for SAS Viya. |
| instance_type | The VM type for the PostgreSQL Server | string | "db.m5.xlarge" | |
| storage_size | Max storage allowed for the PostgreSQL server in MB | number | 50 | |
| backup_retention_days | Backup retention days for the PostgreSQL server | number | 7 | Supported values are between 7 and 35 days. |
@@ -328,7 +328,7 @@ database_servers = {
deletion_protection = false
administrator_login = "cpsadmin"
administrator_password = "1tsAB3aut1fulDay"
- server_version = "12"
+ server_version = "13"
server_port = "5432"
ssl_enforcement_enabled = true
  parameters = [{ "apply_method": "immediate", "name": "foo", "value": "true" }, { "apply_method": "immediate", "name": "bar", "value": "false" }]
diff --git a/examples/sample-input-byo.tfvars b/examples/sample-input-byo.tfvars
index 167ff88d..55696d90 100644
--- a/examples/sample-input-byo.tfvars
+++ b/examples/sample-input-byo.tfvars
@@ -1,5 +1,5 @@
# !NOTE! - These are only a subset of the variables in CONFIG-VARS.md provided
-# as examples. Customize this file to add any variables from CONFIG-VARS.md whose
+# as examples. Customize this file to add any variables from CONFIG-VARS.md whose
# default values you want to change.
# **************** REQUIRED VARIABLES ****************
@@ -13,7 +13,7 @@ vpc_id = "" # only needed if using pre-existing VPC
subnet_ids = { # only needed if using pre-existing subnets
"public" : ["existing-public-subnet-id1", "existing-public-subnet-id2"],
"private" : ["existing-private-subnet-id1", "existing-private-subnet-id2"],
- "database" : ["existing-database-subnet-id1", "existing-database-subnet-id2"] # only when 'create_postgres=true'
+ "database" : ["existing-database-subnet-id1", "existing-database-subnet-id2"] # only when 'create_postgres=true'
}
nat_id = ""
security_group_id = "" # only needed if using pre-existing Security Group
@@ -37,12 +37,12 @@ postgres_servers = {
}
## Cluster config
-kubernetes_version = "1.21"
+kubernetes_version = "1.22"
default_nodepool_node_count = 2
default_nodepool_vm_type = "m5.2xlarge"
default_nodepool_custom_data = ""
-## General
+## General
efs_performance_mode = "maxIO"
storage_type = "standard"
diff --git a/examples/sample-input-connect.tfvars b/examples/sample-input-connect.tfvars
index 4e1c86de..6f26e0bf 100644
--- a/examples/sample-input-connect.tfvars
+++ b/examples/sample-input-connect.tfvars
@@ -27,12 +27,12 @@ postgres_servers = {
}
## Cluster config
-kubernetes_version = "1.21"
+kubernetes_version = "1.22"
default_nodepool_node_count = 2
default_nodepool_vm_type = "m5.2xlarge"
default_nodepool_custom_data = ""
-## General
+## General
efs_performance_mode = "maxIO"
storage_type = "standard"
diff --git a/examples/sample-input-custom-data.tfvars b/examples/sample-input-custom-data.tfvars
index c13ad084..afcfbeba 100644
--- a/examples/sample-input-custom-data.tfvars
+++ b/examples/sample-input-custom-data.tfvars
@@ -27,18 +27,18 @@ postgres_servers = {
}
## Cluster config
-kubernetes_version = "1.21"
+kubernetes_version = "1.22"
default_nodepool_node_count = 2
default_nodepool_vm_type = "m5.2xlarge"
default_nodepool_custom_data = ""
-## General
+## General
efs_performance_mode = "maxIO"
storage_type = "standard"
## Cluster Node Pools config
node_pools = {
- cas = {
+ cas = {
"vm_type" = "i3.8xlarge"
"cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
@@ -47,15 +47,15 @@ node_pools = {
"min_nodes" = 1
"max_nodes" = 5
"node_taints" = ["workload.sas.com/class=cas:NoSchedule"]
- "node_labels" = {
- "workload.sas.com/class" = "cas"
+ "node_labels" = {
+ "workload.sas.com/class" = "cas"
}
"custom_data" = "./files/custom-data/additional_userdata.sh"
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
},
- compute = {
+ compute = {
"vm_type" = "m5.8xlarge"
"cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
@@ -73,7 +73,7 @@ node_pools = {
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
},
- stateless = {
+ stateless = {
"vm_type" = "m5.4xlarge"
"cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
@@ -82,15 +82,15 @@ node_pools = {
"min_nodes" = 1
"max_nodes" = 5
"node_taints" = ["workload.sas.com/class=stateless:NoSchedule"]
- "node_labels" = {
- "workload.sas.com/class" = "stateless"
+ "node_labels" = {
+ "workload.sas.com/class" = "stateless"
}
"custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
- },
- stateful = {
+ },
+ stateful = {
"vm_type" = "m5.4xlarge"
"cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
@@ -99,8 +99,8 @@ node_pools = {
"min_nodes" = 1
"max_nodes" = 3
"node_taints" = ["workload.sas.com/class=stateful:NoSchedule"]
- "node_labels" = {
- "workload.sas.com/class" = "stateful"
+ "node_labels" = {
+ "workload.sas.com/class" = "stateful"
}
"custom_data" = ""
"metadata_http_endpoint" = "enabled"
diff --git a/examples/sample-input-gpu.tfvars b/examples/sample-input-gpu.tfvars
index 068c9885..5522c286 100644
--- a/examples/sample-input-gpu.tfvars
+++ b/examples/sample-input-gpu.tfvars
@@ -27,12 +27,12 @@ postgres_servers = {
}
## Cluster config
-kubernetes_version = "1.21"
+kubernetes_version = "1.22"
default_nodepool_node_count = 2
default_nodepool_vm_type = "m5.2xlarge"
default_nodepool_custom_data = ""
-## General
+## General
efs_performance_mode = "maxIO"
storage_type = "standard"
diff --git a/examples/sample-input-ha.tfvars b/examples/sample-input-ha.tfvars
index 59a0a72f..a59efe8d 100644
--- a/examples/sample-input-ha.tfvars
+++ b/examples/sample-input-ha.tfvars
@@ -30,18 +30,18 @@ postgres_servers = {
ssh_public_key = "~/.ssh/id_rsa.pub"
## Cluster config
-kubernetes_version = "1.21"
+kubernetes_version = "1.22"
default_nodepool_node_count = 2
default_nodepool_vm_type = "m5.2xlarge"
default_nodepool_custom_data = ""
-## General
+## General
efs_performance_mode = "maxIO"
storage_type = "ha"
## Cluster Node Pools config
node_pools = {
- cas = {
+ cas = {
"vm_type" = "i3.8xlarge"
"cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
@@ -50,15 +50,15 @@ node_pools = {
"min_nodes" = 1
"max_nodes" = 5
"node_taints" = ["workload.sas.com/class=cas:NoSchedule"]
- "node_labels" = {
- "workload.sas.com/class" = "cas"
+ "node_labels" = {
+ "workload.sas.com/class" = "cas"
}
"custom_data" = "./files/custom-data/additional_userdata.sh"
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
},
- compute = {
+ compute = {
"vm_type" = "m5.8xlarge"
"cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
@@ -76,7 +76,7 @@ node_pools = {
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
},
- stateless = {
+ stateless = {
"vm_type" = "m5.4xlarge"
"cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
@@ -85,15 +85,15 @@ node_pools = {
"min_nodes" = 1
"max_nodes" = 5
"node_taints" = ["workload.sas.com/class=stateless:NoSchedule"]
- "node_labels" = {
- "workload.sas.com/class" = "stateless"
+ "node_labels" = {
+ "workload.sas.com/class" = "stateless"
}
"custom_data" = ""
"metadata_http_endpoint" = "enabled"
"metadata_http_tokens" = "required"
"metadata_http_put_response_hop_limit" = 1
- },
- stateful = {
+ },
+ stateful = {
"vm_type" = "m5.4xlarge"
"cpu_type" = "AL2_x86_64"
"os_disk_type" = "gp2"
@@ -102,8 +102,8 @@ node_pools = {
"min_nodes" = 1
"max_nodes" = 3
"node_taints" = ["workload.sas.com/class=stateful:NoSchedule"]
- "node_labels" = {
- "workload.sas.com/class" = "stateful"
+ "node_labels" = {
+ "workload.sas.com/class" = "stateful"
}
"custom_data" = ""
"metadata_http_endpoint" = "enabled"
diff --git a/examples/sample-input-minimal.tfvars b/examples/sample-input-minimal.tfvars
index 91a2287f..f89a66ba 100644
--- a/examples/sample-input-minimal.tfvars
+++ b/examples/sample-input-minimal.tfvars
@@ -27,12 +27,12 @@ tags = { } # e.g., { "key1" = "value1", "key2
# }
## Cluster config
-kubernetes_version = "1.21"
+kubernetes_version = "1.22"
default_nodepool_node_count = 1
default_nodepool_vm_type = "m5.large"
default_nodepool_custom_data = ""
-## General
+## General
efs_performance_mode = "maxIO"
storage_type = "standard"
@@ -48,8 +48,8 @@ node_pools = {
"min_nodes" = 0
"max_nodes" = 5
"node_taints" = ["workload.sas.com/class=cas:NoSchedule"]
- "node_labels" = {
- "workload.sas.com/class" = "cas"
+ "node_labels" = {
+ "workload.sas.com/class" = "cas"
}
"custom_data" = ""
"metadata_http_endpoint" = "enabled"
diff --git a/examples/sample-input.tfvars b/examples/sample-input.tfvars
index 9a3174cd..4655a045 100644
--- a/examples/sample-input.tfvars
+++ b/examples/sample-input.tfvars
@@ -27,12 +27,12 @@ postgres_servers = {
}
## Cluster config
-kubernetes_version = "1.21"
+kubernetes_version = "1.22"
default_nodepool_node_count = 2
default_nodepool_vm_type = "m5.2xlarge"
default_nodepool_custom_data = ""
-## General
+## General
efs_performance_mode = "maxIO"
storage_type = "standard"
diff --git a/locals.tf b/locals.tf
index 7cbae0ee..5571203d 100755
--- a/locals.tf
+++ b/locals.tf
@@ -33,7 +33,7 @@ locals {
# Mapping node_pools to node_groups
default_node_pool = {
default = {
- name = "default"
+ name = "${local.cluster_name}-default"
instance_types = [var.default_nodepool_vm_type]
block_device_mappings = {
xvda = {
@@ -57,7 +57,7 @@ locals {
labels = var.default_nodepool_labels
# User data
bootstrap_extra_args = "--kubelet-extra-args '--node-labels=${replace(replace(jsonencode(var.default_nodepool_labels), "/[\"\\{\\}]/", ""), ":", "=")} --register-with-taints=${join(",", var.default_nodepool_taints)} ' "
- post_bootstrap_user_data = (var.default_nodepool_custom_data != "" ? file(var.default_nodepool_custom_data) : "")
+ pre_bootstrap_user_data = (var.default_nodepool_custom_data != "" ? file(var.default_nodepool_custom_data) : "")
metadata_options = {
http_endpoint = var.default_nodepool_metadata_http_endpoint
http_tokens = var.default_nodepool_metadata_http_tokens
@@ -74,7 +74,7 @@ locals {
user_node_pool = {
for key, np_value in var.node_pools :
key => {
- name = key
+ name = "${local.cluster_name}-${key}"
instance_types = [np_value.vm_type]
ami_type = np_value.cpu_type
disk_size = np_value.os_disk_size
@@ -101,7 +101,7 @@ locals {
labels = np_value.node_labels
# User data
bootstrap_extra_args = "--kubelet-extra-args '--node-labels=${replace(replace(jsonencode(np_value.node_labels), "/[\"\\{\\}]/", ""), ":", "=")} --register-with-taints=${join(",", np_value.node_taints)}' "
- post_bootstrap_user_data = (np_value.custom_data != "" ? file(np_value.custom_data) : "")
+ pre_bootstrap_user_data = (np_value.custom_data != "" ? file(np_value.custom_data) : "")
metadata_options = {
http_endpoint = var.default_nodepool_metadata_http_endpoint
http_tokens = var.default_nodepool_metadata_http_tokens
diff --git a/main.tf b/main.tf
index 045b87a5..aead0837 100755
--- a/main.tf
+++ b/main.tf
@@ -54,6 +54,10 @@ EOT
# EKS Provider
provider "kubernetes" {
+ # The endpoint attribute reference from the aws_eks_cluster data source in the line below will
+ # delay the initialization of the k8s provider until the cluster is ready with a defined endpoint value.
+ # It establishes a dependency on the entire EKS cluster being ready and also provides a desired input to
+ # the kubernetes provider.
host = data.aws_eks_cluster.cluster.endpoint
cluster_ca_certificate = base64decode(local.kubeconfig_ca_cert)
token = data.aws_eks_cluster_auth.cluster.token
@@ -165,6 +169,15 @@ module "autoscaling" {
oidc_url = module.eks.cluster_oidc_issuer_url
}
+module "ebs" {
+ source = "./modules/aws_ebs_csi"
+
+ prefix = var.prefix
+ cluster_name = local.cluster_name
+ tags = var.tags
+ oidc_url = module.eks.cluster_oidc_issuer_url
+}
+
module "kubeconfig" {
source = "./modules/kubeconfig"
prefix = var.prefix
diff --git a/modules/aws_ebs_csi/main.tf b/modules/aws_ebs_csi/main.tf
new file mode 100644
index 00000000..afcfdd3d
--- /dev/null
+++ b/modules/aws_ebs_csi/main.tf
@@ -0,0 +1,175 @@
+resource "aws_iam_policy" "ebs_csi" {
+ name_prefix = "${var.prefix}-ebs-csi-policy"
+ description = "EKS ebs csi policy for cluster ${var.cluster_name}"
+ tags = var.tags
+
+ policy = < 7,
can(regex("^[^/'\"@]+$", v.administrator_password)),
- ]) : true
+ ]) : true
]) : false : true
    error_message = "ERROR: The admin password must be at least 8 characters long, and be composed of any printable characters except the following / ' \" @ characters."
}
diff --git a/versions.tf b/versions.tf
index 9fb1b15c..88e1874c 100644
--- a/versions.tf
+++ b/versions.tf
@@ -27,7 +27,7 @@ terraform {
}
kubernetes = {
source = "hashicorp/kubernetes"
- version = "2.2.0"
+ version = "2.12.0"
}
tls = {
source = "hashicorp/tls"