diff --git a/README.md b/README.md
index fb4deb2..bfb0a81 100644
--- a/README.md
+++ b/README.md
@@ -11,7 +11,9 @@ Truefoundry EKS Module
 
 ## Providers
 
-No providers.
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | ~> 5.57 |
 
 ## Modules
 
@@ -22,7 +24,9 @@ No providers.
 
 ## Resources
 
-No resources.
+| Name | Type |
+|------|------|
+| [aws_eks_cluster.eks_cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
 
 ## Inputs
 
@@ -45,13 +49,17 @@ No resources.
 | [cluster\_endpoint\_private\_access](#input\_cluster\_endpoint\_private\_access) | Indicates whether or not the Amazon EKS private API server endpoint is enabled | `bool` | `true` | no |
 | [cluster\_endpoint\_public\_access](#input\_cluster\_endpoint\_public\_access) | Indicates whether or not the Amazon EKS public API server endpoint is enabled | `bool` | `true` | no |
 | [cluster\_endpoint\_public\_access\_cidrs](#input\_cluster\_endpoint\_public\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS public API server endpoint | `list(string)` | <pre>[<br>  "0.0.0.0/0"<br>]</pre> | no |
-| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | n/a | yes |
+| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster. If use\_existing\_cluster is set to true, cluster\_name will be used to fetch details only | `string` | n/a | yes |
 | [cluster\_security\_group\_additional\_rules](#input\_cluster\_security\_group\_additional\_rules) | List of additional security group rules to add to the cluster security group created. Set `source_node_security_group = true` inside rules to set the `node_security_group` as source | `any` | `{}` | no |
 | [cluster\_version](#input\_cluster\_version) | EKS cluster version | `string` | `"1.30"` | no |
 | [create\_cloudwatch\_log\_group](#input\_create\_cloudwatch\_log\_group) | Determines whether a log group is created by this module for the cluster logs. If not, AWS will automatically create one if logging is enabled | `bool` | `true` | no |
 | [eks\_managed\_node\_group\_defaults](#input\_eks\_managed\_node\_group\_defaults) | Managed node group defaults | `any` | `{}` | no |
 | [enable\_cluster\_log](#input\_enable\_cluster\_log) | Enable cluster control plane logs | `bool` | `true` | no |
 | [enable\_irsa](#input\_enable\_irsa) | Determines whether to create an OpenID Connect Provider for EKS to enable IRSA | `bool` | `true` | no |
+| [existing\_cluster\_node\_role\_arn](#input\_existing\_cluster\_node\_role\_arn) | IAM node role ARN for an existing cluster. This will only be used when use\_existing\_cluster is true | `string` | `""` | no |
+| [existing\_cluster\_node\_security\_group\_id](#input\_existing\_cluster\_node\_security\_group\_id) | Node security group for an existing cluster. This will only be used when use\_existing\_cluster is true. | `string` | `""` | no |
+| [existing\_cluster\_oidc\_issuer\_arn](#input\_existing\_cluster\_oidc\_issuer\_arn) | OIDC issuer ARN for an existing cluster. This will only be used when use\_existing\_cluster is true. | `string` | `""` | no |
+| [existing\_cluster\_oidc\_issuer\_url](#input\_existing\_cluster\_oidc\_issuer\_url) | OIDC issuer URL for an existing cluster. This will only be used when use\_existing\_cluster is true. | `string` | `""` | no |
 | [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `map(string)` | `{}` | no |
 | [inital\_node\_pool\_capacity\_type](#input\_inital\_node\_pool\_capacity\_type) | capacity type for the initial node pool | `string` | `"SPOT"` | no |
 | [initial\_node\_pool\_ami\_type](#input\_initial\_node\_pool\_ami\_type) | AMI type for the initial node pool | `string` | `"AL2023_x86_64_STANDARD"` | no |
@@ -81,6 +89,7 @@ No resources.
 | [self\_managed\_node\_groups](#input\_self\_managed\_node\_groups) | Map of self-managed node group definitions to create | `any` | `{}` | no |
 | [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs where the EKS cluster (ENIs) will be provisioned along with the nodes/node groups. Node groups can be deployed within a different set of subnet IDs from within the node group configuration | `list(string)` | `[]` | no |
 | [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no |
+| [use\_existing\_cluster](#input\_use\_existing\_cluster) | Flag to use an existing cluster. If this is true, a new EKS cluster will not be created | `bool` | `false` | no |
 | [vpc\_id](#input\_vpc\_id) | ID of the VPC where the cluster and its nodes will be provisioned | `string` | `null` | no |
 
 ## Outputs
 
@@ -110,6 +119,7 @@ No resources.
 | [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
 | [node\_security\_group\_arn](#output\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the node shared security group |
 | [node\_security\_group\_id](#output\_node\_security\_group\_id) | ID of the node shared security group |
-| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
+| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider |
 | [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
+| [use\_existing\_cluster](#output\_use\_existing\_cluster) | Flag to check if you are using an already existing cluster |
\ No newline at end of file
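For context, a root module consuming the new inputs might look like the sketch below. The registry source path and all values are assumptions for illustration, not part of this change.

```hcl
# Hypothetical caller: adopt an already-running EKS cluster instead of
# creating one. When use_existing_cluster is true, the module only reads
# the cluster via a data source, and the existing_cluster_* inputs
# supply what the data source cannot.
module "cluster" {
  source = "truefoundry/truefoundry-cluster/aws" # assumed source path

  cluster_name         = "demo-cluster"
  use_existing_cluster = true

  existing_cluster_node_role_arn          = "arn:aws:iam::111122223333:role/demo-node-role"
  existing_cluster_node_security_group_id = "sg-0123456789abcdef0"
  existing_cluster_oidc_issuer_arn        = "arn:aws:iam::111122223333:oidc-provider/oidc.eks.us-east-1.amazonaws.com/id/EXAMPLE"
  existing_cluster_oidc_issuer_url        = "https://oidc.eks.us-east-1.amazonaws.com/id/EXAMPLE"
}
```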
diff --git a/data.tf b/data.tf
new file mode 100644
index 0000000..8c24aaf
--- /dev/null
+++ b/data.tf
@@ -0,0 +1,7 @@
+data "aws_eks_cluster" "eks_cluster" {
+  count = var.use_existing_cluster ? 1 : 0
+  name  = var.cluster_name
+}
+
+# TODO
+# apply a data block for openid_connect_provider to fetch the OIDC provider ARN directly using the cluster's identity[0].oidc[0].issuer URL
\ No newline at end of file
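The TODO above could plausibly be resolved with the `aws_iam_openid_connect_provider` data source, which can look up an existing provider by its issuer URL. A minimal sketch, not part of this change (the data source name is hypothetical):

```hcl
# Hypothetical follow-up for the TODO in data.tf: resolve the OIDC
# provider ARN from the cluster itself instead of requiring
# existing_cluster_oidc_issuer_arn / existing_cluster_oidc_issuer_url
# as inputs.
data "aws_iam_openid_connect_provider" "eks" {
  count = var.use_existing_cluster ? 1 : 0
  url   = data.aws_eks_cluster.eks_cluster[0].identity[0].oidc[0].issuer
}

# Outputs could then reference:
#   data.aws_iam_openid_connect_provider.eks[0].arn
#   data.aws_eks_cluster.eks_cluster[0].identity[0].oidc[0].issuer
```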
diff --git a/eks.tf b/eks.tf
index 1f21797..0776b85 100644
--- a/eks.tf
+++ b/eks.tf
@@ -3,6 +3,7 @@
 ###################################################################################
 
 module "aws-eks-kubernetes-cluster" {
+  count   = var.use_existing_cluster ? 0 : 1
   source  = "terraform-aws-modules/eks/aws"
   version = "v20.17.2"
   cluster_name = var.cluster_name
diff --git a/eks_addons.tf b/eks_addons.tf
index 5cc844f..bbe629a 100644
--- a/eks_addons.tf
+++ b/eks_addons.tf
@@ -3,13 +3,14 @@
 ###################################################################################
 
 module "eks_blueprints_addons" {
+  count   = var.use_existing_cluster ? 0 : 1
   source  = "aws-ia/eks-blueprints-addons/aws"
   version = "1.16.3"
 
-  cluster_name      = module.aws-eks-kubernetes-cluster.cluster_name
-  cluster_endpoint  = module.aws-eks-kubernetes-cluster.cluster_endpoint
-  cluster_version   = module.aws-eks-kubernetes-cluster.cluster_version
-  oidc_provider_arn = module.aws-eks-kubernetes-cluster.oidc_provider_arn
+  cluster_name      = module.aws-eks-kubernetes-cluster[0].cluster_name
+  cluster_endpoint  = module.aws-eks-kubernetes-cluster[0].cluster_endpoint
+  cluster_version   = module.aws-eks-kubernetes-cluster[0].cluster_version
+  oidc_provider_arn = module.aws-eks-kubernetes-cluster[0].oidc_provider_arn
 
   eks_addons = {
     coredns = {
diff --git a/locals.tf b/locals.tf
index 0a90ef9..acfc285 100644
--- a/locals.tf
+++ b/locals.tf
@@ -104,4 +104,26 @@ locals {
   } : {})
 
   karpenter_profile_name = "${var.cluster_name}-karpenter"
+
+  // this is used when use_existing_cluster is set to true, so that we don't have to modify eks_managed_node_groups
+  output_eks_managed_node_groups = tomap({
+    "initial" = {
+      iam_role_arn                       = var.existing_cluster_node_role_arn
+      launch_template_id                 = ""
+      autoscaling_group_schedule_arns    = {}
+      iam_role_name                      = ""
+      iam_role_unique_id                 = ""
+      launch_template_arn                = ""
+      launch_template_latest_version     = 0
+      launch_template_name               = ""
+      node_group_arn                     = ""
+      node_group_autoscaling_group_names = []
+      node_group_id                      = ""
+      node_group_labels                  = {}
+      node_group_resources               = []
+      node_group_status                  = ""
+      node_group_taints                  = []
+      platform                           = ""
+    }
+  })
 }
\ No newline at end of file
diff --git a/output.tf b/output.tf
index 483878a..72ed665 100644
--- a/output.tf
+++ b/output.tf
@@ -3,50 +3,55 @@
 ################################################################################
 # Cluster
 ################################################################################
+output "use_existing_cluster" {
+  description = "Flag to check if you are using an already existing cluster"
+  value       = var.use_existing_cluster
+}
 
 output "cluster_arn" {
   description = "The Amazon Resource Name (ARN) of the cluster"
-  value       = module.aws-eks-kubernetes-cluster.cluster_arn
+  value       = var.use_existing_cluster ? data.aws_eks_cluster.eks_cluster[0].arn : module.aws-eks-kubernetes-cluster[0].cluster_arn
 }
 
 output "cluster_certificate_authority_data" {
   description = "Base64 encoded certificate data required to communicate with the cluster"
-  value       = module.aws-eks-kubernetes-cluster.cluster_certificate_authority_data
+  value       = var.use_existing_cluster ? data.aws_eks_cluster.eks_cluster[0].certificate_authority[0].data : module.aws-eks-kubernetes-cluster[0].cluster_certificate_authority_data
 }
 
 output "cluster_endpoint" {
   description = "Endpoint for your Kubernetes API server"
-  value       = module.aws-eks-kubernetes-cluster.cluster_endpoint
+  value       = var.use_existing_cluster ? data.aws_eks_cluster.eks_cluster[0].endpoint : module.aws-eks-kubernetes-cluster[0].cluster_endpoint
 }
 
 output "cluster_id" {
   description = "DEPRECATED - Use cluster_name"
-  value       = module.aws-eks-kubernetes-cluster.cluster_name
+  value       = var.use_existing_cluster ? data.aws_eks_cluster.eks_cluster[0].id : module.aws-eks-kubernetes-cluster[0].cluster_name
 }
 
 output "cluster_name" {
   description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
-  value       = module.aws-eks-kubernetes-cluster.cluster_name
+  value       = var.use_existing_cluster ? data.aws_eks_cluster.eks_cluster[0].name : module.aws-eks-kubernetes-cluster[0].cluster_name
 }
 
 output "cluster_oidc_issuer_url" {
   description = "The URL on the EKS cluster for the OpenID Connect identity provider"
-  value       = module.aws-eks-kubernetes-cluster.cluster_oidc_issuer_url
+  # value = var.use_existing_cluster ? data.aws_eks_cluster.eks_cluster[0].identity[0].oidc[0].issuer : module.aws-eks-kubernetes-cluster[0].cluster_oidc_issuer_url
+  value       = var.use_existing_cluster ? var.existing_cluster_oidc_issuer_url : module.aws-eks-kubernetes-cluster[0].cluster_oidc_issuer_url
 }
 
 output "cluster_platform_version" {
   description = "Platform version for the cluster"
-  value       = module.aws-eks-kubernetes-cluster.cluster_platform_version
+  value       = var.use_existing_cluster ? data.aws_eks_cluster.eks_cluster[0].platform_version : module.aws-eks-kubernetes-cluster[0].cluster_platform_version
 }
 
 output "cluster_status" {
   description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
-  value       = module.aws-eks-kubernetes-cluster.cluster_status
+  value       = var.use_existing_cluster ? data.aws_eks_cluster.eks_cluster[0].status : module.aws-eks-kubernetes-cluster[0].cluster_status
 }
 
 output "cluster_primary_security_group_id" {
   description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
-  value       = module.aws-eks-kubernetes-cluster.cluster_primary_security_group_id
+  value       = var.use_existing_cluster ? data.aws_eks_cluster.eks_cluster[0].vpc_config[0].cluster_security_group_id : module.aws-eks-kubernetes-cluster[0].cluster_primary_security_group_id
 }
 
 ################################################################################
@@ -55,12 +60,12 @@ output "cluster_primary_security_group_id" {
 
 output "cluster_security_group_arn" {
   description = "Amazon Resource Name (ARN) of the cluster security group"
-  value       = module.aws-eks-kubernetes-cluster.cluster_security_group_arn
+  value       = var.use_existing_cluster ? "" : module.aws-eks-kubernetes-cluster[0].cluster_security_group_arn
 }
 
 output "cluster_security_group_id" {
   description = "ID of the cluster security group"
-  value       = module.aws-eks-kubernetes-cluster.cluster_security_group_id
+  value       = var.use_existing_cluster ? data.aws_eks_cluster.eks_cluster[0].vpc_config[0].cluster_security_group_id : module.aws-eks-kubernetes-cluster[0].cluster_security_group_id
 }
 
 ################################################################################
@@ -69,12 +74,12 @@ output "cluster_security_group_id" {
 
 output "node_security_group_arn" {
   description = "Amazon Resource Name (ARN) of the node shared security group"
-  value       = module.aws-eks-kubernetes-cluster.node_security_group_arn
+  value       = var.use_existing_cluster ? "" : module.aws-eks-kubernetes-cluster[0].node_security_group_arn
 }
 
 output "node_security_group_id" {
   description = "ID of the node shared security group"
-  value       = module.aws-eks-kubernetes-cluster.node_security_group_id
+  value       = var.use_existing_cluster ? var.existing_cluster_node_security_group_id : module.aws-eks-kubernetes-cluster[0].node_security_group_id
 }
 
 ################################################################################
@@ -82,8 +87,8 @@ output "node_security_group_id" {
 ################################################################################
 
 output "oidc_provider_arn" {
-  description = "The ARN of the OIDC Provider if `enable_irsa = true`"
-  value       = module.aws-eks-kubernetes-cluster.oidc_provider_arn
+  description = "The ARN of the OIDC Provider"
+  value       = var.use_existing_cluster ? var.existing_cluster_oidc_issuer_arn : module.aws-eks-kubernetes-cluster[0].oidc_provider_arn
 }
 
 ################################################################################
@@ -92,17 +97,17 @@ output "oidc_provider_arn" {
 
 output "cluster_iam_role_name" {
   description = "IAM role name of the EKS cluster"
-  value       = module.aws-eks-kubernetes-cluster.cluster_iam_role_name
+  value       = var.use_existing_cluster ? "" : module.aws-eks-kubernetes-cluster[0].cluster_iam_role_name
 }
 
 output "cluster_iam_role_arn" {
   description = "IAM role ARN of the EKS cluster"
-  value       = module.aws-eks-kubernetes-cluster.cluster_iam_role_arn
+  value       = var.use_existing_cluster ? "" : module.aws-eks-kubernetes-cluster[0].cluster_iam_role_arn
 }
 
 output "cluster_iam_role_unique_id" {
   description = "Stable and unique string identifying the IAM role"
-  value       = module.aws-eks-kubernetes-cluster.cluster_iam_role_unique_id
+  value       = var.use_existing_cluster ? "" : module.aws-eks-kubernetes-cluster[0].cluster_iam_role_unique_id
 }
 
 ################################################################################
@@ -111,7 +116,7 @@ output "cluster_iam_role_unique_id" {
 
 output "cluster_addons" {
   description = "Map of attribute maps for all EKS cluster addons enabled"
-  value       = module.eks_blueprints_addons.eks_addons
+  value       = var.use_existing_cluster ? {} : module.eks_blueprints_addons[0].eks_addons
 }
 
 ################################################################################
@@ -120,7 +125,7 @@ output "cluster_addons" {
 
 output "cluster_identity_providers" {
   description = "Map of attribute maps for all EKS identity providers enabled"
-  value       = module.aws-eks-kubernetes-cluster.cluster_identity_providers
+  value       = var.use_existing_cluster ? {} : module.aws-eks-kubernetes-cluster[0].cluster_identity_providers
   sensitive   = true
 }
 
@@ -130,12 +135,12 @@ output "cluster_identity_providers" {
 
 output "cloudwatch_log_group_name" {
   description = "Name of cloudwatch log group created"
-  value       = module.aws-eks-kubernetes-cluster.cloudwatch_log_group_name
+  value       = var.use_existing_cluster ? "" : module.aws-eks-kubernetes-cluster[0].cloudwatch_log_group_name
 }
 
 output "cloudwatch_log_group_arn" {
   description = "Arn of cloudwatch log group created"
-  value       = module.aws-eks-kubernetes-cluster.cloudwatch_log_group_arn
+  value       = var.use_existing_cluster ? "" : module.aws-eks-kubernetes-cluster[0].cloudwatch_log_group_arn
 }
 
 ################################################################################
@@ -144,7 +149,7 @@ output "cloudwatch_log_group_arn" {
 
 output "fargate_profiles" {
   description = "Map of attribute maps for all EKS Fargate Profiles created"
-  value       = module.aws-eks-kubernetes-cluster.fargate_profiles
+  value       = var.use_existing_cluster ? {} : module.aws-eks-kubernetes-cluster[0].fargate_profiles
 }
 
 ################################################################################
@@ -153,7 +158,7 @@ output "fargate_profiles" {
 
 output "eks_managed_node_groups" {
   description = "Map of attribute maps for all EKS managed node groups created"
-  value       = module.aws-eks-kubernetes-cluster.eks_managed_node_groups
+  value       = var.use_existing_cluster ? local.output_eks_managed_node_groups : module.aws-eks-kubernetes-cluster[0].eks_managed_node_groups
 }
 
 ################################################################################
@@ -162,7 +167,7 @@ output "eks_managed_node_groups" {
 
 output "self_managed_node_groups" {
   description = "Map of attribute maps for all self managed node groups created"
-  value       = module.aws-eks-kubernetes-cluster.self_managed_node_groups
+  value       = var.use_existing_cluster ? {} : module.aws-eks-kubernetes-cluster[0].self_managed_node_groups
 }
 
 ################################################################################
@@ -171,5 +176,5 @@ output "self_managed_node_groups" {
 
 output "aws_access_entries" {
   description = "Access entries for the EKS cluster security group"
-  value       = module.aws-eks-kubernetes-cluster.access_entries
+  value       = var.use_existing_cluster ? {} : module.aws-eks-kubernetes-cluster[0].access_entries
 }
\ No newline at end of file
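A pattern worth noting for the outputs above (an alternative sketch, not what this diff does): `one()` collapses a zero-or-one-instance module to a value-or-`null`, which avoids the explicit ternary for outputs that otherwise fall back to `""` or `{}`. The output name below is hypothetical:

```hcl
# one() returns the single element of a 0/1-length tuple, or null when
# the module was not created, so no ternary on var.use_existing_cluster
# is needed for values that only exist on the "create" path.
output "cluster_iam_role_arn_alt" {
  description = "IAM role ARN of the created cluster, or null when adopting an existing one"
  value       = one(module.aws-eks-kubernetes-cluster[*].cluster_iam_role_arn)
}
```

The trade-off is that callers receive `null` instead of an empty string or map, which may or may not suit downstream consumers.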
diff --git a/scripts/migration-script-7.sh b/scripts/migration-script-7.sh
new file mode 100644
index 0000000..bbd8925
--- /dev/null
+++ b/scripts/migration-script-7.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_ec2_tag.cluster_primary_security_group["cluster-name"]' 'module.aws-eks-kubernetes-cluster[0].aws_ec2_tag.cluster_primary_security_group["cluster-name"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_ec2_tag.cluster_primary_security_group["terraform"]' 'module.aws-eks-kubernetes-cluster[0].aws_ec2_tag.cluster_primary_security_group["terraform"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_ec2_tag.cluster_primary_security_group["terraform-module"]' 'module.aws-eks-kubernetes-cluster[0].aws_ec2_tag.cluster_primary_security_group["terraform-module"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_eks_access_entry.this["cluster_creator"]' 'module.aws-eks-kubernetes-cluster[0].aws_eks_access_entry.this["cluster_creator"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_eks_access_policy_association.this["cluster_creator_admin"]' 'module.aws-eks-kubernetes-cluster[0].aws_eks_access_policy_association.this["cluster_creator_admin"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_eks_cluster.this[0]' 'module.aws-eks-kubernetes-cluster[0].aws_eks_cluster.this[0]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_iam_openid_connect_provider.oidc_provider[0]' 'module.aws-eks-kubernetes-cluster[0].aws_iam_openid_connect_provider.oidc_provider[0]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_iam_policy.cluster_encryption[0]' 'module.aws-eks-kubernetes-cluster[0].aws_iam_policy.cluster_encryption[0]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_iam_role.this[0]' 'module.aws-eks-kubernetes-cluster[0].aws_iam_role.this[0]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_iam_role_policy_attachment.cluster_encryption[0]' 'module.aws-eks-kubernetes-cluster[0].aws_iam_role_policy_attachment.cluster_encryption[0]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_iam_role_policy_attachment.this["AmazonEKSClusterPolicy"]' 'module.aws-eks-kubernetes-cluster[0].aws_iam_role_policy_attachment.this["AmazonEKSClusterPolicy"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_iam_role_policy_attachment.this["AmazonEKSVPCResourceController"]' 'module.aws-eks-kubernetes-cluster[0].aws_iam_role_policy_attachment.this["AmazonEKSVPCResourceController"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_security_group.cluster[0]' 'module.aws-eks-kubernetes-cluster[0].aws_security_group.cluster[0]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_security_group.node[0]' 'module.aws-eks-kubernetes-cluster[0].aws_security_group.node[0]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_security_group_rule.cluster["egress_nodes_ephemeral_ports_tcp"]' 'module.aws-eks-kubernetes-cluster[0].aws_security_group_rule.cluster["egress_nodes_ephemeral_ports_tcp"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_security_group_rule.cluster["ingress_nodes_443"]' 'module.aws-eks-kubernetes-cluster[0].aws_security_group_rule.cluster["ingress_nodes_443"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_security_group_rule.node["egress_all"]' 'module.aws-eks-kubernetes-cluster[0].aws_security_group_rule.node["egress_all"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_security_group_rule.node["ingress_cluster_443"]' 'module.aws-eks-kubernetes-cluster[0].aws_security_group_rule.node["ingress_cluster_443"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_security_group_rule.node["ingress_cluster_4443_webhook"]' 'module.aws-eks-kubernetes-cluster[0].aws_security_group_rule.node["ingress_cluster_4443_webhook"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_security_group_rule.node["ingress_cluster_6443_webhook"]' 'module.aws-eks-kubernetes-cluster[0].aws_security_group_rule.node["ingress_cluster_6443_webhook"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_security_group_rule.node["ingress_cluster_8443_webhook"]' 'module.aws-eks-kubernetes-cluster[0].aws_security_group_rule.node["ingress_cluster_8443_webhook"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_security_group_rule.node["ingress_cluster_9443_webhook"]' 'module.aws-eks-kubernetes-cluster[0].aws_security_group_rule.node["ingress_cluster_9443_webhook"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_security_group_rule.node["ingress_cluster_kubelet"]' 'module.aws-eks-kubernetes-cluster[0].aws_security_group_rule.node["ingress_cluster_kubelet"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_security_group_rule.node["ingress_control_plane_all"]' 'module.aws-eks-kubernetes-cluster[0].aws_security_group_rule.node["ingress_control_plane_all"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_security_group_rule.node["ingress_nodes_ephemeral"]' 'module.aws-eks-kubernetes-cluster[0].aws_security_group_rule.node["ingress_nodes_ephemeral"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_security_group_rule.node["ingress_self_all"]' 'module.aws-eks-kubernetes-cluster[0].aws_security_group_rule.node["ingress_self_all"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_security_group_rule.node["ingress_self_coredns_tcp"]' 'module.aws-eks-kubernetes-cluster[0].aws_security_group_rule.node["ingress_self_coredns_tcp"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_security_group_rule.node["ingress_self_coredns_udp"]' 'module.aws-eks-kubernetes-cluster[0].aws_security_group_rule.node["ingress_self_coredns_udp"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_security_group_rule.node["metrics_server_10250_eg"]' 'module.aws-eks-kubernetes-cluster[0].aws_security_group_rule.node["metrics_server_10250_eg"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.aws_security_group_rule.node["metrics_server_10250_ing"]' 'module.aws-eks-kubernetes-cluster[0].aws_security_group_rule.node["metrics_server_10250_ing"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.time_sleep.this[0]' 'module.aws-eks-kubernetes-cluster[0].time_sleep.this[0]'
+# addons
+terraform state mv 'module.eks_blueprints_addons.aws_eks_addon.this["coredns"]' 'module.eks_blueprints_addons[0].aws_eks_addon.this["coredns"]'
+terraform state mv 'module.eks_blueprints_addons.aws_eks_addon.this["eks-pod-identity-agent"]' 'module.eks_blueprints_addons[0].aws_eks_addon.this["eks-pod-identity-agent"]'
+terraform state mv 'module.eks_blueprints_addons.aws_eks_addon.this["kube-proxy"]' 'module.eks_blueprints_addons[0].aws_eks_addon.this["kube-proxy"]'
+terraform state mv 'module.eks_blueprints_addons.aws_eks_addon.this["vpc-cni"]' 'module.eks_blueprints_addons[0].aws_eks_addon.this["vpc-cni"]'
+terraform state mv 'module.eks_blueprints_addons.time_sleep.this' 'module.eks_blueprints_addons[0].time_sleep.this'
+# node group
+terraform state mv 'module.aws-eks-kubernetes-cluster.module.eks_managed_node_group["initial"].aws_eks_node_group.this[0]' 'module.aws-eks-kubernetes-cluster[0].module.eks_managed_node_group["initial"].aws_eks_node_group.this[0]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.module.eks_managed_node_group["initial"].aws_iam_role.this[0]' 'module.aws-eks-kubernetes-cluster[0].module.eks_managed_node_group["initial"].aws_iam_role.this[0]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.module.eks_managed_node_group["initial"].aws_iam_role_policy_attachment.additional["karpenter"]' 'module.aws-eks-kubernetes-cluster[0].module.eks_managed_node_group["initial"].aws_iam_role_policy_attachment.additional["karpenter"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.module.eks_managed_node_group["initial"].aws_iam_role_policy_attachment.this["AmazonEC2ContainerRegistryReadOnly"]' 'module.aws-eks-kubernetes-cluster[0].module.eks_managed_node_group["initial"].aws_iam_role_policy_attachment.this["AmazonEC2ContainerRegistryReadOnly"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.module.eks_managed_node_group["initial"].aws_iam_role_policy_attachment.this["AmazonEKSWorkerNodePolicy"]' 'module.aws-eks-kubernetes-cluster[0].module.eks_managed_node_group["initial"].aws_iam_role_policy_attachment.this["AmazonEKSWorkerNodePolicy"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.module.eks_managed_node_group["initial"].aws_iam_role_policy_attachment.this["AmazonEKS_CNI_Policy"]' 'module.aws-eks-kubernetes-cluster[0].module.eks_managed_node_group["initial"].aws_iam_role_policy_attachment.this["AmazonEKS_CNI_Policy"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.module.eks_managed_node_group["initial"].aws_launch_template.this[0]' 'module.aws-eks-kubernetes-cluster[0].module.eks_managed_node_group["initial"].aws_launch_template.this[0]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.module.kms.aws_kms_alias.this["cluster"]' 'module.aws-eks-kubernetes-cluster[0].module.kms.aws_kms_alias.this["cluster"]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.module.kms.aws_kms_key.this[0]' 'module.aws-eks-kubernetes-cluster[0].module.kms.aws_kms_key.this[0]'
+terraform state mv 'module.aws-eks-kubernetes-cluster.module.eks_managed_node_group["initial"].module.user_data.null_resource.validate_cluster_service_cidr' 'module.aws-eks-kubernetes-cluster[0].module.eks_managed_node_group["initial"].module.user_data.null_resource.validate_cluster_service_cidr'
\ No newline at end of file
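On Terraform v1.1 or later, the same migration can also be expressed declaratively with `moved` blocks, which rename a whole module instance (and everything nested under it) during plan/apply, with no manual state surgery. An alternative sketch, not shipped with this change:

```hcl
# Equivalent of the bulk of scripts/migration-script-7.sh: moving the
# module instance moves every resource it contains.
moved {
  from = module.aws-eks-kubernetes-cluster
  to   = module.aws-eks-kubernetes-cluster[0]
}

moved {
  from = module.eks_blueprints_addons
  to   = module.eks_blueprints_addons[0]
}
```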
diff --git a/upgrade-guide.md b/upgrade-guide.md
index 8943ea3..30a7c45 100644
--- a/upgrade-guide.md
+++ b/upgrade-guide.md
@@ -1,6 +1,21 @@
 # terraform-aws-truefoundry-cluster
 This guide will help you to migrate your terraform code across versions. Keeping your terraform state to the latest version is always recommended
+## Upgrade guide from 0.6.x to 0.7.x
+1. Ensure that you are running on the latest version of 0.6.x
+2. Move to `0.7.0` and run the following command
+   ```bash
+   terraform init -upgrade
+   ```
+3. Run the migration script present in the `scripts/` directory
+   ```bash
+   bash scripts/migration-script-7.sh
+   ```
+4. Run `terraform plan` to check that there is no diff
+   ```bash
+   terraform plan
+   ```
+
 
 ## Upgrade guide from 0.5.x to 0.6.x
 
 ### Pre-requisites
@@ -9,8 +24,10 @@ This guide will help you to migrate your terraform code across versions. Keeping
 
 ## Upgrade changes (manual)
 1. Execute the terraform apply with version `0.6.1`. If it fails, run the below command to import the access entry for the cluster creator
-```
-terragrunt import 'module.aws-eks-kubernetes-cluster.aws_eks_access_entry.this["cluster_creator"]' "$IAM_PRINCIPAL_ARN"
-terragrunt import 'module.aws-eks-kubernetes-cluster.aws_eks_access_policy_association.this["cluster_creator_admin"]' $CLUSTER_NAME#$IAM_PRINCIPAL_ARN#arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy
-```
+
+   ```bash
+   terragrunt import 'module.aws-eks-kubernetes-cluster.aws_eks_access_entry.this["cluster_creator"]' "$IAM_PRINCIPAL_ARN"
+   terragrunt import 'module.aws-eks-kubernetes-cluster.aws_eks_access_policy_association.this["cluster_creator_admin"]' $CLUSTER_NAME#$IAM_PRINCIPAL_ARN#arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy
+   ```
+
 2. If you still face any issue, go ahead and delete the access entry created in the EKS console Access tab and then run `terraform apply`
\ No newline at end of file
diff --git a/variables.tf b/variables.tf
index eaae62a..9d7030f 100644
--- a/variables.tf
+++ b/variables.tf
@@ -1,11 +1,64 @@
 # From https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/variables.tf
 
+################################################################################
+# Existing cluster
+################################################################################
+variable "use_existing_cluster" {
+  description = "Flag to use an existing cluster. If this is true, a new EKS cluster will not be created"
+  default     = false
+  type        = bool
+}
+
+variable "existing_cluster_node_role_arn" {
+  description = "IAM node role ARN for an existing cluster. This will only be used when use_existing_cluster is true"
+  default     = ""
+  type        = string
+
+  validation {
+    condition     = var.use_existing_cluster == false || var.existing_cluster_node_role_arn != ""
+    error_message = "existing_cluster_node_role_arn must be non-empty if use_existing_cluster is true."
+  }
+}
+
+variable "existing_cluster_node_security_group_id" {
+  description = "Node security group for an existing cluster. This will only be used when use_existing_cluster is true."
+  default     = ""
+  type        = string
+
+  validation {
+    condition     = var.use_existing_cluster == false || var.existing_cluster_node_security_group_id != ""
+    error_message = "existing_cluster_node_security_group_id must be non-empty if use_existing_cluster is true."
+  }
+}
+
+variable "existing_cluster_oidc_issuer_arn" {
+  description = "OIDC issuer ARN for an existing cluster. This will only be used when use_existing_cluster is true."
+  default     = ""
+  type        = string
+
+  validation {
+    condition     = var.use_existing_cluster == false || var.existing_cluster_oidc_issuer_arn != ""
+    error_message = "existing_cluster_oidc_issuer_arn must be non-empty if use_existing_cluster is true."
+  }
+}
+
+variable "existing_cluster_oidc_issuer_url" {
+  description = "OIDC issuer URL for an existing cluster. This will only be used when use_existing_cluster is true."
+  default     = ""
+  type        = string
+
+  validation {
+    condition     = var.use_existing_cluster == false || var.existing_cluster_oidc_issuer_url != ""
+    error_message = "existing_cluster_oidc_issuer_url must be non-empty if use_existing_cluster is true."
+  }
+}
+
 ################################################################################
 # Cluster
 ################################################################################
 variable "cluster_name" {
-  description = "Name of the EKS cluster"
+  description = "Name of the EKS cluster. If use_existing_cluster is set to true, cluster_name will be used to fetch details only"
   type        = string
 }
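One detail worth calling out: validation blocks that reference a different variable (`var.use_existing_cluster` above) require Terraform v1.9 or newer; older versions only let a validation condition reference its own variable. A minimal reproduction of the pattern, with hypothetical variable names:

```hcl
# Cross-variable validation (Terraform >= 1.9): dependent_value must be
# set whenever flag is enabled.
variable "flag" {
  type    = bool
  default = false
}

variable "dependent_value" {
  type    = string
  default = ""

  validation {
    condition     = var.flag == false || var.dependent_value != ""
    error_message = "dependent_value must be non-empty when flag is true."
  }
}
```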