diff --git a/eks-cluster.cfhighlander.rb b/eks-cluster.cfhighlander.rb index 044e342..199097e 100644 --- a/eks-cluster.cfhighlander.rb +++ b/eks-cluster.cfhighlander.rb @@ -19,8 +19,19 @@ ComponentParam 'DesiredCapacity', '1' ComponentParam 'MinSize', '1' ComponentParam 'MaxSize', '2' - end - LambdaFunctions 'draining_lambda' + fargate_profiles.each do |profile| + name = profile['name'].gsub('-','').gsub('_','').capitalize + ComponentParam "#{name}FargateProfileName", '' + end if defined? fargate_profiles + + if defined?(managed_node_group['enabled']) && managed_node_group['enabled'] + ComponentParam 'ForceUpdateEnabled', false + ComponentParam 'InstanceTypes', '' + end + + end + + LambdaFunctions 'draining_lambda' unless defined?(managed_node_group['enabled']) && managed_node_group['enabled'] end diff --git a/eks-cluster.cfndsl.rb b/eks-cluster.cfndsl.rb index f6a1287..66d2119 100644 --- a/eks-cluster.cfndsl.rb +++ b/eks-cluster.cfndsl.rb @@ -5,7 +5,7 @@ tags = [] extra_tags = external_parameters.fetch(:extra_tags, {}) - extra_tags.each { |key,value| tags << { Key: FnSub(key), Value: FnSub(value) } } + extra_tags.each { |key,value| tags << { Key: key, Value: FnSub(value) } } IAM_Role(:EksClusterRole) { AssumeRolePolicyDocument service_assume_role_policy('eks') @@ -16,26 +16,36 @@ ]) } - AutoScaling_LifecycleHook(:DrainingLifecycleHook) { - AutoScalingGroupName Ref('EksNodeAutoScalingGroup') - HeartbeatTimeout 450 - LifecycleTransition 'autoscaling:EC2_INSTANCE_TERMINATING' - } + fargate_profiles = external_parameters.fetch(:fargate_profiles, []) - Lambda_Permission(:DrainingLambdaPermission) { - Action 'lambda:InvokeFunction' - FunctionName FnGetAtt('Drainer', 'Arn') - Principal 'events.amazonaws.com' - SourceArn FnGetAtt('LifecycleEvent', 'Arn') - } + IAM_Role(:PodExecutionRoleArn) { + AssumeRolePolicyDocument service_assume_role_policy('eks-fargate-pods') + Path '/' + ManagedPolicyArns([ + 'arn:aws:iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy' + ]) + } unless fargate_profiles == [] - 
draining_lambda = external_parameters[:draining_lambda] - Events_Rule(:LifecycleEvent) { - Description FnSub("Rule for ${EnvironmentName} eks draining lifecycle hook") - State 'ENABLED' - EventPattern draining_lambda['event']['pattern'] - Targets draining_lambda['event']['targets'] - } + fargate_profiles.each do |profile| + name = profile['name'].gsub('-','').gsub('_','').capitalize + unless profile.has_key?('selectors') + raise ArgumentError, "Selectors must be defined for fargate profiles" + end + Condition("#{name}FargateProfileNameSet", FnNot(FnEquals(Ref("#{name}FargateProfileName"), ''))) + Resource("#{name}FargateProfile") do + Type 'AWS::EKS::FargateProfile' + Property('ClusterName', Ref(:EksCluster)) + Property('FargateProfileName', + FnIf("#{name}FargateProfileNameSet", + Ref("#{name}FargateProfileName"), + FnSub("${EnvironmentName}-#{name}-fargate-profile")) + ) + Property('PodExecutionRoleArn', FnGetAtt(:PodExecutionRoleArn, :Arn)) + Property('Subnets', FnSplit(',', Ref('SubnetIds'))) + Property('Tags', [{ Key: 'Name', Value: FnSub("${EnvironmentName}-#{name}-fargate-profile")}] + tags) + Property('Selectors', profile['selectors']) + end + end EC2_SecurityGroup(:EksClusterSecurityGroup) { VpcId Ref('VPCId') @@ -151,88 +161,149 @@ Roles [Ref(:EksNodeRole)] end - # Setup userdata string - node_userdata = "#!/bin/bash\nset -o xtrace\n" - node_userdata << external_parameters.fetch(:eks_bootstrap, '') - node_userdata << userdata = external_parameters.fetch(:userdata, '') - node_userdata << cfnsignal = external_parameters.fetch(:cfnsignal, '') - - launch_template_tags = [ - { Key: 'Name', Value: FnSub("${EnvironmentName}-eks-node-xx") }, - { Key: FnSub("kubernetes.io/cluster/${EksCluster}"), Value: 'owned' } - ] - launch_template_tags += tags - - template_data = { - SecurityGroupIds: [ Ref(:EksNodeSecurityGroup) ], - TagSpecifications: [ - { ResourceType: 'instance', Tags: launch_template_tags }, - { ResourceType: 'volume', Tags: launch_template_tags } - ], - 
UserData: FnBase64(FnSub(node_userdata)), - IamInstanceProfile: { Name: Ref(:EksNodeInstanceProfile) }, - KeyName: FnIf('KeyNameSet', Ref('KeyName'), Ref('AWS::NoValue')), - ImageId: Ref('ImageId'), - Monitoring: { Enabled: detailed_monitoring }, - InstanceType: Ref('InstanceType') - } + managed_node_group = external_parameters.fetch(:managed_node_group, {}) + managed_node_group_use_launch_template = managed_node_group['launch_template'] ? managed_node_group['launch_template'] : false + if !managed_node_group['enabled'] || managed_node_group_use_launch_template + # Setup userdata string + node_userdata = "#!/bin/bash\nset -o xtrace\n" + node_userdata << external_parameters.fetch(:eks_bootstrap, '') + node_userdata << userdata = external_parameters.fetch(:userdata, '') + node_userdata << cfnsignal = external_parameters.fetch(:cfnsignal, '') - spot = external_parameters.fetch(:spot, {}) - unless spot.empty? - spot_options = { - MarketType: 'spot', - SpotOptions: { - SpotInstanceType: (defined?(spot['type']) ? spot['type'] : 'one-time'), - MaxPrice: FnSub(spot['price']) - } + launch_template_tags = [ + { Key: 'Name', Value: FnSub("${EnvironmentName}-eks-node-xx") }, + { Key: FnSub("kubernetes.io/cluster/${EksCluster}"), Value: 'owned' } + ] + launch_template_tags += tags + + template_data = { + SecurityGroupIds: [ Ref(:EksNodeSecurityGroup) ], + TagSpecifications: [ + { ResourceType: 'instance', Tags: launch_template_tags }, + { ResourceType: 'volume', Tags: launch_template_tags } + ], + UserData: FnBase64(FnSub(node_userdata)), + IamInstanceProfile: { Name: Ref(:EksNodeInstanceProfile) }, + KeyName: FnIf('KeyNameSet', Ref('KeyName'), Ref('AWS::NoValue')), + ImageId: Ref('ImageId'), + Monitoring: { Enabled: detailed_monitoring }, + InstanceType: Ref('InstanceType') } - template_data[:InstanceMarketOptions] = FnIf('SpotEnabled', spot_options, Ref('AWS::NoValue')) + + spot = external_parameters.fetch(:spot, {}) + unless spot.empty? 
+ spot_options = { + MarketType: 'spot', + SpotOptions: { + SpotInstanceType: (defined?(spot['type']) ? spot['type'] : 'one-time'), + MaxPrice: FnSub(spot['price']) + } + } + template_data[:InstanceMarketOptions] = FnIf('SpotEnabled', spot_options, Ref('AWS::NoValue')) + + end + # Remove options that are not allowed with node groups if we specify our own launch template + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-eks-nodegroup-launchtemplatespecification.html + [:InstanceMarketOptions, :IamInstanceProfile].each {|k| template_data.delete(k) if template_data.has_key?(k)} if managed_node_group_use_launch_template + + EC2_LaunchTemplate(:EksNodeLaunchTemplate) { + LaunchTemplateData(template_data) + } end - EC2_LaunchTemplate(:EksNodeLaunchTemplate) { - LaunchTemplateData(template_data) - } + if managed_node_group['enabled'] + node_group_tags = [{ Key: 'Name', Value: FnSub("${EnvironmentName}-eks-managed-node-group")}] + tags + Condition("InstancesSpecified", FnNot(FnEquals(Ref('InstanceTypes'), ''))) + Resource(:ManagedNodeGroup) do + Type 'AWS::EKS::Nodegroup' + Property('ClusterName', Ref(:EksCluster)) + Property('NodegroupName', FnSub(managed_node_group['name'])) if managed_node_group.has_key?('name') + Property('NodeRole', FnGetAtt(:EksNodeRole, :Arn)) + Property('Subnets', FnSplit(',', Ref('SubnetIds'))) + Property('Tags', Hash[node_group_tags.collect {|obj| [obj[:Key], obj[:Value]]}]) + Property('DiskSize', managed_node_group['disk_size']) if managed_node_group.has_key?('disk_size') && !managed_node_group_use_launch_template + Property('LaunchTemplate', { + Id: Ref(:EksNodeLaunchTemplate), + Version: FnGetAtt(:EksNodeLaunchTemplate, :LatestVersionNumber) + }) if managed_node_group_use_launch_template + Property('ForceUpdateEnabled', Ref(:ForceUpdateEnabled)) + Property('InstanceTypes', FnIf('InstancesSpecified', Ref('InstanceTypes'), Ref('AWS::NoValue'))) #Default is t3.medium + Property('ScalingConfig', { + DesiredSize: 
Ref('DesiredCapacity'), + MinSize: Ref('MinSize'), + MaxSize: Ref('MaxSize') + }) + Property('Labels', managed_node_group['labels']) if managed_node_group.has_key?('labels') + end + else + AutoScaling_LifecycleHook(:DrainingLifecycleHook) { + AutoScalingGroupName Ref('EksNodeAutoScalingGroup') + HeartbeatTimeout 450 + LifecycleTransition 'autoscaling:EC2_INSTANCE_TERMINATING' + } - asg_tags = [ - { Key: FnSub("k8s.io/cluster/${EksCluster}"), Value: 'owned' }, - { Key: 'k8s.io/cluster-autoscaler/enabled', Value: Ref('EnableScaling') } - ] - asg_tags = tags.clone.map(&:clone).concat(asg_tags).uniq.each {|tag| tag[:PropagateAtLaunch] = false } - AutoScaling_AutoScalingGroup(:EksNodeAutoScalingGroup) { - UpdatePolicy(:AutoScalingRollingUpdate, { - MaxBatchSize: '1', - MinInstancesInService: FnIf('SpotEnabled', 0, Ref('DesiredCapacity')), - SuspendProcesses: %w(HealthCheck ReplaceUnhealthy AZRebalance AlarmNotification ScheduledActions), - PauseTime: 'PT5M' - }) - DesiredCapacity Ref('DesiredCapacity') - MinSize Ref('MinSize') - MaxSize Ref('MaxSize') - VPCZoneIdentifiers FnSplit(',', Ref('SubnetIds')) - LaunchTemplate({ - LaunchTemplateId: Ref(:EksNodeLaunchTemplate), - Version: FnGetAtt(:EksNodeLaunchTemplate, :LatestVersionNumber) - }) - Tags asg_tags - } + Lambda_Permission(:DrainingLambdaPermission) { + Action 'lambda:InvokeFunction' + FunctionName FnGetAtt('Drainer', 'Arn') + Principal 'events.amazonaws.com' + SourceArn FnGetAtt('LifecycleEvent', 'Arn') + } + + draining_lambda = external_parameters[:draining_lambda] + Events_Rule(:LifecycleEvent) { + Description FnSub("Rule for ${EnvironmentName} eks draining lifecycle hook") + State 'ENABLED' + EventPattern draining_lambda['event']['pattern'] + Targets draining_lambda['event']['targets'] + } + + Output(:DrainingLambdaRole) { + Value(FnGetAtt(:LambdaRoleDraining, :Arn)) + Export FnSub("${EnvironmentName}-#{external_parameters[:component_name]}-DrainingLambdaRole") + } + + asg_tags = [ + { Key: 
FnSub("k8s.io/cluster/${EksCluster}"), Value: 'owned' }, + { Key: 'k8s.io/cluster-autoscaler/enabled', Value: Ref('EnableScaling') } + ] + asg_tags = tags.clone.map(&:clone).concat(asg_tags).uniq.each {|tag| tag[:PropagateAtLaunch] = false } + AutoScaling_AutoScalingGroup(:EksNodeAutoScalingGroup) { + UpdatePolicy(:AutoScalingRollingUpdate, { + MaxBatchSize: '1', + MinInstancesInService: FnIf('SpotEnabled', 0, Ref('DesiredCapacity')), + SuspendProcesses: %w(HealthCheck ReplaceUnhealthy AZRebalance AlarmNotification ScheduledActions), + PauseTime: 'PT5M' + }) + DesiredCapacity Ref('DesiredCapacity') + MinSize Ref('MinSize') + MaxSize Ref('MaxSize') + VPCZoneIdentifiers FnSplit(',', Ref('SubnetIds')) + LaunchTemplate({ + LaunchTemplateId: Ref(:EksNodeLaunchTemplate), + Version: FnGetAtt(:EksNodeLaunchTemplate, :LatestVersionNumber) + }) + Tags asg_tags + } + end Output(:EksNodeSecurityGroup) { Value(Ref(:EksNodeSecurityGroup)) } - Output(:EksClusterName) { - Value(Ref(:EksCluster)) + Output(:EksClusterSecurityGroup) { + Value(Ref(:EksClusterSecurityGroup)) } - Output(:DrainingLambdaRole) { - Value(FnGetAtt(:LambdaRoleDraining, :Arn)) + Output(:EksClusterName) { + Value(Ref(:EksCluster)) } Output(:EksNodeRole) { Value(FnGetAtt(:EksNodeRole, :Arn)) + Export FnSub("${EnvironmentName}-#{external_parameters[:component_name]}-EksNodeRole") } end diff --git a/tests/fargate.test.yaml b/tests/fargate.test.yaml new file mode 100644 index 0000000..01a895c --- /dev/null +++ b/tests/fargate.test.yaml @@ -0,0 +1,31 @@ +test_metadata: + type: config + name: fargate + description: test for creating a fargate profile + +cluster_name: ${EnvironmentName}-Cluster +eks_version: 1.18 + +fargate_profiles: + - + name: batch + selectors: + - + Namespace: + Fn::Sub: ${EnvironmentName} + Labels: + - + Key: MyKey + Value: MyValue + +extra_tags: + Cluster: ${EnvironmentName}-Cluster + +iam: + services: + - ec2 + - ssm + policies: + ssm_get_parameters: + action: + - ssm:GetParametersByPath diff 
--git a/tests/managed_node_group.test.yaml b/tests/managed_node_group.test.yaml new file mode 100644 index 0000000..ddd57af --- /dev/null +++ b/tests/managed_node_group.test.yaml @@ -0,0 +1,29 @@ +test_metadata: + type: config + name: managed_node_group + description: test for creating a managed node group + +cluster_name: ${EnvironmentName}-Cluster +eks_version: 1.18 + +managed_node_group: + name: my-node-group + enabled: true + disk_size: 40 + labels: + Key1: Value1 + Key2: + Fn::Sub: Value2 + + +extra_tags: + Cluster: ${EnvironmentName}-Cluster + +iam: + services: + - ec2 + - ssm + policies: + ssm_get_parameters: + action: + - ssm:GetParametersByPath diff --git a/tests/managed_node_group_launch_template.test.yaml b/tests/managed_node_group_launch_template.test.yaml new file mode 100644 index 0000000..e5ee564 --- /dev/null +++ b/tests/managed_node_group_launch_template.test.yaml @@ -0,0 +1,30 @@ +test_metadata: + type: config + name: managed_node_group_with_launch_template + description: test for creating a managed node group with a custom launch template + +cluster_name: ${EnvironmentName}-Cluster +eks_version: 1.18 + +managed_node_group: + name: my-node-group + enabled: true + launch_template: true + + +spot: + type: persistent + price: ${SpotPrice} + + +extra_tags: + Cluster: ${EnvironmentName}-Cluster + +iam: + services: + - ec2 + - ssm + policies: + ssm_get_parameters: + action: + - ssm:GetParametersByPath