#!/bin/bash
# delete-cluster.sh
#
# Prerequisites (macOS):
# - aws cli   => to manage AWS resources
#             => pip install --upgrade --user awscli
#             => aws configure
# - jq        => to parse JSON results returned by the AWS CLI
#             => brew install jq
# - chronic   => to suppress output unless there's a non-zero exit code
#             => brew install moreutils
# - terraform => to destroy the DB on AWS
#             => brew install terraform
# - kops      => to delete the actual Kubernetes cluster
#             => brew install kops
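# Optional pre-flight check (an addition, not part of the original flow):
# verify that every tool this script calls is on the PATH before doing
# anything destructive.
for tool in aws jq chronic terraform kops envsubst openssl; do
  if ! command -v "$tool" > /dev/null 2>&1; then
    echo "Missing required tool: $tool"
    exit 1
  fi
done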
export PREFIX="chapati"
export URL="example.com"
echo "⚠️ WARNING ⚠️"
echo "You're about to delete a cluster"
echo "Please confirm, that you know what you're doing by writing 'i know what i am doing'"
read CONFIRM
if [ "$CONFIRM" = "i know what i am doing" ]
then
echo "No risk no fun, huh? Alright, here we go…"
else
echo "Cool, it's probably better this way :)"
exit 1
fi
printf "\n"
###################
# SPECIFY CLUSTER #
###################
printf "1️⃣ Please specify the cluster name (e.g. 'canary', or 'dev'): "
read CLUSTER_NAME
if [ "$CLUSTER_NAME" != "canary" ] && [ "$CLUSTER_NAME" != "dev" ]
then
echo "Sorry, but I can only help you with the 'dev' and 'canary' clusters right now"
exit 1
fi
printf "\n"
###############################
# CLEAN UP ROUTE 53 RESOURCES #
###############################
echo "2️⃣ Clean Up Route53 Resources"
printf " What's the subdomain associated with this cluster?\n"
printf " e.g. 'development' or 'canary': "
read -r SUBDOMAIN
export CLUSTER_FQDN="$SUBDOMAIN.$URL"
export MAIN_HOSTED_ZONE_ID=$(aws route53 list-hosted-zones | jq -r --arg url "$URL." '.HostedZones[] | select(.Name == $url) | .Id')
export CLUSTER_HOSTED_ZONE_ID=$(aws route53 list-hosted-zones | jq -r --arg url "$CLUSTER_FQDN." '.HostedZones[] | select(.Name == $url) | .Id')
export CLUSTER_NS_RECORDS=$(aws route53 list-resource-record-sets --hosted-zone-id "$MAIN_HOSTED_ZONE_ID" | jq -r --arg url "$CLUSTER_FQDN." '.ResourceRecordSets[] | select(.Name == $url) | .ResourceRecords')
export CLUSTER_A_RECORD=$(aws route53 list-resource-record-sets --hosted-zone-id "$CLUSTER_HOSTED_ZONE_ID" | jq -r '.ResourceRecordSets[] | select(.Type == "A") | .')
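# Sanity guard (an addition, not in the original script): if either lookup
# came back empty, the deletion calls below would fire with blank zone IDs,
# so bail out early instead.
if [ -z "$MAIN_HOSTED_ZONE_ID" ] || [ -z "$CLUSTER_HOSTED_ZONE_ID" ]; then
  echo "Could not find hosted zone(s) for '$URL' / '$CLUSTER_FQDN', aborting"
  exit 1
fi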
printf "\n"
printf " a) Deleting NS records associated with cluster from main hosted zone…"
envsubst < templates/route53-delete-cluster-ns-records-from-main-hosted-zone.template.json > delete-cluster-ns-records-from-main-hosted-zone.json
chronic aws route53 change-resource-record-sets --hosted-zone-id "$MAIN_HOSTED_ZONE_ID" --change-batch file://delete-cluster-ns-records-from-main-hosted-zone.json
chronic rm delete-cluster-ns-records-from-main-hosted-zone.json
printf " ✅ \n"
printf " b) Deleting A record from hosted zone…"
envsubst < templates/route53-delete-cluster-records-from-hosted-zone.template.json > delete-cluster-records-from-hosted-zone.json
chronic aws route53 change-resource-record-sets --hosted-zone-id "$CLUSTER_HOSTED_ZONE_ID" --change-batch file://delete-cluster-records-from-hosted-zone.json
chronic rm delete-cluster-records-from-hosted-zone.json
printf " ✅ \n"
printf " c) Deleting hosted zone itself…"
chronic aws route53 delete-hosted-zone --id "$CLUSTER_HOSTED_ZONE_ID"
printf " ✅ \n"
printf "\n"
###################
# DELETE DATABASE #
###################
echo "3️⃣ Delete Database"
DB_CONFIG_BUCKET="$PREFIX.$CLUSTER_NAME.db.terraform.config"
printf " a) Fetching DB terraform state from S3…"
chronic aws s3 cp "s3://$DB_CONFIG_BUCKET/terraform.tfstate.enc" .
printf " ✅ \n"
printf " b) Decrypting DB terraform state…"
chronic openssl enc -d -aes-256-cbc -salt -in terraform.tfstate.enc -out terraform.tfstate
printf " ✅ \n"
echo " c) Destroying DB…"
terraform destroy
printf " d) Deleting terraform config S3 bucket…"
./helper/s3-delete-buckets.sh "$DB_CONFIG_BUCKET"
printf " ✅ \n"
printf " e) Clean up temp files…"
rm terraform.tfstate*
printf " ✅ \n"
printf "\n"
#######################
# DELETE KOPS CLUSTER #
#######################
echo "4️⃣ Delete kops cluster"
CLUSTER_URL="k8-$CLUSTER_NAME.$URL"
printf " a) Detaching autoscaling policy…"
ASG_NAME="nodes.$CLUSTER_URL"
ASG_POLICY_NAME=aws-cluster-autoscaler
ASG_POLICY_ARN=$(aws iam list-policies | jq -r --arg policy $ASG_POLICY_NAME '.Policies[] | select(.PolicyName == $policy) | .Arn')
aws iam detach-role-policy \
--role-name $ASG_POLICY_NAME \
--policy-arn $ASG_POLICY_ARN
printf " ✅ \n"
printf " b) Deleting kops cluster…"
KOPS_CONFIG_BUCKET=${PREFIX}.kops-${CLUSTER_NAME}.config
K8_CONFIG_BUCKET=${PREFIX}.k8-${CLUSTER_NAME}.config
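# Note: the same command without --yes is a dry run; kops lists everything
# it would delete without touching it.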
kops delete cluster \
  --state "s3://${KOPS_CONFIG_BUCKET}" \
  --name "${CLUSTER_URL}" \
  --yes
printf " ✅ \n"
printf "\n"
############################
# DELETE S3 CONFIG BUCKETS #
############################
echo "5️⃣ Delete S3 config buckets"
printf " a) Deleting kubernetes config S3 bucket…"
./helper/s3-delete-buckets.sh "$K8_CONFIG_BUCKET"
printf " ✅ \n"
printf " b) Deleting kops config S3 bucket…"
./helper/s3-delete-buckets.sh "$KOPS_CONFIG_BUCKET"
printf " ✅ \n"
printf "\n"
#########
# Done! #
#########
echo "🏁 Finished! 🏁"
echo " All cluster resources have been cleared"