# =====================
# Main settings
# =====================
k3s_version: "v1.28.5+k3s1" # https://github.com/k3s-io/k3s/releases
# ============================
# Tools installed on machine
# ============================
tool_helm_version: "v3.14.0" # https://github.com/helm/helm/releases
tool_kubeseal_version: "0.25.0" # https://github.com/bitnami-labs/sealed-secrets/releases (note: only versions that ship the "kubeseal-linux-amd64" release asset)
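# Usage sketch once kubeseal is installed (secret name and key are placeholders; requires access to the sealed-secrets controller or a --cert file):
#   kubectl create secret generic my-secret --dry-run=client --from-literal=key=value -o yaml \
#     | kubeseal --format yaml > my-sealed-secret.yaml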
timezone: "Europe/Warsaw"
force_k3s_upgrade: false # set from the CLI with "-e force_k3s_upgrade=true" to upgrade the cluster with Ansible
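# Example invocation (playbook and inventory names are placeholders, adjust to your setup):
#   ansible-playbook -i inventory/hosts.cfg playbook.yaml -e force_k3s_upgrade=true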
# ========================================================
# Flannel + cross-node VPN
# --------------------------------------------------------
# Cross-node VPN is out of the scope of this playbook.
# Recommended: set up a mesh VPN between the nodes.
#
# https://github.com/RiotKit/ansible-role-wireguard-mesh
# ========================================================
flannel_iface: wg0 # set per node if you have multiple VPN interfaces; if the nodes communicate over a public network you may use e.g. eth0 (see the host_vars sketch below)
vpn_enabled: true # communicate between cluster nodes over the VPN; K3s will use {{ flannel_iface }} as its main interface when vpn_enabled=true
vpn_flannel_backend: vxlan
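# Per-node override sketch (assumes a standard host_vars/ layout; hostname and interface are placeholders):
#   # host_vars/node2.yaml
#   flannel_iface: wg1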
# =========
# Admin VPN
# =========
admin_vpn_enabled: false
admin_vpn_ip: 10.241.0.1
admin_vpn_interface: adm0
admin_vpn_peers: []
# ----------------------------------------------------------------------------------------------
# To generate a key pair, run on your computer: wg genkey > ~/wg.priv; cat ~/wg.priv | wg pubkey
# ----------------------------------------------------------------------------------------------
#
# - public_key: xxxx
# ip: 10.1.2.3/32
# persistent_keep_alive: 60 # optional
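#
# Client-side sketch of a matching WireGuard config (all values are placeholders;
# the listen port and allowed subnet depend on how the admin VPN is configured on the node):
#   [Interface]
#   PrivateKey = <contents of ~/wg.priv>
#   Address = 10.1.2.3/32
#
#   [Peer]
#   PublicKey = <server public key>
#   Endpoint = <node public IP>:<admin VPN port>
#   AllowedIPs = 10.241.0.0/24
#   PersistentKeepalive = 60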
# =========
# Firewall
# =========
firewall_enabled: true # install and configure UFW firewall
firewall_interface: eth0
firewall_inventory_hosts_group_name: cluster # name of the Ansible inventory group from which to take the list of hosts
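# Inventory sketch (INI format; hostnames and addresses are placeholders):
#   [cluster]
#   node1 ansible_host=203.0.113.10
#   node2 ansible_host=203.0.113.11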
# ---------------------------------------------------------------------------
# Internal ports = ports available between nodes, not exposed to the internet
# ---------------------------------------------------------------------------
firewall_ports_internal:
# kubelet API (metrics, logs, exec)
- port: 10250
proto: tcp
# Node Exporter (Prometheus/VictoriaMetrics)
- port: 9100
proto: tcp
# exposed NodePorts
- port: "30000:32767"
proto: tcp
- port: "30000:32767"
proto: udp
# API Server
- port: 6443
proto: tcp
# Ingress NGINX
- port: 8082
proto: tcp
# --------------------------------------
# Ports exposed to the public (internet)
# --------------------------------------
firewall_ports_public:
- port: 80
proto: tcp
- port: 443
proto: tcp
- port: 22
proto: tcp
# VPN
- port: 51820
proto: udp
- port: 51821
proto: udp
- port: 443
proto: udp
# configure these if you run multiple clusters reachable over the same network/VPN, so their CIDRs do not overlap
net_cluster_cidr: "10.42.0.0/16"
net_services_cidr: "10.43.0.0/16"
cluster_data_path: "/var/lib/rancher/k3s"
cluster_api_bind_address: "0.0.0.0"
cluster_api_restrict_access: # TODO: restrict access via iptables/ufw
- "{{ net_cluster_cidr }}"
- "{{ vpn_cidr }}"
- "{{ net_services_cidr }}"
administrative_services_restrict_access: # restrict access by subnet/IP to ArgoCD and other administrative services
- "{{ net_cluster_cidr }}"
- "{{ vpn_cidr }}"
- "{{ net_services_cidr }}"
node_labels: []
# - arch=arm
node_taints: ""
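# Example (assuming the value is passed to the k3s --node-taint flag; key/value are placeholders):
# node_taints: "dedicated=storage:NoSchedule"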
primary_api_allowed_ips:
- "{{ vpn_cidr }}"
kubelet_args: []
# - "--eviction-hard=memory.available<350Mi,nodefs.available<20Gi"
# ---------------------------------------
# Database: https://docs.k3s.io/datastore
# ---------------------------------------
k3s_datastore_endpoint: sqlite
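# External datastore sketch (credentials and address are placeholders; see https://docs.k3s.io/datastore for supported backends):
#   k3s_datastore_endpoint: "postgres://user:password@203.0.113.5:5432/k3s"
# Keep "sqlite" for the default embedded datastore on single-server setups.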
# set it to true for better multi-node cluster stability
# additionally, it is good practice to define resource requests and limits on your pods
noschedule_on_primary: false
# ====================
# Security/Sandboxing
# ====================
runsc_platform: "ptrace" # https://gvisor.dev/docs/architecture_guide/platforms/
crun_version: "1.14"
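# Workloads opt into gVisor via a RuntimeClass; minimal sketch (the RuntimeClass name and
# the "runsc" handler are assumptions, adjust to your containerd runtime configuration):
#   apiVersion: node.k8s.io/v1
#   kind: RuntimeClass
#   metadata:
#     name: gvisor
#   handler: runsc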