diff --git a/Makefile b/Makefile
index 5f9505de712..4ae335f81c4 100644
--- a/Makefile
+++ b/Makefile
@@ -449,7 +449,12 @@ create-workload-cluster: $(ENVSUBST)
timeout --foreground 600 bash -c "while ! kubectl --kubeconfig=./kubeconfig get nodes | grep master; do sleep 1; done"
# Deploy calico
- kubectl --kubeconfig=./kubeconfig apply -f templates/addons/calico.yaml
+ @if [[ "${CLUSTER_TEMPLATE}" == *ipv6* ]]; then \
+ kubectl --kubeconfig=./kubeconfig apply -f templates/addons/calico-ipv6.yaml; \
+ else \
+ kubectl --kubeconfig=./kubeconfig apply -f templates/addons/calico.yaml; \
+ fi
+
@echo 'run "kubectl --kubeconfig=./kubeconfig ..." to work with the new target cluster'
diff --git a/api/v1alpha2/azurecluster_conversion.go b/api/v1alpha2/azurecluster_conversion.go
index 052d7590c64..9d2919c4ef2 100644
--- a/api/v1alpha2/azurecluster_conversion.go
+++ b/api/v1alpha2/azurecluster_conversion.go
@@ -59,13 +59,14 @@ func (src *AzureCluster) ConvertTo(dstRaw conversion.Hub) error { // nolint
}
dst.Status.FailureDomains = restored.Status.FailureDomains
+ dst.Spec.NetworkSpec.Vnet.CIDRBlocks = restored.Spec.NetworkSpec.Vnet.CIDRBlocks
for _, restoredSubnet := range restored.Spec.NetworkSpec.Subnets {
if restoredSubnet != nil {
for _, dstSubnet := range dst.Spec.NetworkSpec.Subnets {
if dstSubnet != nil && dstSubnet.Name == restoredSubnet.Name {
dstSubnet.RouteTable = restoredSubnet.RouteTable
-
+ dstSubnet.CIDRBlocks = restoredSubnet.CIDRBlocks
dstSubnet.SecurityGroup.IngressRules = restoredSubnet.SecurityGroup.IngressRules
}
}
@@ -203,6 +204,11 @@ func Convert_v1alpha3_NetworkSpec_To_v1alpha2_NetworkSpec(in *infrav1alpha3.Netw
return nil
}
+// Convert_v1alpha3_VnetSpec_To_v1alpha2_VnetSpec.
+func Convert_v1alpha3_VnetSpec_To_v1alpha2_VnetSpec(in *infrav1alpha3.VnetSpec, out *VnetSpec, s apiconversion.Scope) error { //nolint
+ return autoConvert_v1alpha3_VnetSpec_To_v1alpha2_VnetSpec(in, out, s)
+}
+
// Convert_v1alpha2_SubnetSpec_To_v1alpha3_SubnetSpec.
func Convert_v1alpha2_SubnetSpec_To_v1alpha3_SubnetSpec(in *SubnetSpec, out *infrav1alpha3.SubnetSpec, s apiconversion.Scope) error { //nolint
return autoConvert_v1alpha2_SubnetSpec_To_v1alpha3_SubnetSpec(in, out, s)
diff --git a/api/v1alpha2/azuremachine_conversion.go b/api/v1alpha2/azuremachine_conversion.go
index 09eb4e72c84..f505b176689 100644
--- a/api/v1alpha2/azuremachine_conversion.go
+++ b/api/v1alpha2/azuremachine_conversion.go
@@ -57,6 +57,7 @@ func restoreAzureMachineSpec(restored, dst *infrav1alpha3.AzureMachineSpec) {
dst.AcceleratedNetworking = restored.AcceleratedNetworking
}
dst.FailureDomain = restored.FailureDomain
+ dst.EnableIPForwarding = restored.EnableIPForwarding
if restored.SpotVMOptions != nil {
dst.SpotVMOptions = restored.SpotVMOptions.DeepCopy()
}
diff --git a/api/v1alpha2/zz_generated.conversion.go b/api/v1alpha2/zz_generated.conversion.go
index 40a56c0922f..b05200f019d 100644
--- a/api/v1alpha2/zz_generated.conversion.go
+++ b/api/v1alpha2/zz_generated.conversion.go
@@ -211,11 +211,6 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
- if err := s.AddGeneratedConversionFunc((*v1alpha3.VnetSpec)(nil), (*VnetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha3_VnetSpec_To_v1alpha2_VnetSpec(a.(*v1alpha3.VnetSpec), b.(*VnetSpec), scope)
- }); err != nil {
- return err
- }
if err := s.AddConversionFunc((*AzureClusterSpec)(nil), (*v1alpha3.AzureClusterSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_AzureClusterSpec_To_v1alpha3_AzureClusterSpec(a.(*AzureClusterSpec), b.(*v1alpha3.AzureClusterSpec), scope)
}); err != nil {
@@ -316,6 +311,11 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
+ if err := s.AddConversionFunc((*v1alpha3.VnetSpec)(nil), (*VnetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1alpha3_VnetSpec_To_v1alpha2_VnetSpec(a.(*v1alpha3.VnetSpec), b.(*VnetSpec), scope)
+ }); err != nil {
+ return err
+ }
return nil
}
@@ -582,6 +582,7 @@ func autoConvert_v1alpha3_AzureMachineSpec_To_v1alpha2_AzureMachineSpec(in *v1al
out.SSHPublicKey = in.SSHPublicKey
out.AdditionalTags = *(*Tags)(unsafe.Pointer(&in.AdditionalTags))
out.AllocatePublicIP = in.AllocatePublicIP
+ // WARNING: in.EnableIPForwarding requires manual conversion: does not exist in peer-type
// WARNING: in.AcceleratedNetworking requires manual conversion: does not exist in peer-type
// WARNING: in.SpotVMOptions requires manual conversion: does not exist in peer-type
return nil
@@ -1062,6 +1063,7 @@ func autoConvert_v1alpha3_SubnetSpec_To_v1alpha2_SubnetSpec(in *v1alpha3.SubnetS
out.ID = in.ID
out.Name = in.Name
out.CidrBlock = in.CidrBlock
+ // WARNING: in.CIDRBlocks requires manual conversion: does not exist in peer-type
out.InternalLBIPAddress = in.InternalLBIPAddress
if err := Convert_v1alpha3_SecurityGroup_To_v1alpha2_SecurityGroup(&in.SecurityGroup, &out.SecurityGroup, s); err != nil {
return err
@@ -1137,11 +1139,7 @@ func autoConvert_v1alpha3_VnetSpec_To_v1alpha2_VnetSpec(in *v1alpha3.VnetSpec, o
out.ID = in.ID
out.Name = in.Name
out.CidrBlock = in.CidrBlock
+ // WARNING: in.CIDRBlocks requires manual conversion: does not exist in peer-type
out.Tags = *(*Tags)(unsafe.Pointer(&in.Tags))
return nil
}
-
-// Convert_v1alpha3_VnetSpec_To_v1alpha2_VnetSpec is an autogenerated conversion function.
-func Convert_v1alpha3_VnetSpec_To_v1alpha2_VnetSpec(in *v1alpha3.VnetSpec, out *VnetSpec, s conversion.Scope) error {
- return autoConvert_v1alpha3_VnetSpec_To_v1alpha2_VnetSpec(in, out, s)
-}
diff --git a/api/v1alpha3/azurecluster_default.go b/api/v1alpha3/azurecluster_default.go
index 62c81098c9f..6807adcec2f 100644
--- a/api/v1alpha3/azurecluster_default.go
+++ b/api/v1alpha3/azurecluster_default.go
@@ -29,6 +29,15 @@ const (
DefaultNodeSubnetCIDR = "10.1.0.0/16"
)
+const (
+ // DefaultVnetIPv6CIDR is the default IPv6 Vnet CIDR
+ DefaultVnetIPv6CIDR = "2001:1234:5678:9a00::/56"
+ // DefaultControlPlaneSubnetIPv6CIDR is the default Control Plane Subnet IPv6 CIDR
+ DefaultControlPlaneSubnetIPv6CIDR = "2001:1234:5678:9abc::/64"
+ // DefaultNodeSubnetIPv6CIDR is the default Node Subnet IPv6 CIDR
+ DefaultNodeSubnetIPv6CIDR = "2001:1234:5678:9abd::/64"
+)
+
func (c *AzureCluster) setDefaults() {
c.setNetworkSpecDefaults()
}
@@ -52,8 +61,8 @@ func (c *AzureCluster) setVnetDefaults() {
if c.Spec.NetworkSpec.Vnet.Name == "" {
c.Spec.NetworkSpec.Vnet.Name = generateVnetName(c.ObjectMeta.Name)
}
- if c.Spec.NetworkSpec.Vnet.CidrBlock == "" {
- c.Spec.NetworkSpec.Vnet.CidrBlock = DefaultVnetCIDR
+ if len(c.Spec.NetworkSpec.Vnet.CIDRBlocks) == 0 {
+ c.Spec.NetworkSpec.Vnet.CIDRBlocks = []string{DefaultVnetCIDR}
}
}
@@ -73,8 +82,8 @@ func (c *AzureCluster) setSubnetDefaults() {
if cpSubnet.Name == "" {
cpSubnet.Name = generateControlPlaneSubnetName(c.ObjectMeta.Name)
}
- if cpSubnet.CidrBlock == "" {
- cpSubnet.CidrBlock = DefaultControlPlaneSubnetCIDR
+ if len(cpSubnet.CIDRBlocks) == 0 {
+ cpSubnet.CIDRBlocks = []string{DefaultControlPlaneSubnetCIDR}
}
if cpSubnet.SecurityGroup.Name == "" {
cpSubnet.SecurityGroup.Name = generateControlPlaneSecurityGroupName(c.ObjectMeta.Name)
@@ -86,8 +95,8 @@ func (c *AzureCluster) setSubnetDefaults() {
if nodeSubnet.Name == "" {
nodeSubnet.Name = generateNodeSubnetName(c.ObjectMeta.Name)
}
- if nodeSubnet.CidrBlock == "" {
- nodeSubnet.CidrBlock = DefaultNodeSubnetCIDR
+ if len(nodeSubnet.CIDRBlocks) == 0 {
+ nodeSubnet.CIDRBlocks = []string{DefaultNodeSubnetCIDR}
}
if nodeSubnet.SecurityGroup.Name == "" {
nodeSubnet.SecurityGroup.Name = generateNodeSecurityGroupName(c.ObjectMeta.Name)
diff --git a/api/v1alpha3/azurecluster_default_test.go b/api/v1alpha3/azurecluster_default_test.go
index bb190d573d8..ec2a220581e 100644
--- a/api/v1alpha3/azurecluster_default_test.go
+++ b/api/v1alpha3/azurecluster_default_test.go
@@ -98,7 +98,7 @@ func TestVnetDefaults(t *testing.T) {
Vnet: VnetSpec{
ResourceGroup: "custom-vnet",
Name: "my-vnet",
- CidrBlock: DefaultVnetCIDR,
+ CIDRBlocks: []string{DefaultVnetCIDR},
},
Subnets: Subnets{
{
@@ -138,7 +138,7 @@ func TestVnetDefaults(t *testing.T) {
Vnet: VnetSpec{
ResourceGroup: "cluster-test",
Name: "cluster-test-vnet",
- CidrBlock: DefaultVnetCIDR,
+ CIDRBlocks: []string{DefaultVnetCIDR},
},
},
},
@@ -154,7 +154,7 @@ func TestVnetDefaults(t *testing.T) {
ResourceGroup: "cluster-test",
NetworkSpec: NetworkSpec{
Vnet: VnetSpec{
- CidrBlock: "10.0.0.0/16",
+ CIDRBlocks: []string{"10.0.0.0/16"},
},
},
},
@@ -169,7 +169,38 @@ func TestVnetDefaults(t *testing.T) {
Vnet: VnetSpec{
ResourceGroup: "cluster-test",
Name: "cluster-test-vnet",
- CidrBlock: "10.0.0.0/16",
+ CIDRBlocks: []string{"10.0.0.0/16"},
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "IPv6 enabled",
+ cluster: &AzureCluster{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "cluster-test",
+ },
+ Spec: AzureClusterSpec{
+ ResourceGroup: "cluster-test",
+ NetworkSpec: NetworkSpec{
+ Vnet: VnetSpec{
+ CIDRBlocks: []string{DefaultVnetCIDR, DefaultVnetIPv6CIDR},
+ },
+ },
+ },
+ },
+ output: &AzureCluster{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "cluster-test",
+ },
+ Spec: AzureClusterSpec{
+ ResourceGroup: "cluster-test",
+ NetworkSpec: NetworkSpec{
+ Vnet: VnetSpec{
+ ResourceGroup: "cluster-test",
+ Name: "cluster-test-vnet",
+ CIDRBlocks: []string{DefaultVnetCIDR, DefaultVnetIPv6CIDR},
},
},
},
@@ -217,14 +248,14 @@ func TestSubnetDefaults(t *testing.T) {
{
Role: SubnetControlPlane,
Name: "cluster-test-controlplane-subnet",
- CidrBlock: DefaultControlPlaneSubnetCIDR,
+ CIDRBlocks: []string{DefaultControlPlaneSubnetCIDR},
SecurityGroup: SecurityGroup{Name: "cluster-test-controlplane-nsg"},
RouteTable: RouteTable{Name: "cluster-test-node-routetable"},
},
{
Role: SubnetNode,
Name: "cluster-test-node-subnet",
- CidrBlock: DefaultNodeSubnetCIDR,
+ CIDRBlocks: []string{DefaultNodeSubnetCIDR},
SecurityGroup: SecurityGroup{Name: "cluster-test-node-nsg"},
RouteTable: RouteTable{Name: "cluster-test-node-routetable"},
},
@@ -243,14 +274,14 @@ func TestSubnetDefaults(t *testing.T) {
NetworkSpec: NetworkSpec{
Subnets: Subnets{
{
- Role: SubnetControlPlane,
- Name: "my-controlplane-subnet",
- CidrBlock: "10.0.0.16/24",
+ Role: SubnetControlPlane,
+ Name: "my-controlplane-subnet",
+ CIDRBlocks: []string{"10.0.0.16/24"},
},
{
- Role: SubnetNode,
- Name: "my-node-subnet",
- CidrBlock: "10.1.0.16/24",
+ Role: SubnetNode,
+ Name: "my-node-subnet",
+ CIDRBlocks: []string{"10.1.0.16/24"},
},
},
},
@@ -266,14 +297,14 @@ func TestSubnetDefaults(t *testing.T) {
{
Role: SubnetControlPlane,
Name: "my-controlplane-subnet",
- CidrBlock: "10.0.0.16/24",
+ CIDRBlocks: []string{"10.0.0.16/24"},
SecurityGroup: SecurityGroup{Name: "cluster-test-controlplane-nsg"},
RouteTable: RouteTable{Name: "cluster-test-node-routetable"},
},
{
Role: SubnetNode,
Name: "my-node-subnet",
- CidrBlock: "10.1.0.16/24",
+ CIDRBlocks: []string{"10.1.0.16/24"},
SecurityGroup: SecurityGroup{Name: "cluster-test-node-nsg"},
RouteTable: RouteTable{Name: "cluster-test-node-routetable"},
},
@@ -313,14 +344,14 @@ func TestSubnetDefaults(t *testing.T) {
{
Role: SubnetControlPlane,
Name: "cluster-test-controlplane-subnet",
- CidrBlock: DefaultControlPlaneSubnetCIDR,
+ CIDRBlocks: []string{DefaultControlPlaneSubnetCIDR},
SecurityGroup: SecurityGroup{Name: "cluster-test-controlplane-nsg"},
RouteTable: RouteTable{Name: "cluster-test-node-routetable"},
},
{
Role: SubnetNode,
Name: "cluster-test-node-subnet",
- CidrBlock: DefaultNodeSubnetCIDR,
+ CIDRBlocks: []string{DefaultNodeSubnetCIDR},
SecurityGroup: SecurityGroup{Name: "cluster-test-node-nsg"},
RouteTable: RouteTable{Name: "cluster-test-node-routetable"},
},
@@ -356,17 +387,72 @@ func TestSubnetDefaults(t *testing.T) {
{
Role: SubnetNode,
Name: "my-node-subnet",
- CidrBlock: DefaultNodeSubnetCIDR,
+ CIDRBlocks: []string{DefaultNodeSubnetCIDR},
SecurityGroup: SecurityGroup{Name: "cluster-test-node-nsg"},
RouteTable: RouteTable{Name: "cluster-test-node-routetable"},
},
{
Role: SubnetControlPlane,
Name: "cluster-test-controlplane-subnet",
- CidrBlock: DefaultControlPlaneSubnetCIDR,
+ CIDRBlocks: []string{DefaultControlPlaneSubnetCIDR},
+ SecurityGroup: SecurityGroup{Name: "cluster-test-controlplane-nsg"},
+ RouteTable: RouteTable{Name: "cluster-test-node-routetable"},
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "subnets specified with IPv6 enabled",
+ cluster: &AzureCluster{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "cluster-test",
+ },
+ Spec: AzureClusterSpec{
+ NetworkSpec: NetworkSpec{
+ Vnet: VnetSpec{
+ CIDRBlocks: []string{"2001:be00::1/56"},
+ },
+ Subnets: Subnets{
+ {
+ Name: "cluster-test-controlplane-subnet",
+ Role: "control-plane",
+ CIDRBlocks: []string{"2001:beef::1/64"},
+ },
+ {
+ Name: "cluster-test-node-subnet",
+ Role: "node",
+ CIDRBlocks: []string{"2001:beea::1/64"},
+ },
+ },
+ },
+ },
+ },
+ output: &AzureCluster{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "cluster-test",
+ },
+ Spec: AzureClusterSpec{
+ NetworkSpec: NetworkSpec{
+ Vnet: VnetSpec{
+ CIDRBlocks: []string{"2001:be00::1/56"},
+ },
+ Subnets: Subnets{
+ {
+ Role: SubnetControlPlane,
+ Name: "cluster-test-controlplane-subnet",
+ CIDRBlocks: []string{"2001:beef::1/64"},
SecurityGroup: SecurityGroup{Name: "cluster-test-controlplane-nsg"},
RouteTable: RouteTable{Name: "cluster-test-node-routetable"},
},
+ {
+ Role: SubnetNode,
+ Name: "cluster-test-node-subnet",
+ CIDRBlocks: []string{"2001:beea::1/64"},
+ SecurityGroup: SecurityGroup{Name: "cluster-test-node-nsg"},
+ RouteTable: RouteTable{Name: "cluster-test-node-routetable"},
+ },
},
},
},
diff --git a/api/v1alpha3/azuremachine_types.go b/api/v1alpha3/azuremachine_types.go
index f73fc743152..da16d5c5941 100644
--- a/api/v1alpha3/azuremachine_types.go
+++ b/api/v1alpha3/azuremachine_types.go
@@ -87,6 +87,12 @@ type AzureMachineSpec struct {
// +optional
AllocatePublicIP bool `json:"allocatePublicIP,omitempty"`
+ // EnableIPForwarding enables IP Forwarding in Azure, which is required for some CNIs to send traffic from pods on one machine
+ // to another. This is required for IPv6 with Calico in combination with User Defined Routes (set by the Azure cloud controller
+ // manager). Default is false (disabled).
+ // +optional
+ EnableIPForwarding bool `json:"enableIPForwarding,omitempty"`
+
// AcceleratedNetworking enables or disables Azure accelerated networking. If omitted, it will be set based on
// whether the requested VMSize supports accelerated networking.
// If AcceleratedNetworking is set to true with a VMSize that does not support it, Azure will return an error.
diff --git a/api/v1alpha3/types.go b/api/v1alpha3/types.go
index 3c53910d69c..8bdab71c6ca 100644
--- a/api/v1alpha3/types.go
+++ b/api/v1alpha3/types.go
@@ -60,9 +60,16 @@ type VnetSpec struct {
Name string `json:"name"`
// CidrBlock is the CIDR block to be used when the provider creates a managed virtual network.
+ // Deprecated: Use CIDRBlocks instead
+ // +optional
CidrBlock string `json:"cidrBlock,omitempty"`
+ // CIDRBlocks defines the virtual network's address space, specified as one or more address prefixes in CIDR notation.
+ // +optional
+ CIDRBlocks []string `json:"cidrBlocks,omitempty"`
+
// Tags is a collection of tags describing the resource.
+ // +optional
Tags Tags `json:"tags,omitempty"`
}
@@ -282,7 +289,7 @@ type AzureSharedGalleryImage struct {
// AvailabilityZone specifies an Azure Availability Zone
//
-// Deprecated: Use FailureDomain instead
+// Deprecated: Use FailureDomain instead
type AvailabilityZone struct {
ID *string `json:"id,omitempty"`
Enabled *bool `json:"enabled,omitempty"`
@@ -370,9 +377,14 @@ type SubnetSpec struct {
Name string `json:"name"`
// CidrBlock is the CIDR block to be used when the provider creates a managed Vnet.
+ // Deprecated: Use CIDRBlocks instead
// +optional
CidrBlock string `json:"cidrBlock,omitempty"`
+ // CIDRBlocks defines the subnet's address space, specified as one or more address prefixes in CIDR notation.
+ // +optional
+ CIDRBlocks []string `json:"cidrBlocks,omitempty"`
+
// InternalLBIPAddress is the IP address that will be used as the internal LB private IP.
// For the control plane subnet only.
// +optional
diff --git a/api/v1alpha3/zz_generated.deepcopy.go b/api/v1alpha3/zz_generated.deepcopy.go
index f4450b9eea9..5993c0ae405 100644
--- a/api/v1alpha3/zz_generated.deepcopy.go
+++ b/api/v1alpha3/zz_generated.deepcopy.go
@@ -824,6 +824,11 @@ func (in *SpotVMOptions) DeepCopy() *SpotVMOptions {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SubnetSpec) DeepCopyInto(out *SubnetSpec) {
*out = *in
+ if in.CIDRBlocks != nil {
+ in, out := &in.CIDRBlocks, &out.CIDRBlocks
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
in.SecurityGroup.DeepCopyInto(&out.SecurityGroup)
out.RouteTable = in.RouteTable
}
@@ -931,6 +936,11 @@ func (in *VM) DeepCopy() *VM {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VnetSpec) DeepCopyInto(out *VnetSpec) {
*out = *in
+ if in.CIDRBlocks != nil {
+ in, out := &in.CIDRBlocks, &out.CIDRBlocks
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
if in.Tags != nil {
in, out := &in.Tags, &out.Tags
*out = make(Tags, len(*in))
diff --git a/cloud/defaults.go b/cloud/defaults.go
index 3a6e830bc69..e33fec70b37 100644
--- a/cloud/defaults.go
+++ b/cloud/defaults.go
@@ -18,6 +18,7 @@ package azure
import (
"fmt"
+
"github.com/blang/semver"
"github.com/pkg/errors"
infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3"
@@ -31,6 +32,11 @@ const (
DefaultInternalLBIPAddress = "10.0.0.100"
)
+const (
+ // DefaultInternalLBIPv6Address is the default internal load balancer ip address
+ // DefaultInternalLBIPv6Address is the default IPv6 internal load balancer IP address
+)
+
const (
// DefaultImageOfferID is the default Azure Marketplace offer ID
DefaultImageOfferID = "capi"
diff --git a/cloud/interfaces.go b/cloud/interfaces.go
index 7af0171ec16..8f8515fcf4f 100644
--- a/cloud/interfaces.go
+++ b/cloud/interfaces.go
@@ -68,4 +68,5 @@ type ClusterDescriber interface {
NodeSubnet() *infrav1.SubnetSpec
ControlPlaneSubnet() *infrav1.SubnetSpec
RouteTable() *infrav1.RouteTable
+ IsIPv6Enabled() bool
}
diff --git a/cloud/mocks/service_mock.go b/cloud/mocks/service_mock.go
index bf3bb36400c..2507127e509 100644
--- a/cloud/mocks/service_mock.go
+++ b/cloud/mocks/service_mock.go
@@ -563,3 +563,17 @@ func (mr *MockClusterDescriberMockRecorder) RouteTable() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RouteTable", reflect.TypeOf((*MockClusterDescriber)(nil).RouteTable))
}
+
+// IsIPv6Enabled mocks base method.
+func (m *MockClusterDescriber) IsIPv6Enabled() bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IsIPv6Enabled")
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// IsIPv6Enabled indicates an expected call of IsIPv6Enabled.
+func (mr *MockClusterDescriberMockRecorder) IsIPv6Enabled() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsIPv6Enabled", reflect.TypeOf((*MockClusterDescriber)(nil).IsIPv6Enabled))
+}
diff --git a/cloud/scope/cluster.go b/cloud/scope/cluster.go
index 0d4ceba63eb..964cde426a7 100644
--- a/cloud/scope/cluster.go
+++ b/cloud/scope/cluster.go
@@ -19,6 +19,7 @@ package scope
import (
"context"
"fmt"
+ "k8s.io/utils/net"
"strconv"
"github.com/Azure/go-autorest/autorest/to"
@@ -112,22 +113,14 @@ func (s *ClusterScope) PublicIPSpecs() []azure.PublicIPSpec {
{
Name: s.Network().APIServerIP.Name,
DNSName: s.Network().APIServerIP.DNSName,
+ IsIPv6: false, // currently Azure requires an IPv4 LB rule to enable IPv6
},
}
}
// LBSpecs returns the load balancer specs.
func (s *ClusterScope) LBSpecs() []azure.LBSpec {
- return []azure.LBSpec{
- {
- // Internal control plane LB
- Name: azure.GenerateInternalLBName(s.ClusterName()),
- SubnetName: s.ControlPlaneSubnet().Name,
- SubnetCidr: s.ControlPlaneSubnet().CidrBlock,
- PrivateIPAddress: s.ControlPlaneSubnet().InternalLBIPAddress,
- APIServerPort: s.APIServerPort(),
- Role: infrav1.InternalRole,
- },
+ specs := []azure.LBSpec{
{
// Public API Server LB
Name: azure.GeneratePublicLBName(s.ClusterName()),
@@ -142,6 +135,21 @@ func (s *ClusterScope) LBSpecs() []azure.LBSpec {
Role: infrav1.NodeOutboundRole,
},
}
+ if !s.IsIPv6Enabled() {
+ // For now, no internal LB is created for IPv6-enabled clusters because
+ // getAvailablePrivateIP() does not work with IPv6.
+ specs = append(specs, azure.LBSpec{
+ // Internal control plane LB
+ Name: azure.GenerateInternalLBName(s.ClusterName()),
+ SubnetName: s.ControlPlaneSubnet().Name,
+ SubnetCidrs: s.ControlPlaneSubnet().CIDRBlocks,
+ PrivateIPAddress: s.ControlPlaneSubnet().InternalLBIPAddress,
+ APIServerPort: s.APIServerPort(),
+ Role: infrav1.InternalRole,
+ })
+ }
+
+ return specs
}
// RouteTableSpecs returns the node route table(s)
@@ -170,7 +178,7 @@ func (s *ClusterScope) SubnetSpecs() []azure.SubnetSpec {
return []azure.SubnetSpec{
{
Name: s.ControlPlaneSubnet().Name,
- CIDR: s.ControlPlaneSubnet().CidrBlock,
+ CIDRs: s.ControlPlaneSubnet().CIDRBlocks,
VNetName: s.Vnet().Name,
SecurityGroupName: s.ControlPlaneSubnet().SecurityGroup.Name,
Role: s.ControlPlaneSubnet().Role,
@@ -179,7 +187,7 @@ func (s *ClusterScope) SubnetSpecs() []azure.SubnetSpec {
},
{
Name: s.NodeSubnet().Name,
- CIDR: s.NodeSubnet().CidrBlock,
+ CIDRs: s.NodeSubnet().CIDRBlocks,
VNetName: s.Vnet().Name,
SecurityGroupName: s.NodeSubnet().SecurityGroup.Name,
RouteTableName: s.NodeSubnet().RouteTable.Name,
@@ -194,7 +202,7 @@ func (s *ClusterScope) VNetSpecs() []azure.VNetSpec {
{
ResourceGroup: s.Vnet().ResourceGroup,
Name: s.Vnet().Name,
- CIDR: s.Vnet().CidrBlock,
+ CIDRs: s.Vnet().CIDRBlocks,
},
}
}
@@ -209,6 +217,16 @@ func (s *ClusterScope) IsVnetManaged() bool {
return s.Vnet().ID == "" || s.Vnet().Tags.HasOwned(s.ClusterName())
}
+// IsIPv6Enabled returns true if IPv6 is enabled.
+func (s *ClusterScope) IsIPv6Enabled() bool {
+ for _, cidr := range s.AzureCluster.Spec.NetworkSpec.Vnet.CIDRBlocks {
+ if net.IsIPv6CIDRString(cidr) {
+ return true
+ }
+ }
+ return false
+}
+
// Subnets returns the cluster subnets.
func (s *ClusterScope) Subnets() infrav1.Subnets {
return s.AzureCluster.Spec.NetworkSpec.Subnets
diff --git a/cloud/scope/machine.go b/cloud/scope/machine.go
index 03a6fba9751..06287781838 100644
--- a/cloud/scope/machine.go
+++ b/cloud/scope/machine.go
@@ -152,15 +152,19 @@ func (m *MachineScope) NICSpecs() []azure.NICSpec {
SubnetName: m.Subnet().Name,
VMSize: m.AzureMachine.Spec.VMSize,
AcceleratedNetworking: m.AzureMachine.Spec.AcceleratedNetworking,
+ IPv6Enabled: m.IsIPv6Enabled(),
+ EnableIPForwarding: m.AzureMachine.Spec.EnableIPForwarding,
}
if m.Role() == infrav1.ControlPlane {
publicLBName := azure.GeneratePublicLBName(m.ClusterName())
spec.PublicLBName = publicLBName
spec.PublicLBAddressPoolName = azure.GenerateBackendAddressPoolName(publicLBName)
spec.PublicLBNATRuleName = m.Name()
- internalLBName := azure.GenerateInternalLBName(m.ClusterName())
- spec.InternalLBName = internalLBName
- spec.InternalLBAddressPoolName = azure.GenerateBackendAddressPoolName(internalLBName)
+ if !m.IsIPv6Enabled() {
+ internalLBName := azure.GenerateInternalLBName(m.ClusterName())
+ spec.InternalLBName = internalLBName
+ spec.InternalLBAddressPoolName = azure.GenerateBackendAddressPoolName(internalLBName)
+ }
} else if m.Role() == infrav1.Node {
publicLBName := m.ClusterName()
spec.PublicLBName = publicLBName
diff --git a/cloud/services/bastionhosts/mocks_bastionhosts/bastionhosts_mock.go b/cloud/services/bastionhosts/mocks_bastionhosts/bastionhosts_mock.go
index 82027d5ffba..d9edbc2824d 100644
--- a/cloud/services/bastionhosts/mocks_bastionhosts/bastionhosts_mock.go
+++ b/cloud/services/bastionhosts/mocks_bastionhosts/bastionhosts_mock.go
@@ -276,6 +276,20 @@ func (mr *MockBastionScopeMockRecorder) RouteTable() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RouteTable", reflect.TypeOf((*MockBastionScope)(nil).RouteTable))
}
+// IsIPv6Enabled mocks base method.
+func (m *MockBastionScope) IsIPv6Enabled() bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IsIPv6Enabled")
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// IsIPv6Enabled indicates an expected call of IsIPv6Enabled.
+func (mr *MockBastionScopeMockRecorder) IsIPv6Enabled() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsIPv6Enabled", reflect.TypeOf((*MockBastionScope)(nil).IsIPv6Enabled))
+}
+
// Info mocks base method.
func (m *MockBastionScope) Info(msg string, keysAndValues ...interface{}) {
m.ctrl.T.Helper()
diff --git a/cloud/services/disks/mock_disks/disks_mock.go b/cloud/services/disks/mock_disks/disks_mock.go
index 7049f50502e..a76d3377695 100644
--- a/cloud/services/disks/mock_disks/disks_mock.go
+++ b/cloud/services/disks/mock_disks/disks_mock.go
@@ -370,6 +370,20 @@ func (mr *MockDiskScopeMockRecorder) RouteTable() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RouteTable", reflect.TypeOf((*MockDiskScope)(nil).RouteTable))
}
+// IsIPv6Enabled mocks base method.
+func (m *MockDiskScope) IsIPv6Enabled() bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IsIPv6Enabled")
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// IsIPv6Enabled indicates an expected call of IsIPv6Enabled.
+func (mr *MockDiskScopeMockRecorder) IsIPv6Enabled() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsIPv6Enabled", reflect.TypeOf((*MockDiskScope)(nil).IsIPv6Enabled))
+}
+
// DiskSpecs mocks base method.
func (m *MockDiskScope) DiskSpecs() []azure.DiskSpec {
m.ctrl.T.Helper()
diff --git a/cloud/services/inboundnatrules/mock_inboundnatrules/inboundnatrules_mock.go b/cloud/services/inboundnatrules/mock_inboundnatrules/inboundnatrules_mock.go
index 1ef24860e4b..49514ec1699 100644
--- a/cloud/services/inboundnatrules/mock_inboundnatrules/inboundnatrules_mock.go
+++ b/cloud/services/inboundnatrules/mock_inboundnatrules/inboundnatrules_mock.go
@@ -370,6 +370,20 @@ func (mr *MockInboundNatScopeMockRecorder) RouteTable() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RouteTable", reflect.TypeOf((*MockInboundNatScope)(nil).RouteTable))
}
+// IsIPv6Enabled mocks base method.
+func (m *MockInboundNatScope) IsIPv6Enabled() bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IsIPv6Enabled")
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// IsIPv6Enabled indicates an expected call of IsIPv6Enabled.
+func (mr *MockInboundNatScopeMockRecorder) IsIPv6Enabled() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsIPv6Enabled", reflect.TypeOf((*MockInboundNatScope)(nil).IsIPv6Enabled))
+}
+
// InboundNatSpecs mocks base method.
func (m *MockInboundNatScope) InboundNatSpecs() []azure.InboundNatSpec {
m.ctrl.T.Helper()
diff --git a/cloud/services/loadbalancers/loadbalancers.go b/cloud/services/loadbalancers/loadbalancers.go
index 84832581d9d..9dc12529250 100644
--- a/cloud/services/loadbalancers/loadbalancers.go
+++ b/cloud/services/loadbalancers/loadbalancers.go
@@ -50,7 +50,7 @@ func (s *Service) Reconcile(ctx context.Context) error {
}
} else if azure.ResourceNotFound(err) {
s.Scope.V(2).Info("internalLB not found in RG", "internal lb", lbSpec.Name, "resource group", s.Scope.ResourceGroup())
- privateIP, err = s.getAvailablePrivateIP(ctx, s.Scope.Vnet().ResourceGroup, s.Scope.Vnet().Name, lbSpec.SubnetCidr, lbSpec.PrivateIPAddress)
+ privateIP, err = s.getAvailablePrivateIP(ctx, s.Scope.Vnet().ResourceGroup, s.Scope.Vnet().Name, lbSpec.PrivateIPAddress, lbSpec.SubnetCidrs)
if err != nil {
return err
}
@@ -191,16 +191,22 @@ func (s *Service) Delete(ctx context.Context) error {
// getAvailablePrivateIP checks if the desired private IP address is available in a virtual network.
// If the IP address is taken or empty, it will make an attempt to find an available IP in the same subnet
-func (s *Service) getAvailablePrivateIP(ctx context.Context, resourceGroup, vnetName, subnetCIDR, PreferredIPAddress string) (string, error) {
+// NOTE: this does not currently work for VNets with IPv6 CIDRs
+func (s *Service) getAvailablePrivateIP(ctx context.Context, resourceGroup, vnetName, PreferredIPAddress string, subnetCIDRs []string) (string, error) {
+ if len(subnetCIDRs) == 0 {
+ return "", errors.Errorf("failed to find available IP: control plane subnet CIDRs should not be empty")
+ }
ip := PreferredIPAddress
if ip == "" {
ip = azure.DefaultInternalLBIPAddress
+ subnetCIDR := subnetCIDRs[0]
if subnetCIDR != infrav1.DefaultControlPlaneSubnetCIDR {
// If the user provided a custom subnet CIDR without providing a private IP, try finding an available IP in the subnet space
index := strings.LastIndex(subnetCIDR, ".")
ip = subnetCIDR[0:(index+1)] + "0"
}
}
+
result, err := s.VirtualNetworksClient.CheckIPAddressAvailability(ctx, resourceGroup, vnetName, ip)
if err != nil {
return "", errors.Wrap(err, "failed to check IP availability")
@@ -209,6 +215,7 @@ func (s *Service) getAvailablePrivateIP(ctx context.Context, resourceGroup, vnet
if len(to.StringSlice(result.AvailableIPAddresses)) == 0 {
return "", errors.Errorf("IP %s is not available in VNet %s and there were no other available IPs found", ip, vnetName)
}
+ // TODO: make sure that the returned IP is in the right subnet since this check is done at the VNet level
ip = to.StringSlice(result.AvailableIPAddresses)[0]
}
return ip, nil
diff --git a/cloud/services/loadbalancers/loadbalancers_test.go b/cloud/services/loadbalancers/loadbalancers_test.go
index bfc55298e51..b67c5d0a765 100644
--- a/cloud/services/loadbalancers/loadbalancers_test.go
+++ b/cloud/services/loadbalancers/loadbalancers_test.go
@@ -225,7 +225,7 @@ func TestReconcileLoadBalancer(t *testing.T) {
s.LBSpecs().Return([]azure.LBSpec{
{
Name: "my-lb",
- SubnetCidr: "10.0.0.0/16",
+ SubnetCidrs: []string{"10.0.0.0/16"},
SubnetName: "my-subnet",
PrivateIPAddress: "10.0.0.10",
Role: infrav1.InternalRole,
@@ -240,6 +240,7 @@ func TestReconcileLoadBalancer(t *testing.T) {
s.Location().AnyTimes().Return("testlocation")
s.ClusterName().AnyTimes().Return("cluster-name")
s.AdditionalTags().AnyTimes().Return(infrav1.Tags{})
+ s.IsIPv6Enabled().AnyTimes().Return(false)
m.Get(context.TODO(), "my-rg", "my-lb").Return(network.LoadBalancer{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found"))
mVnet.CheckIPAddressAvailability(context.TODO(), "my-rg", "my-vnet", "10.0.0.10").Return(network.IPAddressAvailabilityResult{Available: to.BoolPtr(true)}, nil)
m.CreateOrUpdate(context.TODO(), "my-rg", "my-lb", gomock.AssignableToTypeOf(network.LoadBalancer{}))
@@ -253,7 +254,7 @@ func TestReconcileLoadBalancer(t *testing.T) {
s.LBSpecs().Return([]azure.LBSpec{
{
Name: "my-lb",
- SubnetCidr: "10.0.0.0/16",
+ SubnetCidrs: []string{"10.0.0.0/16"},
SubnetName: "my-subnet",
PrivateIPAddress: "10.0.0.10",
Role: infrav1.InternalRole,
@@ -279,7 +280,7 @@ func TestReconcileLoadBalancer(t *testing.T) {
s.LBSpecs().Return([]azure.LBSpec{
{
Name: "my-lb",
- SubnetCidr: "10.0.0.0/16",
+ SubnetCidrs: []string{"10.0.0.0/16"},
SubnetName: "my-subnet",
PrivateIPAddress: "10.0.0.10",
Role: infrav1.InternalRole,
@@ -294,6 +295,7 @@ func TestReconcileLoadBalancer(t *testing.T) {
})
s.Location().AnyTimes().Return("testlocation")
s.ClusterName().AnyTimes().Return("my-cluster")
+ s.IsIPv6Enabled().AnyTimes().Return(false)
s.AdditionalTags().AnyTimes().Return(infrav1.Tags{})
m.Get(context.TODO(), "my-rg", "my-lb").Return(network.LoadBalancer{
LoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{
@@ -374,7 +376,7 @@ func TestReconcileLoadBalancer(t *testing.T) {
s.LBSpecs().Return([]azure.LBSpec{
{
Name: "my-lb",
- SubnetCidr: "10.0.0.0/16",
+ SubnetCidrs: []string{"10.0.0.0/16"},
SubnetName: "my-subnet",
PrivateIPAddress: "10.0.0.10",
Role: infrav1.InternalRole,
@@ -388,6 +390,7 @@ func TestReconcileLoadBalancer(t *testing.T) {
})
s.Location().AnyTimes().Return("testlocation")
s.ClusterName().AnyTimes().Return("cluster-name")
+ s.IsIPv6Enabled().AnyTimes().Return(false)
s.AdditionalTags().AnyTimes().Return(infrav1.Tags{})
m.Get(context.TODO(), "my-rg", "my-lb").Return(network.LoadBalancer{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found"))
mVnet.CheckIPAddressAvailability(context.TODO(), "my-rg", "my-vnet", "10.0.0.10").Return(network.IPAddressAvailabilityResult{Available: to.BoolPtr(false)}, nil)
@@ -401,7 +404,7 @@ func TestReconcileLoadBalancer(t *testing.T) {
s.LBSpecs().Return([]azure.LBSpec{
{
Name: "my-lb",
- SubnetCidr: "10.0.0.0/16",
+ SubnetCidrs: []string{"10.0.0.0/16"},
SubnetName: "my-subnet",
PrivateIPAddress: "10.0.0.10",
APIServerPort: 6443,
@@ -427,6 +430,7 @@ func TestReconcileLoadBalancer(t *testing.T) {
})
s.Location().AnyTimes().Return("testlocation")
s.ClusterName().AnyTimes().Return("cluster-name")
+ s.IsIPv6Enabled().AnyTimes().Return(false)
s.AdditionalTags().AnyTimes().Return(infrav1.Tags{})
m.Get(context.TODO(), "my-rg", "my-lb").Return(network.LoadBalancer{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found"))
mVnet.CheckIPAddressAvailability(context.TODO(), "my-rg", "my-vnet", "10.0.0.10").Return(network.IPAddressAvailabilityResult{Available: to.BoolPtr(true)}, nil)
@@ -559,23 +563,23 @@ func TestGetAvailablePrivateIP(t *testing.T) {
g := NewWithT(t)
testcases := []struct {
- name string
- subnetCidr string
- expectedIP string
- expect func(s *mock_loadbalancers.MockLBScopeMockRecorder, mVnet *mock_virtualnetworks.MockClientMockRecorder)
+ name string
+ subnetCidrs []string
+ expectedIP string
+ expect func(s *mock_loadbalancers.MockLBScopeMockRecorder, mVnet *mock_virtualnetworks.MockClientMockRecorder)
}{
{
- name: "internal load balancer with a valid subnet cidr",
- subnetCidr: "10.0.8.0/16",
- expectedIP: "10.0.8.0",
+ name: "internal load balancer with a valid subnet cidr",
+ subnetCidrs: []string{"10.0.8.0/16"},
+ expectedIP: "10.0.8.0",
expect: func(s *mock_loadbalancers.MockLBScopeMockRecorder, mVnet *mock_virtualnetworks.MockClientMockRecorder) {
mVnet.CheckIPAddressAvailability(context.TODO(), "my-rg", "my-vnet", "10.0.8.0").Return(network.IPAddressAvailabilityResult{Available: to.BoolPtr(true)}, nil)
},
},
{
- name: "internal load balancer subnet cidr not 8 characters in length",
- subnetCidr: "10.64.8.0",
- expectedIP: "10.64.8.0",
+ name: "internal load balancer subnet cidr not 8 characters in length",
+ subnetCidrs: []string{"10.64.8.0"},
+ expectedIP: "10.64.8.0",
expect: func(s *mock_loadbalancers.MockLBScopeMockRecorder, mVnet *mock_virtualnetworks.MockClientMockRecorder) {
mVnet.CheckIPAddressAvailability(context.TODO(), "my-rg", "my-vnet", "10.64.8.0").Return(network.IPAddressAvailabilityResult{Available: to.BoolPtr(true)}, nil)
},
@@ -594,7 +598,7 @@ func TestGetAvailablePrivateIP(t *testing.T) {
VirtualNetworksClient: vnetMock,
}
- resultIP, err := s.getAvailablePrivateIP(context.TODO(), "my-rg", "my-vnet", tc.subnetCidr, "")
+ resultIP, err := s.getAvailablePrivateIP(context.TODO(), "my-rg", "my-vnet", "", tc.subnetCidrs)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(resultIP).To(Equal(tc.expectedIP))
})
diff --git a/cloud/services/loadbalancers/mock_loadbalancers/loadbalancers_mock.go b/cloud/services/loadbalancers/mock_loadbalancers/loadbalancers_mock.go
index cfe463d41b1..ac1733cdc96 100644
--- a/cloud/services/loadbalancers/mock_loadbalancers/loadbalancers_mock.go
+++ b/cloud/services/loadbalancers/mock_loadbalancers/loadbalancers_mock.go
@@ -276,6 +276,20 @@ func (mr *MockLBScopeMockRecorder) RouteTable() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RouteTable", reflect.TypeOf((*MockLBScope)(nil).RouteTable))
}
+// IsIPv6Enabled mocks base method.
+func (m *MockLBScope) IsIPv6Enabled() bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IsIPv6Enabled")
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// IsIPv6Enabled indicates an expected call of IsIPv6Enabled.
+func (mr *MockLBScopeMockRecorder) IsIPv6Enabled() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsIPv6Enabled", reflect.TypeOf((*MockLBScope)(nil).IsIPv6Enabled))
+}
+
// Info mocks base method.
func (m *MockLBScope) Info(msg string, keysAndValues ...interface{}) {
m.ctrl.T.Helper()
diff --git a/cloud/services/networkinterfaces/mock_networkinterfaces/networkinterfaces_mock.go b/cloud/services/networkinterfaces/mock_networkinterfaces/networkinterfaces_mock.go
index 337d487e526..b0654a303eb 100644
--- a/cloud/services/networkinterfaces/mock_networkinterfaces/networkinterfaces_mock.go
+++ b/cloud/services/networkinterfaces/mock_networkinterfaces/networkinterfaces_mock.go
@@ -276,6 +276,20 @@ func (mr *MockNICScopeMockRecorder) RouteTable() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RouteTable", reflect.TypeOf((*MockNICScope)(nil).RouteTable))
}
+// IsIPv6Enabled mocks base method.
+func (m *MockNICScope) IsIPv6Enabled() bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IsIPv6Enabled")
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// IsIPv6Enabled indicates an expected call of IsIPv6Enabled.
+func (mr *MockNICScopeMockRecorder) IsIPv6Enabled() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsIPv6Enabled", reflect.TypeOf((*MockNICScope)(nil).IsIPv6Enabled))
+}
+
// Info mocks base method.
func (m *MockNICScope) Info(msg string, keysAndValues ...interface{}) {
m.ctrl.T.Helper()
diff --git a/cloud/services/networkinterfaces/networkinterfaces.go b/cloud/services/networkinterfaces/networkinterfaces.go
index d0d51b5d577..619ecfbac48 100644
--- a/cloud/services/networkinterfaces/networkinterfaces.go
+++ b/cloud/services/networkinterfaces/networkinterfaces.go
@@ -40,7 +40,10 @@ func (s *Service) Reconcile(ctx context.Context) error {
default:
nicConfig := &network.InterfaceIPConfigurationPropertiesFormat{}
- nicConfig.Subnet = &network.Subnet{ID: to.StringPtr(azure.SubnetID(s.Scope.SubscriptionID(), nicSpec.VNetResourceGroup, nicSpec.VNetName, nicSpec.SubnetName))}
+ subnet := &network.Subnet{
+ ID: to.StringPtr(azure.SubnetID(s.Scope.SubscriptionID(), nicSpec.VNetResourceGroup, nicSpec.VNetName, nicSpec.SubnetName)),
+ }
+ nicConfig.Subnet = subnet
nicConfig.PrivateIPAllocationMethod = network.Dynamic
if nicSpec.StaticIPAddress != "" {
@@ -90,19 +93,35 @@ func (s *Service) Reconcile(ctx context.Context) error {
nicSpec.AcceleratedNetworking = &accelNet
}
+ ipConfigurations := []network.InterfaceIPConfiguration{
+ {
+ Name: to.StringPtr("pipConfig"),
+ InterfaceIPConfigurationPropertiesFormat: nicConfig,
+ },
+ }
+
+ if nicSpec.IPv6Enabled {
+ ipv6Config := network.InterfaceIPConfiguration{
+ Name: to.StringPtr("ipConfigv6"),
+ InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
+ PrivateIPAddressVersion: "IPv6",
+ Primary: to.BoolPtr(false),
+ Subnet: &network.Subnet{ID: subnet.ID},
+ },
+ }
+
+ ipConfigurations = append(ipConfigurations, ipv6Config)
+ }
+
err = s.Client.CreateOrUpdate(ctx,
s.Scope.ResourceGroup(),
nicSpec.Name,
network.Interface{
Location: to.StringPtr(s.Scope.Location()),
InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
- IPConfigurations: &[]network.InterfaceIPConfiguration{
- {
- Name: to.StringPtr("pipConfig"),
- InterfaceIPConfigurationPropertiesFormat: nicConfig,
- },
- },
EnableAcceleratedNetworking: nicSpec.AcceleratedNetworking,
+ IPConfigurations: &ipConfigurations,
+ EnableIPForwarding: to.BoolPtr(nicSpec.EnableIPForwarding),
},
})
diff --git a/cloud/services/networkinterfaces/networkinterfaces_test.go b/cloud/services/networkinterfaces/networkinterfaces_test.go
index f13542c6b45..22c2747be3a 100644
--- a/cloud/services/networkinterfaces/networkinterfaces_test.go
+++ b/cloud/services/networkinterfaces/networkinterfaces_test.go
@@ -128,6 +128,7 @@ func TestReconcileNetworkInterface(t *testing.T) {
Location: to.StringPtr("fake-location"),
InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
EnableAcceleratedNetworking: to.BoolPtr(true),
+ EnableIPForwarding: to.BoolPtr(false),
IPConfigurations: &[]network.InterfaceIPConfiguration{
{
Name: to.StringPtr("pipConfig"),
@@ -171,6 +172,7 @@ func TestReconcileNetworkInterface(t *testing.T) {
Location: to.StringPtr("fake-location"),
InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
EnableAcceleratedNetworking: to.BoolPtr(true),
+ EnableIPForwarding: to.BoolPtr(false),
IPConfigurations: &[]network.InterfaceIPConfiguration{
{
Name: to.StringPtr("pipConfig"),
@@ -215,6 +217,7 @@ func TestReconcileNetworkInterface(t *testing.T) {
Location: to.StringPtr("fake-location"),
InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
EnableAcceleratedNetworking: to.BoolPtr(true),
+ EnableIPForwarding: to.BoolPtr(false),
IPConfigurations: &[]network.InterfaceIPConfiguration{
{
Name: to.StringPtr("pipConfig"),
@@ -283,6 +286,7 @@ func TestReconcileNetworkInterface(t *testing.T) {
Location: to.StringPtr("fake-location"),
InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
EnableAcceleratedNetworking: to.BoolPtr(true),
+ EnableIPForwarding: to.BoolPtr(false),
IPConfigurations: &[]network.InterfaceIPConfiguration{
{
Name: to.StringPtr("pipConfig"),
@@ -325,6 +329,7 @@ func TestReconcileNetworkInterface(t *testing.T) {
Location: to.StringPtr("fake-location"),
InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
EnableAcceleratedNetworking: to.BoolPtr(false),
+ EnableIPForwarding: to.BoolPtr(false),
IPConfigurations: &[]network.InterfaceIPConfiguration{
{
Name: to.StringPtr("pipConfig"),
@@ -339,6 +344,60 @@ func TestReconcileNetworkInterface(t *testing.T) {
}))
},
},
+ {
+ name: "network interface with ipv6 created successfully",
+ expectedError: "",
+ expect: func(s *mock_networkinterfaces.MockNICScopeMockRecorder, m *mock_networkinterfaces.MockClientMockRecorder) {
+ s.NICSpecs().Return([]azure.NICSpec{
+ {
+ Name: "my-net-interface",
+ MachineName: "azure-test1",
+ SubnetName: "my-subnet",
+ VNetName: "my-vnet",
+ IPv6Enabled: true,
+ VNetResourceGroup: "my-rg",
+ PublicLBName: "my-public-lb",
+ VMSize: "Standard_D2v2",
+ AcceleratedNetworking: nil,
+ EnableIPForwarding: true,
+ },
+ })
+ s.SubscriptionID().AnyTimes().Return("123")
+ s.ResourceGroup().AnyTimes().Return("my-rg")
+ s.Location().AnyTimes().Return("fake-location")
+ s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New())
+ s.IsIPv6Enabled().AnyTimes().Return(true)
+ gomock.InOrder(
+ m.Get(context.TODO(), "my-rg", "my-net-interface").
+ Return(network.Interface{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found")),
+ m.CreateOrUpdate(context.TODO(), "my-rg", "my-net-interface", gomockinternal.DiffEq(network.Interface{
+ Location: to.StringPtr("fake-location"),
+ InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
+ EnableAcceleratedNetworking: to.BoolPtr(true),
+ EnableIPForwarding: to.BoolPtr(true),
+ IPConfigurations: &[]network.InterfaceIPConfiguration{
+ {
+ Name: to.StringPtr("pipConfig"),
+ InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
+ Subnet: &network.Subnet{ID: to.StringPtr("/subscriptions/123/resourceGroups/my-rg/providers/Microsoft.Network/virtualNetworks/my-vnet/subnets/my-subnet")},
+ PrivateIPAllocationMethod: network.Dynamic,
+ LoadBalancerBackendAddressPools: &[]network.BackendAddressPool{},
+ },
+ },
+ {
+ Name: to.StringPtr("ipConfigv6"),
+ InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
+ Subnet: &network.Subnet{ID: to.StringPtr("/subscriptions/123/resourceGroups/my-rg/providers/Microsoft.Network/virtualNetworks/my-vnet/subnets/my-subnet")},
+ Primary: to.BoolPtr(false),
+ PrivateIPAddressVersion: "IPv6",
+ },
+ },
+ },
+ },
+ })),
+ )
+ },
+ },
}
for _, tc := range testcases {
diff --git a/cloud/services/publicips/mock_publicips/publicips_mock.go b/cloud/services/publicips/mock_publicips/publicips_mock.go
index ebb22de9ad4..dfdc0a3a0b6 100644
--- a/cloud/services/publicips/mock_publicips/publicips_mock.go
+++ b/cloud/services/publicips/mock_publicips/publicips_mock.go
@@ -370,6 +370,20 @@ func (mr *MockPublicIPScopeMockRecorder) RouteTable() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RouteTable", reflect.TypeOf((*MockPublicIPScope)(nil).RouteTable))
}
+// IsIPv6Enabled mocks base method.
+func (m *MockPublicIPScope) IsIPv6Enabled() bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IsIPv6Enabled")
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// IsIPv6Enabled indicates an expected call of IsIPv6Enabled.
+func (mr *MockPublicIPScopeMockRecorder) IsIPv6Enabled() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsIPv6Enabled", reflect.TypeOf((*MockPublicIPScope)(nil).IsIPv6Enabled))
+}
+
// PublicIPSpecs mocks base method.
func (m *MockPublicIPScope) PublicIPSpecs() []azure.PublicIPSpec {
m.ctrl.T.Helper()
diff --git a/cloud/services/publicips/publicips.go b/cloud/services/publicips/publicips.go
index 0f9bc663043..33406277db0 100644
--- a/cloud/services/publicips/publicips.go
+++ b/cloud/services/publicips/publicips.go
@@ -30,6 +30,12 @@ import (
func (s *Service) Reconcile(ctx context.Context) error {
for _, ip := range s.Scope.PublicIPSpecs() {
s.Scope.V(2).Info("creating public IP", "public ip", ip.Name)
+
+ addressVersion := network.IPv4
+ if ip.IsIPv6 {
+ addressVersion = network.IPv6
+ }
+
err := s.Client.CreateOrUpdate(
ctx,
s.Scope.ResourceGroup(),
@@ -39,7 +45,7 @@ func (s *Service) Reconcile(ctx context.Context) error {
Name: to.StringPtr(ip.Name),
Location: to.StringPtr(s.Scope.Location()),
PublicIPAddressPropertiesFormat: &network.PublicIPAddressPropertiesFormat{
- PublicIPAddressVersion: network.IPv4,
+ PublicIPAddressVersion: addressVersion,
PublicIPAllocationMethod: network.Static,
DNSSettings: &network.PublicIPAddressDNSSettings{
DomainNameLabel: to.StringPtr(strings.ToLower(ip.Name)),
diff --git a/cloud/services/publicips/publicips_test.go b/cloud/services/publicips/publicips_test.go
index 3736c6a27f5..f8b8c3365ba 100644
--- a/cloud/services/publicips/publicips_test.go
+++ b/cloud/services/publicips/publicips_test.go
@@ -19,6 +19,7 @@ package publicips
import (
"context"
"net/http"
 	"testing"
+	gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock"
azure "sigs.k8s.io/cluster-api-provider-azure/cloud"
@@ -27,6 +28,7 @@ import (
"sigs.k8s.io/cluster-api-provider-azure/cloud/services/publicips/mock_publicips"
"github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/to"
"github.com/golang/mock/gomock"
network "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network"
@@ -62,12 +64,68 @@ func TestReconcilePublicIP(t *testing.T) {
{
Name: "my-publicip-3",
},
+ {
+ Name: "my-publicip-ipv6",
+ IsIPv6: true,
+ DNSName: "fakename",
+ },
})
s.ResourceGroup().AnyTimes().Return("my-rg")
s.Location().AnyTimes().Return("testlocation")
- m.CreateOrUpdate(context.TODO(), "my-rg", "my-publicip", gomock.AssignableToTypeOf(network.PublicIPAddress{}))
- m.CreateOrUpdate(context.TODO(), "my-rg", "my-publicip-2", gomock.AssignableToTypeOf(network.PublicIPAddress{}))
- m.CreateOrUpdate(context.TODO(), "my-rg", "my-publicip-3", gomock.AssignableToTypeOf(network.PublicIPAddress{}))
+ gomock.InOrder(
+ m.CreateOrUpdate(context.TODO(), "my-rg", "my-publicip", gomockinternal.DiffEq(network.PublicIPAddress{
+ Name: to.StringPtr("my-publicip"),
+ Sku: &network.PublicIPAddressSku{Name: network.PublicIPAddressSkuNameStandard},
+ Location: to.StringPtr("testlocation"),
+ PublicIPAddressPropertiesFormat: &network.PublicIPAddressPropertiesFormat{
+ PublicIPAddressVersion: network.IPv4,
+ PublicIPAllocationMethod: network.Static,
+ DNSSettings: &network.PublicIPAddressDNSSettings{
+ DomainNameLabel: to.StringPtr("my-publicip"),
+ Fqdn: to.StringPtr("fakedns"),
+ },
+ },
+ })).Times(1),
+ m.CreateOrUpdate(context.TODO(), "my-rg", "my-publicip-2", gomockinternal.DiffEq(network.PublicIPAddress{
+ Name: to.StringPtr("my-publicip-2"),
+ Sku: &network.PublicIPAddressSku{Name: network.PublicIPAddressSkuNameStandard},
+ Location: to.StringPtr("testlocation"),
+ PublicIPAddressPropertiesFormat: &network.PublicIPAddressPropertiesFormat{
+ PublicIPAddressVersion: network.IPv4,
+ PublicIPAllocationMethod: network.Static,
+ DNSSettings: &network.PublicIPAddressDNSSettings{
+ DomainNameLabel: to.StringPtr("my-publicip-2"),
+ Fqdn: to.StringPtr("fakedns2"),
+ },
+ },
+ })).Times(1),
+ m.CreateOrUpdate(context.TODO(), "my-rg", "my-publicip-3", gomockinternal.DiffEq(network.PublicIPAddress{
+ Name: to.StringPtr("my-publicip-3"),
+ Sku: &network.PublicIPAddressSku{Name: network.PublicIPAddressSkuNameStandard},
+ Location: to.StringPtr("testlocation"),
+ PublicIPAddressPropertiesFormat: &network.PublicIPAddressPropertiesFormat{
+ PublicIPAddressVersion: network.IPv4,
+ PublicIPAllocationMethod: network.Static,
+ DNSSettings: &network.PublicIPAddressDNSSettings{
+ DomainNameLabel: to.StringPtr("my-publicip-3"),
+ Fqdn: to.StringPtr(""),
+ },
+ },
+ })).Times(1),
+ m.CreateOrUpdate(context.TODO(), "my-rg", "my-publicip-ipv6", gomockinternal.DiffEq(network.PublicIPAddress{
+ Name: to.StringPtr("my-publicip-ipv6"),
+ Sku: &network.PublicIPAddressSku{Name: network.PublicIPAddressSkuNameStandard},
+ Location: to.StringPtr("testlocation"),
+ PublicIPAddressPropertiesFormat: &network.PublicIPAddressPropertiesFormat{
+ PublicIPAddressVersion: network.IPv6,
+ PublicIPAllocationMethod: network.Static,
+ DNSSettings: &network.PublicIPAddressDNSSettings{
+ DomainNameLabel: to.StringPtr("my-publicip-ipv6"),
+ Fqdn: to.StringPtr("fakename"),
+ },
+ },
+ })).Times(1),
+ )
},
},
{
diff --git a/cloud/services/roleassignments/mock_roleassignments/roleassignments_mock.go b/cloud/services/roleassignments/mock_roleassignments/roleassignments_mock.go
index c7f1f3f0b76..1e96f342ea0 100644
--- a/cloud/services/roleassignments/mock_roleassignments/roleassignments_mock.go
+++ b/cloud/services/roleassignments/mock_roleassignments/roleassignments_mock.go
@@ -370,6 +370,20 @@ func (mr *MockRoleAssignmentScopeMockRecorder) RouteTable() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RouteTable", reflect.TypeOf((*MockRoleAssignmentScope)(nil).RouteTable))
}
+// IsIPv6Enabled mocks base method.
+func (m *MockRoleAssignmentScope) IsIPv6Enabled() bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IsIPv6Enabled")
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// IsIPv6Enabled indicates an expected call of IsIPv6Enabled.
+func (mr *MockRoleAssignmentScopeMockRecorder) IsIPv6Enabled() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsIPv6Enabled", reflect.TypeOf((*MockRoleAssignmentScope)(nil).IsIPv6Enabled))
+}
+
// RoleAssignmentSpecs mocks base method.
func (m *MockRoleAssignmentScope) RoleAssignmentSpecs() []azure.RoleAssignmentSpec {
m.ctrl.T.Helper()
diff --git a/cloud/services/routetables/mock_routetables/routetables_mock.go b/cloud/services/routetables/mock_routetables/routetables_mock.go
index 77ad55372e2..f1f20bee736 100644
--- a/cloud/services/routetables/mock_routetables/routetables_mock.go
+++ b/cloud/services/routetables/mock_routetables/routetables_mock.go
@@ -276,6 +276,20 @@ func (mr *MockRouteTableScopeMockRecorder) RouteTable() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RouteTable", reflect.TypeOf((*MockRouteTableScope)(nil).RouteTable))
}
+// IsIPv6Enabled mocks base method.
+func (m *MockRouteTableScope) IsIPv6Enabled() bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IsIPv6Enabled")
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// IsIPv6Enabled indicates an expected call of IsIPv6Enabled.
+func (mr *MockRouteTableScopeMockRecorder) IsIPv6Enabled() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsIPv6Enabled", reflect.TypeOf((*MockRouteTableScope)(nil).IsIPv6Enabled))
+}
+
// Info mocks base method.
func (m *MockRouteTableScope) Info(msg string, keysAndValues ...interface{}) {
m.ctrl.T.Helper()
diff --git a/cloud/services/scalesets/mock_scalesets/scalesets_mock.go b/cloud/services/scalesets/mock_scalesets/scalesets_mock.go
index 21ff3d299eb..aabf3f28928 100644
--- a/cloud/services/scalesets/mock_scalesets/scalesets_mock.go
+++ b/cloud/services/scalesets/mock_scalesets/scalesets_mock.go
@@ -277,6 +277,20 @@ func (mr *MockScaleSetScopeMockRecorder) RouteTable() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RouteTable", reflect.TypeOf((*MockScaleSetScope)(nil).RouteTable))
}
+// IsIPv6Enabled mocks base method.
+func (m *MockScaleSetScope) IsIPv6Enabled() bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IsIPv6Enabled")
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// IsIPv6Enabled indicates an expected call of IsIPv6Enabled.
+func (mr *MockScaleSetScopeMockRecorder) IsIPv6Enabled() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsIPv6Enabled", reflect.TypeOf((*MockScaleSetScope)(nil).IsIPv6Enabled))
+}
+
// Info mocks base method.
func (m *MockScaleSetScope) Info(msg string, keysAndValues ...interface{}) {
m.ctrl.T.Helper()
diff --git a/cloud/services/securitygroups/mock_securitygroups/securitygroups_mock.go b/cloud/services/securitygroups/mock_securitygroups/securitygroups_mock.go
index d2f14290ef7..0061e0a5a00 100644
--- a/cloud/services/securitygroups/mock_securitygroups/securitygroups_mock.go
+++ b/cloud/services/securitygroups/mock_securitygroups/securitygroups_mock.go
@@ -276,6 +276,20 @@ func (mr *MockNSGScopeMockRecorder) RouteTable() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RouteTable", reflect.TypeOf((*MockNSGScope)(nil).RouteTable))
}
+// IsIPv6Enabled mocks base method.
+func (m *MockNSGScope) IsIPv6Enabled() bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IsIPv6Enabled")
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// IsIPv6Enabled indicates an expected call of IsIPv6Enabled.
+func (mr *MockNSGScopeMockRecorder) IsIPv6Enabled() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsIPv6Enabled", reflect.TypeOf((*MockNSGScope)(nil).IsIPv6Enabled))
+}
+
// Info mocks base method.
func (m *MockNSGScope) Info(msg string, keysAndValues ...interface{}) {
m.ctrl.T.Helper()
diff --git a/cloud/services/subnets/mock_subnets/subnets_mock.go b/cloud/services/subnets/mock_subnets/subnets_mock.go
index b58446e416a..0696299afa9 100644
--- a/cloud/services/subnets/mock_subnets/subnets_mock.go
+++ b/cloud/services/subnets/mock_subnets/subnets_mock.go
@@ -276,6 +276,20 @@ func (mr *MockSubnetScopeMockRecorder) RouteTable() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RouteTable", reflect.TypeOf((*MockSubnetScope)(nil).RouteTable))
}
+// IsIPv6Enabled mocks base method.
+func (m *MockSubnetScope) IsIPv6Enabled() bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IsIPv6Enabled")
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// IsIPv6Enabled indicates an expected call of IsIPv6Enabled.
+func (mr *MockSubnetScopeMockRecorder) IsIPv6Enabled() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsIPv6Enabled", reflect.TypeOf((*MockSubnetScope)(nil).IsIPv6Enabled))
+}
+
// Info mocks base method.
func (m *MockSubnetScope) Info(msg string, keysAndValues ...interface{}) {
m.ctrl.T.Helper()
diff --git a/cloud/services/subnets/subnets.go b/cloud/services/subnets/subnets.go
index 43c58a57d49..c5597545679 100644
--- a/cloud/services/subnets/subnets.go
+++ b/cloud/services/subnets/subnets.go
@@ -35,12 +35,19 @@ func (s *Service) getExisting(ctx context.Context, rgName string, spec azure.Sub
return nil, errors.Wrapf(err, "failed to fetch subnet named %s in vnet %s", spec.VNetName, spec.Name)
}
+ var addresses []string
+ if subnet.SubnetPropertiesFormat != nil && subnet.SubnetPropertiesFormat.AddressPrefix != nil {
+ addresses = []string{to.String(subnet.SubnetPropertiesFormat.AddressPrefix)}
+ } else if subnet.SubnetPropertiesFormat != nil && subnet.SubnetPropertiesFormat.AddressPrefixes != nil {
+ addresses = to.StringSlice(subnet.SubnetPropertiesFormat.AddressPrefixes)
+ }
+
subnetSpec := &infrav1.SubnetSpec{
Role: spec.Role,
InternalLBIPAddress: spec.InternalLBIPAddress,
Name: to.String(subnet.Name),
ID: to.String(subnet.ID),
- CidrBlock: to.String(subnet.SubnetPropertiesFormat.AddressPrefix),
+ CIDRBlocks: addresses,
}
return subnetSpec, nil
@@ -66,16 +73,25 @@ func (s *Service) Reconcile(ctx context.Context) error {
subnet.Role = subnetSpec.Role
subnet.Name = existingSubnet.Name
- subnet.CidrBlock = existingSubnet.CidrBlock
+ subnet.CIDRBlocks = existingSubnet.CIDRBlocks
subnet.ID = existingSubnet.ID
case !s.Scope.IsVnetManaged():
return fmt.Errorf("vnet was provided but subnet %s is missing", subnetSpec.Name)
default:
+
subnetProperties := network.SubnetPropertiesFormat{
- AddressPrefix: to.StringPtr(subnetSpec.CIDR),
+ AddressPrefixes: &subnetSpec.CIDRs,
}
+
+ // workaround needed to avoid SubscriptionNotRegisteredForFeature for feature Microsoft.Network/AllowMultipleAddressPrefixesOnSubnet.
+ if len(subnetSpec.CIDRs) == 1 {
+ subnetProperties = network.SubnetPropertiesFormat{
+ AddressPrefix: &subnetSpec.CIDRs[0],
+ }
+ }
+
if subnetSpec.RouteTableName != "" {
subnetProperties.RouteTable = &network.RouteTable{
ID: to.StringPtr(azure.RouteTableID(s.Scope.SubscriptionID(), s.Scope.ResourceGroup(), subnetSpec.RouteTableName)),
diff --git a/cloud/services/subnets/subnets_test.go b/cloud/services/subnets/subnets_test.go
index e6574bb2cbe..45b75ab0b3c 100644
--- a/cloud/services/subnets/subnets_test.go
+++ b/cloud/services/subnets/subnets_test.go
@@ -19,9 +19,10 @@ package subnets
import (
"context"
"net/http"
- gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock"
"testing"
+ gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock"
+
. "github.com/onsi/gomega"
"k8s.io/klog/klogr"
azure "sigs.k8s.io/cluster-api-provider-azure/cloud"
@@ -49,7 +50,7 @@ func TestReconcileSubnets(t *testing.T) {
s.SubnetSpecs().Return([]azure.SubnetSpec{
{
Name: "my-subnet",
- CIDR: "10.0.0.0/16",
+ CIDRs: []string{"10.0.0.0/16"},
VNetName: "my-vnet",
RouteTableName: "my-subnet_route_table",
SecurityGroupName: "my-sg",
@@ -61,6 +62,7 @@ func TestReconcileSubnets(t *testing.T) {
s.ClusterName().AnyTimes().Return("fake-cluster")
s.SubscriptionID().AnyTimes().Return("123")
s.ResourceGroup().AnyTimes().Return("my-rg")
+ s.IsIPv6Enabled().AnyTimes().Return(false)
s.IsVnetManaged().Return(true)
m.Get(context.TODO(), "", "my-vnet", "my-subnet").
Return(network.Subnet{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found"))
@@ -73,6 +75,41 @@ func TestReconcileSubnets(t *testing.T) {
}))
},
},
+ {
+ name: "subnet ipv6 does not exist",
+ expectedError: "",
+ expect: func(s *mock_subnets.MockSubnetScopeMockRecorder, m *mock_subnets.MockClientMockRecorder) {
+ s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New())
+ s.SubnetSpecs().Return([]azure.SubnetSpec{
+ {
+ Name: "my-ipv6-subnet",
+ CIDRs: []string{"10.0.0.0/16", "2001:1234:5678:9abd::/64"},
+ VNetName: "my-vnet",
+ RouteTableName: "my-subnet_route_table",
+ SecurityGroupName: "my-sg",
+ Role: infrav1.SubnetNode,
+ InternalLBIPAddress: "10.0.0.10",
+ },
+ })
+ s.Vnet().AnyTimes().Return(&infrav1.VnetSpec{Name: "my-vnet", ResourceGroup: "my-rg"})
+ s.ClusterName().AnyTimes().Return("fake-cluster")
+ s.ResourceGroup().AnyTimes().Return("my-rg")
+ s.SubscriptionID().AnyTimes().Return("123")
+ s.IsVnetManaged().Return(true)
+ m.Get(context.TODO(), "my-rg", "my-vnet", "my-ipv6-subnet").
+ Return(network.Subnet{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found"))
+ m.CreateOrUpdate(context.TODO(), "my-rg", "my-vnet", "my-ipv6-subnet", gomockinternal.DiffEq(network.Subnet{
+ SubnetPropertiesFormat: &network.SubnetPropertiesFormat{
+ AddressPrefixes: &[]string{
+ "10.0.0.0/16",
+ "2001:1234:5678:9abd::/64",
+ },
+ RouteTable: &network.RouteTable{ID: to.StringPtr("/subscriptions/123/resourceGroups/my-rg/providers/Microsoft.Network/routeTables/my-subnet_route_table")},
+ NetworkSecurityGroup: &network.SecurityGroup{ID: to.StringPtr("/subscriptions/123/resourceGroups/my-rg/providers/Microsoft.Network/networkSecurityGroups/my-sg")},
+ },
+ }))
+ },
+ },
{
name: "fail to create subnet",
expectedError: "failed to create subnet my-subnet in resource group : #: Internal Server Error: StatusCode=500",
@@ -81,7 +118,7 @@ func TestReconcileSubnets(t *testing.T) {
s.SubnetSpecs().Return([]azure.SubnetSpec{
{
Name: "my-subnet",
- CIDR: "10.0.0.0/16",
+ CIDRs: []string{"10.0.0.0/16"},
VNetName: "my-vnet",
RouteTableName: "my-subnet_route_table",
SecurityGroupName: "my-sg",
@@ -93,6 +130,7 @@ func TestReconcileSubnets(t *testing.T) {
s.ClusterName().AnyTimes().Return("fake-cluster")
s.SubscriptionID().AnyTimes().Return("123")
s.ResourceGroup().AnyTimes().Return("my-rg")
+ s.IsIPv6Enabled().AnyTimes().Return(false)
s.IsVnetManaged().Return(true)
m.Get(context.TODO(), "", "my-vnet", "my-subnet").
Return(network.Subnet{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found"))
@@ -107,7 +145,7 @@ func TestReconcileSubnets(t *testing.T) {
s.SubnetSpecs().Return([]azure.SubnetSpec{
{
Name: "my-subnet",
- CIDR: "10.0.0.0/16",
+ CIDRs: []string{"10.0.0.0/16"},
VNetName: "my-vnet",
RouteTableName: "my-subnet_route_table",
SecurityGroupName: "my-sg",
@@ -131,7 +169,7 @@ func TestReconcileSubnets(t *testing.T) {
s.SubnetSpecs().Return([]azure.SubnetSpec{
{
Name: "my-subnet",
- CIDR: "10.0.0.0/16",
+ CIDRs: []string{"10.0.0.0/16"},
VNetName: "custom-vnet",
RouteTableName: "my-subnet_route_table",
SecurityGroupName: "my-sg",
@@ -160,7 +198,7 @@ func TestReconcileSubnets(t *testing.T) {
s.SubnetSpecs().AnyTimes().Return([]azure.SubnetSpec{
{
Name: "my-subnet",
- CIDR: "10.0.0.0/16",
+ CIDRs: []string{"10.0.0.0/16"},
VNetName: "my-vnet",
RouteTableName: "my-subnet_route_table",
SecurityGroupName: "my-sg",
@@ -169,7 +207,7 @@ func TestReconcileSubnets(t *testing.T) {
},
{
Name: "my-subnet-1",
- CIDR: "10.2.0.0/16",
+ CIDRs: []string{"10.2.0.0/16"},
VNetName: "my-vnet",
RouteTableName: "my-subnet_route_table",
SecurityGroupName: "my-sg-1",
@@ -223,6 +261,84 @@ func TestReconcileSubnets(t *testing.T) {
}, nil)
},
},
+ {
+ name: "vnet for ipv6 is provided",
+ expectedError: "",
+ expect: func(s *mock_subnets.MockSubnetScopeMockRecorder, m *mock_subnets.MockClientMockRecorder) {
+ s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New())
+ s.SubnetSpecs().AnyTimes().Return([]azure.SubnetSpec{
+ {
+ Name: "my-ipv6-subnet",
+ CIDRs: []string{"10.0.0.0/16", "2001:1234:5678:9abd::/64"},
+ VNetName: "my-vnet",
+ RouteTableName: "my-subnet_route_table",
+ SecurityGroupName: "my-sg",
+ Role: infrav1.SubnetNode,
+ InternalLBIPAddress: "10.0.0.10",
+ },
+ {
+ Name: "my-ipv6-subnet-cp",
+ CIDRs: []string{"10.2.0.0/16", "2001:1234:5678:9abc::/64"},
+ VNetName: "my-vnet",
+ RouteTableName: "my-subnet_route_table",
+ SecurityGroupName: "my-sg-1",
+ Role: infrav1.SubnetControlPlane,
+ InternalLBIPAddress: "10.2.0.20",
+ },
+ })
+ s.Vnet().AnyTimes().Return(&infrav1.VnetSpec{Name: "my-vnet"})
+ s.NodeSubnet().AnyTimes().Return(&infrav1.SubnetSpec{
+ Name: "my-subnet",
+ Role: infrav1.SubnetNode,
+ })
+ s.ControlPlaneSubnet().AnyTimes().Return(&infrav1.SubnetSpec{
+ Name: "my-subnet-1",
+ Role: infrav1.SubnetControlPlane,
+ })
+ s.ClusterName().AnyTimes().Return("fake-cluster")
+ s.IsIPv6Enabled().AnyTimes().Return(true)
+ s.SubscriptionID().AnyTimes().Return("123")
+ s.ResourceGroup().AnyTimes().Return("my-rg")
+ m.Get(context.TODO(), "", "my-vnet", "my-ipv6-subnet").
+ Return(network.Subnet{
+ ID: to.StringPtr("subnet-id"),
+ Name: to.StringPtr("my-ipv6-subnet"),
+ SubnetPropertiesFormat: &network.SubnetPropertiesFormat{
+ AddressPrefixes: &[]string{
+ "10.0.0.0/16",
+ "2001:1234:5678:9abd::/64",
+ },
+ RouteTable: &network.RouteTable{
+ ID: to.StringPtr("rt-id"),
+ Name: to.StringPtr("my-subnet_route_table"),
+ },
+ NetworkSecurityGroup: &network.SecurityGroup{
+ ID: to.StringPtr("sg-id"),
+ Name: to.StringPtr("my-sg"),
+ },
+ },
+ }, nil)
+ m.Get(context.TODO(), "", "my-vnet", "my-ipv6-subnet-cp").
+ Return(network.Subnet{
+ ID: to.StringPtr("subnet-id-1"),
+ Name: to.StringPtr("my-ipv6-subnet-cp"),
+ SubnetPropertiesFormat: &network.SubnetPropertiesFormat{
+ AddressPrefixes: &[]string{
+ "10.2.0.0/16",
+ "2001:1234:5678:9abc::/64",
+ },
+ RouteTable: &network.RouteTable{
+ ID: to.StringPtr("rt-id"),
+ Name: to.StringPtr("my-subnet_route_table"),
+ },
+ NetworkSecurityGroup: &network.SecurityGroup{
+ ID: to.StringPtr("sg-id"),
+ Name: to.StringPtr("my-sg-1"),
+ },
+ },
+ }, nil)
+ },
+ },
}
for _, tc := range testcases {
@@ -268,7 +384,7 @@ func TestDeleteSubnets(t *testing.T) {
s.SubnetSpecs().Return([]azure.SubnetSpec{
{
Name: "my-subnet",
- CIDR: "10.0.0.0/16",
+ CIDRs: []string{"10.0.0.0/16"},
VNetName: "my-vnet",
RouteTableName: "my-subnet_route_table",
SecurityGroupName: "my-sg",
@@ -277,7 +393,7 @@ func TestDeleteSubnets(t *testing.T) {
},
{
Name: "my-subnet-1",
- CIDR: "10.1.0.0/16",
+ CIDRs: []string{"10.1.0.0/16"},
VNetName: "my-vnet",
RouteTableName: "my-subnet_route_table",
SecurityGroupName: "my-sg",
@@ -300,7 +416,7 @@ func TestDeleteSubnets(t *testing.T) {
s.SubnetSpecs().Return([]azure.SubnetSpec{
{
Name: "my-subnet",
- CIDR: "10.0.0.0/16",
+ CIDRs: []string{"10.0.0.0/16"},
VNetName: "my-vnet",
RouteTableName: "my-subnet_route_table",
SecurityGroupName: "my-sg",
@@ -323,7 +439,7 @@ func TestDeleteSubnets(t *testing.T) {
s.SubnetSpecs().Return([]azure.SubnetSpec{
{
Name: "my-subnet",
- CIDR: "10.0.0.0/16",
+ CIDRs: []string{"10.0.0.0/16"},
VNetName: "my-vnet",
RouteTableName: "my-subnet_route_table",
SecurityGroupName: "my-sg",
@@ -332,7 +448,7 @@ func TestDeleteSubnets(t *testing.T) {
},
{
Name: "my-subnet-1",
- CIDR: "10.1.0.0/16",
+ CIDRs: []string{"10.1.0.0/16"},
VNetName: "my-vnet",
RouteTableName: "my-subnet_route_table",
SecurityGroupName: "my-sg",
@@ -356,7 +472,7 @@ func TestDeleteSubnets(t *testing.T) {
s.SubnetSpecs().Return([]azure.SubnetSpec{
{
Name: "my-subnet",
- CIDR: "10.0.0.0/16",
+ CIDRs: []string{"10.0.0.0/16"},
VNetName: "custom-vnet",
RouteTableName: "my-subnet_route_table",
SecurityGroupName: "my-sg",
@@ -377,7 +493,7 @@ func TestDeleteSubnets(t *testing.T) {
s.SubnetSpecs().Return([]azure.SubnetSpec{
{
Name: "my-subnet",
- CIDR: "10.0.0.0/16",
+ CIDRs: []string{"10.0.0.0/16"},
VNetName: "my-vnet",
RouteTableName: "my-subnet_route_table",
SecurityGroupName: "my-sg",
diff --git a/cloud/services/tags/mock_tags/tags_mock.go b/cloud/services/tags/mock_tags/tags_mock.go
index e0d7d712d88..f6b9249f3c4 100644
--- a/cloud/services/tags/mock_tags/tags_mock.go
+++ b/cloud/services/tags/mock_tags/tags_mock.go
@@ -276,6 +276,20 @@ func (mr *MockTagScopeMockRecorder) RouteTable() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RouteTable", reflect.TypeOf((*MockTagScope)(nil).RouteTable))
}
+// IsIPv6Enabled mocks base method.
+func (m *MockTagScope) IsIPv6Enabled() bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IsIPv6Enabled")
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// IsIPv6Enabled indicates an expected call of IsIPv6Enabled.
+func (mr *MockTagScopeMockRecorder) IsIPv6Enabled() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsIPv6Enabled", reflect.TypeOf((*MockTagScope)(nil).IsIPv6Enabled))
+}
+
// Info mocks base method.
func (m *MockTagScope) Info(msg string, keysAndValues ...interface{}) {
m.ctrl.T.Helper()
diff --git a/cloud/services/virtualmachines/mock_virtualmachines/virtualmachines_mock.go b/cloud/services/virtualmachines/mock_virtualmachines/virtualmachines_mock.go
index 1c58966eff3..0a57af15d70 100644
--- a/cloud/services/virtualmachines/mock_virtualmachines/virtualmachines_mock.go
+++ b/cloud/services/virtualmachines/mock_virtualmachines/virtualmachines_mock.go
@@ -278,6 +278,20 @@ func (mr *MockVMScopeMockRecorder) RouteTable() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RouteTable", reflect.TypeOf((*MockVMScope)(nil).RouteTable))
}
+// IsIPv6Enabled mocks base method.
+func (m *MockVMScope) IsIPv6Enabled() bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IsIPv6Enabled")
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// IsIPv6Enabled indicates an expected call of IsIPv6Enabled.
+func (mr *MockVMScopeMockRecorder) IsIPv6Enabled() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsIPv6Enabled", reflect.TypeOf((*MockVMScope)(nil).IsIPv6Enabled))
+}
+
// Info mocks base method.
func (m *MockVMScope) Info(msg string, keysAndValues ...interface{}) {
m.ctrl.T.Helper()
diff --git a/cloud/services/virtualnetworks/mock_virtualnetworks/virtualnetworks_mock.go b/cloud/services/virtualnetworks/mock_virtualnetworks/virtualnetworks_mock.go
index c7681cd2c44..05323b4666d 100644
--- a/cloud/services/virtualnetworks/mock_virtualnetworks/virtualnetworks_mock.go
+++ b/cloud/services/virtualnetworks/mock_virtualnetworks/virtualnetworks_mock.go
@@ -370,6 +370,20 @@ func (mr *MockVNetScopeMockRecorder) RouteTable() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RouteTable", reflect.TypeOf((*MockVNetScope)(nil).RouteTable))
}
+// IsIPv6Enabled mocks base method.
+func (m *MockVNetScope) IsIPv6Enabled() bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IsIPv6Enabled")
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// IsIPv6Enabled indicates an expected call of IsIPv6Enabled.
+func (mr *MockVNetScopeMockRecorder) IsIPv6Enabled() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsIPv6Enabled", reflect.TypeOf((*MockVNetScope)(nil).IsIPv6Enabled))
+}
+
// VNetSpecs mocks base method.
func (m *MockVNetScope) VNetSpecs() []azure.VNetSpec {
m.ctrl.T.Helper()
diff --git a/cloud/services/virtualnetworks/virtualnetworks.go b/cloud/services/virtualnetworks/virtualnetworks.go
index 7336894ad62..a041a86f354 100644
--- a/cloud/services/virtualnetworks/virtualnetworks.go
+++ b/cloud/services/virtualnetworks/virtualnetworks.go
@@ -36,18 +36,15 @@ func (s *Service) getExisting(ctx context.Context, spec azure.VNetSpec) (*infrav
}
return nil, errors.Wrapf(err, "failed to get VNet %s", spec.Name)
}
- cidr := ""
+ var prefixes []string
if vnet.VirtualNetworkPropertiesFormat != nil && vnet.VirtualNetworkPropertiesFormat.AddressSpace != nil {
- prefixes := to.StringSlice(vnet.VirtualNetworkPropertiesFormat.AddressSpace.AddressPrefixes)
- if prefixes != nil && len(prefixes) > 0 {
- cidr = prefixes[0]
- }
+ prefixes = to.StringSlice(vnet.VirtualNetworkPropertiesFormat.AddressSpace.AddressPrefixes)
}
return &infrav1.VnetSpec{
ResourceGroup: spec.ResourceGroup,
ID: to.String(vnet.ID),
Name: to.String(vnet.Name),
- CidrBlock: cidr,
+ CIDRBlocks: prefixes,
Tags: converters.MapToTags(vnet.Tags),
}, nil
}
@@ -78,6 +75,7 @@ func (s *Service) Reconcile(ctx context.Context) error {
default:
s.Scope.V(2).Info("creating VNet", "VNet", vnetSpec.Name)
+
vnetProperties := network.VirtualNetwork{
Tags: converters.TagsToMap(infrav1.Build(infrav1.BuildParams{
ClusterName: s.Scope.ClusterName(),
@@ -89,7 +87,7 @@ func (s *Service) Reconcile(ctx context.Context) error {
Location: to.StringPtr(s.Scope.Location()),
VirtualNetworkPropertiesFormat: &network.VirtualNetworkPropertiesFormat{
AddressSpace: &network.AddressSpace{
- AddressPrefixes: &[]string{vnetSpec.CIDR},
+ AddressPrefixes: &vnetSpec.CIDRs,
},
},
}
diff --git a/cloud/services/virtualnetworks/virtualnetworks_test.go b/cloud/services/virtualnetworks/virtualnetworks_test.go
index 49e833efbea..1f13d62692c 100644
--- a/cloud/services/virtualnetworks/virtualnetworks_test.go
+++ b/cloud/services/virtualnetworks/virtualnetworks_test.go
@@ -19,6 +19,7 @@ package virtualnetworks
import (
"context"
"net/http"
+ gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock"
"testing"
"github.com/golang/mock/gomock"
@@ -50,7 +51,7 @@ func TestReconcileVnet(t *testing.T) {
{
ResourceGroup: "my-rg",
Name: "vnet-exists",
- CIDR: "10.0.0.0/8",
+ CIDRs: []string{"10.0.0.0/8"},
},
})
m.Get(context.TODO(), "my-rg", "vnet-exists").
@@ -70,6 +71,40 @@ func TestReconcileVnet(t *testing.T) {
}, nil)
},
},
+ {
+ name: "managed ipv6 vnet exists",
+ expectedError: "",
+ expect: func(s *mock_virtualnetworks.MockVNetScopeMockRecorder, m *mock_virtualnetworks.MockClientMockRecorder) {
+ s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New())
+ s.ClusterName().AnyTimes().Return("fake-cluster")
+ s.Vnet().AnyTimes().Return(&infrav1.VnetSpec{Name: "vnet-exists"})
+ s.VNetSpecs().Return([]azure.VNetSpec{
+ {
+ ResourceGroup: "my-rg",
+ Name: "ipv6-vnet-exists",
+ CIDRs: []string{"10.0.0.0/8", "2001:1234:5678:9a00::/56"},
+ },
+ })
+ m.Get(context.TODO(), "my-rg", "ipv6-vnet-exists").
+ Return(network.VirtualNetwork{
+ ID: to.StringPtr("azure/fake/id"),
+ Name: to.StringPtr("ipv6-vnet-exists"),
+ VirtualNetworkPropertiesFormat: &network.VirtualNetworkPropertiesFormat{
+ AddressSpace: &network.AddressSpace{
+ AddressPrefixes: to.StringSlicePtr([]string{
+ "10.0.0.0/8",
+ "2001:1234:5678:9a00::/56",
+ }),
+ },
+ },
+ Tags: map[string]*string{
+ "Name": to.StringPtr("ipv6-vnet-exists"),
+ "sigs.k8s.io_cluster-api-provider-azure_cluster_fake-cluster": to.StringPtr("owned"),
+ "sigs.k8s.io_cluster-api-provider-azure_role": to.StringPtr("common"),
+ },
+ }, nil)
+ },
+ },
{
name: "vnet created successufuly",
expectedError: "",
@@ -78,12 +113,13 @@ func TestReconcileVnet(t *testing.T) {
s.ClusterName().AnyTimes().Return("fake-cluster")
s.Location().AnyTimes().Return("fake-location")
s.AdditionalTags().AnyTimes().Return(infrav1.Tags{})
+ s.IsIPv6Enabled().AnyTimes().Return(false)
s.Vnet().AnyTimes().Return(&infrav1.VnetSpec{Name: "vnet-new"})
s.VNetSpecs().Return([]azure.VNetSpec{
{
ResourceGroup: "my-rg",
Name: "vnet-new",
- CIDR: "10.0.0.0/8",
+ CIDRs: []string{"10.0.0.0/8"},
},
})
m.Get(context.TODO(), "my-rg", "vnet-new").
@@ -92,6 +128,44 @@ func TestReconcileVnet(t *testing.T) {
m.CreateOrUpdate(context.TODO(), "my-rg", "vnet-new", gomock.AssignableToTypeOf(network.VirtualNetwork{}))
},
},
+ {
+			name: "ipv6 vnet created successfully",
+ expectedError: "",
+ expect: func(s *mock_virtualnetworks.MockVNetScopeMockRecorder, m *mock_virtualnetworks.MockClientMockRecorder) {
+ s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New())
+ s.ClusterName().AnyTimes().Return("fake-cluster")
+ s.Location().AnyTimes().Return("fake-location")
+ s.AdditionalTags().AnyTimes().Return(infrav1.Tags{})
+ s.IsIPv6Enabled().AnyTimes().Return(true)
+ s.Vnet().AnyTimes().Return(&infrav1.VnetSpec{Name: "vnet-new"})
+ s.VNetSpecs().Return([]azure.VNetSpec{
+ {
+ ResourceGroup: "my-rg",
+ Name: "vnet-ipv6-new",
+ CIDRs: []string{"10.0.0.0/8", "2001:1234:5678:9a00::/56"},
+ },
+ })
+ m.Get(context.TODO(), "my-rg", "vnet-ipv6-new").
+ Return(network.VirtualNetwork{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found"))
+
+ m.CreateOrUpdate(context.TODO(), "my-rg", "vnet-ipv6-new", gomockinternal.DiffEq(network.VirtualNetwork{
+ Tags: map[string]*string{
+ "Name": to.StringPtr("vnet-ipv6-new"),
+ "sigs.k8s.io_cluster-api-provider-azure_cluster_fake-cluster": to.StringPtr(string(infrav1.ResourceLifecycleOwned)),
+ "sigs.k8s.io_cluster-api-provider-azure_role": to.StringPtr(infrav1.CommonRole),
+ },
+ Location: to.StringPtr("fake-location"),
+ VirtualNetworkPropertiesFormat: &network.VirtualNetworkPropertiesFormat{
+ AddressSpace: &network.AddressSpace{
+ AddressPrefixes: to.StringSlicePtr([]string{
+ "10.0.0.0/8",
+ "2001:1234:5678:9a00::/56",
+ }),
+ },
+ },
+ }))
+ },
+ },
{
name: "unmanaged vnet exists",
expectedError: "",
@@ -104,7 +178,7 @@ func TestReconcileVnet(t *testing.T) {
{
ResourceGroup: "custom-vnet-rg",
Name: "custom-vnet",
- CIDR: "10.0.0.0/16",
+ CIDRs: []string{"10.0.0.0/16"},
},
})
m.Get(context.TODO(), "custom-vnet-rg", "custom-vnet").
@@ -129,13 +203,14 @@ func TestReconcileVnet(t *testing.T) {
s.V(gomock.AssignableToTypeOf(2)).AnyTimes().Return(klogr.New())
s.ClusterName().AnyTimes().Return("fake-cluster")
s.Location().AnyTimes().Return("fake-location")
+ s.IsIPv6Enabled().AnyTimes().Return(false)
s.AdditionalTags().AnyTimes().Return(infrav1.Tags{})
s.Vnet().AnyTimes().Return(&infrav1.VnetSpec{Name: "custom-vnet"})
s.VNetSpecs().Return([]azure.VNetSpec{
{
ResourceGroup: "custom-vnet-rg",
Name: "custom-vnet",
- CIDR: "10.0.0.0/16",
+ CIDRs: []string{"10.0.0.0/16"},
},
})
m.Get(context.TODO(), "custom-vnet-rg", "custom-vnet").
@@ -154,7 +229,7 @@ func TestReconcileVnet(t *testing.T) {
{
ResourceGroup: "custom-vnet-rg",
Name: "custom-vnet",
- CIDR: "10.0.0.0/16",
+ CIDRs: []string{"10.0.0.0/16"},
},
})
m.Get(context.TODO(), "custom-vnet-rg", "custom-vnet").
@@ -170,11 +245,12 @@ func TestReconcileVnet(t *testing.T) {
s.Location().AnyTimes().Return("fake-location")
s.AdditionalTags().AnyTimes().Return(infrav1.Tags{})
s.Vnet().AnyTimes().Return(&infrav1.VnetSpec{Name: "custom-vnet"})
+ s.IsIPv6Enabled().AnyTimes().Return(false)
s.VNetSpecs().Return([]azure.VNetSpec{
{
ResourceGroup: "custom-vnet-rg",
Name: "custom-vnet",
- CIDR: "10.0.0.0/16",
+ CIDRs: []string{"10.0.0.0/16"},
},
})
m.Get(context.TODO(), "custom-vnet-rg", "custom-vnet").
@@ -237,7 +313,7 @@ func TestDeleteVnet(t *testing.T) {
{
ResourceGroup: "my-rg",
Name: "vnet-exists",
- CIDR: "10.0.0.0/16",
+ CIDRs: []string{"10.0.0.0/16"},
},
})
m.Delete(context.TODO(), "my-rg", "vnet-exists")
@@ -260,7 +336,7 @@ func TestDeleteVnet(t *testing.T) {
{
ResourceGroup: "my-rg",
Name: "vnet-exists",
- CIDR: "10.0.0.0/16",
+ CIDRs: []string{"10.0.0.0/16"},
},
})
m.Delete(context.TODO(), "my-rg", "vnet-exists").
@@ -280,7 +356,7 @@ func TestDeleteVnet(t *testing.T) {
{
ResourceGroup: "my-rg",
Name: "my-vnet",
- CIDR: "10.0.0.0/16",
+ CIDRs: []string{"10.0.0.0/16"},
},
})
},
@@ -302,7 +378,7 @@ func TestDeleteVnet(t *testing.T) {
{
ResourceGroup: "my-rg",
Name: "vnet-exists",
- CIDR: "10.0.0.0/16",
+ CIDRs: []string{"10.0.0.0/16"},
},
})
m.Delete(context.TODO(), "my-rg", "vnet-exists").
diff --git a/cloud/types.go b/cloud/types.go
index 87eb2b96399..b5eb16a62b2 100644
--- a/cloud/types.go
+++ b/cloud/types.go
@@ -24,6 +24,7 @@ import (
type PublicIPSpec struct {
Name string
DNSName string
+ IsIPv6 bool
}
// NICSpec defines the specification for a Network Interface.
@@ -42,6 +43,8 @@ type NICSpec struct {
PublicIPName string
VMSize string
AcceleratedNetworking *bool
+ IPv6Enabled bool
+ EnableIPForwarding bool
}
// DiskSpec defines the specification for a Disk.
@@ -55,7 +58,7 @@ type LBSpec struct {
PublicIPName string
Role string
SubnetName string
- SubnetCidr string
+ SubnetCidrs []string
PrivateIPAddress string
APIServerPort int32
}
@@ -74,7 +77,7 @@ type InboundNatSpec struct {
// SubnetSpec defines the specification for a Subnet.
type SubnetSpec struct {
Name string
- CIDR string
+ CIDRs []string
VNetName string
RouteTableName string
SecurityGroupName string
@@ -86,7 +89,7 @@ type SubnetSpec struct {
type VNetSpec struct {
ResourceGroup string
Name string
- CIDR string
+ CIDRs []string
}
// RoleAssignmentSpec defines the specification for a Role Assignment.
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclusters.yaml
index 5e60eae13aa..8d68c17d29d 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclusters.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azureclusters.yaml
@@ -472,9 +472,16 @@ spec:
description: SubnetSpec configures an Azure subnet.
properties:
cidrBlock:
- description: CidrBlock is the CIDR block to be used when
- the provider creates a managed Vnet.
+ description: 'CidrBlock is the CIDR block to be used when
+ the provider creates a managed Vnet. DEPRECATED: Use CIDRBlocks
+ instead'
type: string
+ cidrBlocks:
+ description: CIDRBlocks defines the subnet's address space,
+ specified as one or more address prefixes in CIDR notation.
+ items:
+ type: string
+ type: array
id:
description: ID defines a unique identifier to reference
this resource.
@@ -576,9 +583,17 @@ spec:
description: Vnet is the configuration for the Azure virtual network.
properties:
cidrBlock:
- description: CidrBlock is the CIDR block to be used when the
- provider creates a managed virtual network.
- type: string
+ description: 'CidrBlock is the CIDR block to be used when
+ the provider creates a managed virtual network. DEPRECATED:
+ Use CIDRBlocks instead'
+ type: string
+ cidrBlocks:
+ description: CIDRBlocks defines the virtual network's address
+ space, specified as one or more address prefixes in CIDR
+ notation.
+ items:
+ type: string
+ type: array
id:
description: ID is the identifier of the virtual network this
provider should use to create resources.
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachines.yaml
index 8b10d098770..87fb6476e80 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachines.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachines.yaml
@@ -292,6 +292,13 @@ spec:
- nameSuffix
type: object
type: array
+ enableIPForwarding:
+ description: EnableIPForwarding enables IP Forwarding in Azure which
+ is required for some CNI's to send traffic from a pods on one machine
+ to another. This is required for IpV6 with Calico in combination
+ with User Defined Routes (set by the Azure Cloud Controller manager).
+ Default is false for disabled.
+ type: boolean
failureDomain:
description: FailureDomain is the failure domain unique identifier
this Machine should be attached to, as defined in Cluster API. This
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachinetemplates.yaml
index 8d9ecc43377..08366501e4a 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachinetemplates.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremachinetemplates.yaml
@@ -230,6 +230,14 @@ spec:
- nameSuffix
type: object
type: array
+ enableIPForwarding:
+ description: EnableIPForwarding enables IP Forwarding in Azure
+ which is required for some CNI's to send traffic from a
+ pods on one machine to another. This is required for IpV6
+ with Calico in combination with User Defined Routes (set
+ by the Azure Cloud Controller manager). Default is false
+ for disabled.
+ type: boolean
failureDomain:
description: FailureDomain is the failure domain unique identifier
this Machine should be attached to, as defined in Cluster
diff --git a/controllers/azurecluster_controller.go b/controllers/azurecluster_controller.go
index f576b78749b..53b30e10e35 100644
--- a/controllers/azurecluster_controller.go
+++ b/controllers/azurecluster_controller.go
@@ -18,11 +18,11 @@ package controllers
import (
"context"
+ corev1 "k8s.io/api/core/v1"
"time"
"github.com/go-logr/logr"
"github.com/pkg/errors"
- corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/tools/record"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
@@ -167,6 +167,33 @@ func (r *AzureClusterReconciler) reconcileNormal(ctx context.Context, clusterSco
return reconcile.Result{}, err
}
+ // Handle backcompat for CidrBlock
+ if clusterScope.Vnet().CidrBlock != "" {
+ message := "vnet cidrBlock is deprecated, use cidrBlocks instead"
+ clusterScope.Info(message)
+ r.Recorder.Eventf(clusterScope.AzureCluster, corev1.EventTypeWarning, "DeprecatedField", message)
+
+ // Set CIDRBlocks if it is not set.
+ if len(clusterScope.Vnet().CIDRBlocks) == 0 {
+ clusterScope.Info("vnet cidrBlocks not set, setting with value from deprecated vnet cidrBlock", "cidrBlock", clusterScope.Vnet().CidrBlock)
+ clusterScope.Vnet().CIDRBlocks = []string{clusterScope.Vnet().CidrBlock}
+ }
+ }
+
+ for _, subnet := range clusterScope.Subnets() {
+ if subnet.CidrBlock != "" {
+ message := "subnet cidrBlock is deprecated, use cidrBlocks instead"
+ clusterScope.Info(message)
+ r.Recorder.Eventf(clusterScope.AzureCluster, corev1.EventTypeWarning, "DeprecatedField", message)
+
+ // Set CIDRBlocks if it is not set.
+ if len(subnet.CIDRBlocks) == 0 {
+ clusterScope.Info("subnet cidrBlocks not set, setting with value from deprecated subnet cidrBlock", "cidrBlock", subnet.CidrBlock)
+ subnet.CIDRBlocks = []string{subnet.CidrBlock}
+ }
+ }
+ }
+
err := newAzureClusterReconciler(clusterScope).Reconcile(ctx)
if err != nil {
wrappedErr := errors.Wrap(err, "failed to reconcile cluster services")
diff --git a/docs/book/src/SUMMARY.md b/docs/book/src/SUMMARY.md
index 59e8a6d7223..b45837c1b27 100644
--- a/docs/book/src/SUMMARY.md
+++ b/docs/book/src/SUMMARY.md
@@ -10,6 +10,7 @@
- [External Cloud Provider](./topics/external-cloud-provider.md)
- [Failure Domains](./topics/failure-domains.md)
- [Identity](./topics/identity.md)
+ - [IPv6](./topics/ipv6.md)
- [Machine Pools (VMSS)](./topics/machinepools.md)
- [Managed Clusters (AKS)](./topics/managedcluster.md)
- [Spot Virtual Machines](./topics/spot-vms.md)
diff --git a/docs/book/src/topics/custom-vnet.md b/docs/book/src/topics/custom-vnet.md
index 43a6ff10bd9..674acd6ecfc 100644
--- a/docs/book/src/topics/custom-vnet.md
+++ b/docs/book/src/topics/custom-vnet.md
@@ -68,14 +68,17 @@ spec:
networkSpec:
vnet:
name: my-vnet
- cidrBlock: 10.0.0.0/16
+ cidrBlocks:
+ - 10.0.0.0/16
subnets:
- name: my-subnet-cp
role: control-plane
- cidrBlock: 10.0.1.0/24
+ cidrBlocks:
+ - 10.0.1.0/24
- name: my-subnet-node
role: node
- cidrBlock: 10.0.2.0/24
+ cidrBlocks:
+ - 10.0.2.0/24
resourceGroup: cluster-example
```
@@ -102,17 +105,19 @@ spec:
networkSpec:
vnet:
name: my-vnet
- cidrBlock: 10.0.0.0/16
+ cidrBlocks:
+ - 10.0.0.0/16
subnets:
- name: my-subnet-cp
role: control-plane
- cidrBlock: 10.0.1.0/24
+ cidrBlocks:
+ - 10.0.1.0/24
securityGroup:
name: my-subnet-cp-nsg
ingressRule:
- name: "allow_ssh"
description: "allow SSH"
- priority: 100
+ priority: 2200
protocol: "*"
destination: "*"
destinationPorts: "22"
@@ -120,7 +125,7 @@ spec:
sourcePorts: "*"
- name: "allow_apiserver"
description: "Allow K8s API Server"
- priority: 101
+ priority: 2201
protocol: "*"
destination: "*"
destinationPorts: "6443"
@@ -128,7 +133,7 @@ spec:
sourcePorts: "*"
- name: "allow_port_50000"
description: "allow port 50000"
- priority: 102
+ priority: 2202
protocol: "*"
destination: "*"
destinationPorts: "50000"
@@ -136,6 +141,7 @@ spec:
sourcePorts: "*"
- name: my-subnet-node
role: node
- cidrBlock: 10.0.2.0/24
+ cidrBlocks:
+ - 10.0.2.0/24
resourceGroup: cluster-example
```
diff --git a/docs/book/src/topics/ipv6.md b/docs/book/src/topics/ipv6.md
new file mode 100644
index 00000000000..a00293c58cc
--- /dev/null
+++ b/docs/book/src/topics/ipv6.md
@@ -0,0 +1,142 @@
+# IPv6 clusters
+
+## Overview
+
+CAPZ enables you to create IPv6 Kubernetes clusters on Microsoft Azure.
+
+- IPv6 support is available for Kubernetes version 1.18.0 and later on Azure.
+- IPv6 support is in beta in the Kubernetes community as of Kubernetes version 1.18.
+
+To deploy a cluster using IPv6, use the [ipv6 flavor template](https://raw.githubusercontent.com/kubernetes-sigs/cluster-api-provider-azure/master/templates/cluster-template-ipv6.yaml).
+
+
+
+Things to try out after the cluster is created:
+
+- Nodes are Kubernetes version 1.18.0 or later
+- Nodes have an IPv6 Internal-IP
+
+```bash
+kubectl get nodes -o wide
+NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
+ipv6-0-control-plane-8xqgw Ready master 53m v1.18.8 2001:1234:5678:9abc::4 Ubuntu 18.04.5 LTS 5.3.0-1034-azure containerd://1.3.4
+ipv6-0-control-plane-crpvf Ready master 49m v1.18.8 2001:1234:5678:9abc::5 Ubuntu 18.04.5 LTS 5.3.0-1034-azure containerd://1.3.4
+ipv6-0-control-plane-nm5v9 Ready master 46m v1.18.8 2001:1234:5678:9abc::6 Ubuntu 18.04.5 LTS 5.3.0-1034-azure containerd://1.3.4
+ipv6-0-md-0-7k8vm Ready 49m v1.18.8 2001:1234:5678:9abd::5 Ubuntu 18.04.5 LTS 5.3.0-1034-azure containerd://1.3.4
+ipv6-0-md-0-mwfpt Ready 50m v1.18.8 2001:1234:5678:9abd::4 Ubuntu 18.04.5 LTS 5.3.0-1034-azure containerd://1.3.4
+```
+
+- Nodes have 2 internal IPs, one from each IP family. IPv6 clusters on Azure run on dual-stack hosts. The IPv6 is the primary IP.
+
+```bash
+kubectl get nodes ipv6-0-md-0-7k8vm -o go-template --template='{{range .status.addresses}}{{printf "%s: %s \n" .type .address}}{{end}}'
+Hostname: ipv6-0-md-0-7k8vm
+InternalIP: 2001:1234:5678:9abd::5
+InternalIP: 10.1.0.5
+```
+
+- Nodes have an IPv6 PodCIDR
+
+```bash
+kubectl get nodes ipv6-0-md-0-7k8vm -o go-template --template='{{.spec.podCIDR}}'
+2001:1234:5678:9a40:200::/72
+```
+
+- Pods have an IPv6 IP
+
+```bash
+kubectl get pods nginx-f89759699-h65lt -o go-template --template='{{.status.podIP}}'
+2001:1234:5678:9a40:300::1f
+```
+
+- Able to reach other pods in cluster using IPv6
+
+```bash
+# inside the nginx-pod
+# # ifconfig eth0
+ eth0 Link encap:Ethernet HWaddr 3E:DA:12:82:4C:C2
+ inet6 addr: fe80::3cda:12ff:fe82:4cc2/64 Scope:Link
+ inet6 addr: 2001:1234:5678:9a40:100::4/128 Scope:Global
+ UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
+ RX packets:15 errors:0 dropped:0 overruns:0 frame:0
+ TX packets:20 errors:0 dropped:1 overruns:0 carrier:0
+ collisions:0 txqueuelen:0
+ RX bytes:1562 (1.5 KiB) TX bytes:1832 (1.7 KiB)
+# ping 2001:1234:5678:9a40::2
+PING 2001:1234:5678:9a40::2 (2001:1234:5678:9a40::2): 56 data bytes
+64 bytes from 2001:1234:5678:9a40::2: seq=0 ttl=62 time=1.690 ms
+64 bytes from 2001:1234:5678:9a40::2: seq=1 ttl=62 time=1.009 ms
+64 bytes from 2001:1234:5678:9a40::2: seq=2 ttl=62 time=1.388 ms
+64 bytes from 2001:1234:5678:9a40::2: seq=3 ttl=62 time=0.925 ms
+```
+
+- Kubernetes services have IPv6 ClusterIP and ExternalIP
+
+```bash
+kubectl get svc
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+kubernetes ClusterIP fd00::1 443/TCP 94m
+nginx-service LoadBalancer fd00::4a12 2603:1030:805:2::b 80:32136/TCP 40m
+```
+
+- Able to reach the workload on IPv6 ExternalIP
+
+NOTE: this will only work if your ISP has IPv6 enabled. Alternatively, you can connect from an Azure VM with IPv6.
+
+```bash
+curl [2603:1030:805:2::b] -v
+* Rebuilt URL to: [2603:1030:805:2::b]/
+* Trying 2603:1030:805:2::b...
+* TCP_NODELAY set
+* Connected to 2603:1030:805:2::b (2603:1030:805:2::b) port 80 (#0)
+> GET / HTTP/1.1
+> Host: [2603:1030:805:2::b]
+> User-Agent: curl/7.58.0
+> Accept: */*
+>
+< HTTP/1.1 200 OK
+< Server: nginx/1.17.0
+< Date: Fri, 18 Sep 2020 23:07:12 GMT
+< Content-Type: text/html
+< Content-Length: 612
+< Last-Modified: Tue, 21 May 2019 15:33:12 GMT
+< Connection: keep-alive
+< ETag: "5ce41a38-264"
+< Accept-Ranges: bytes
+```
+
+## Known Limitations
+
+The reference [ipv6 flavor](https://raw.githubusercontent.com/kubernetes-sigs/cluster-api-provider-azure/master/templates/cluster-template-ipv6.yaml) takes care of most of these for you, but it is important to be aware of these if you decide to write your own IPv6 cluster template, or use a different bootstrap provider.
+
+- Kubernetes version needs to be 1.18+
+
+- etcd needs to listen on 127.0.0.1:2379 in addition to IPv6 IPs to resolve an issue with the etcd health check as the dial transport is only doing IPv4. This is done by modifying the `listen-client-urls` etcd arg in postKubeadmCommands as follows:
+```yaml
+ - sed -i '\#--listen-client-urls#s#$#,https://127.0.0.1:2379#' /etc/kubernetes/manifests/etcd.yaml
+```
+
+- Port 53 needs to be free on the host so coredns can use it. On Ubuntu 18.04, systemd-resolved listens on port 53 on the host and is used by default for DNS. This causes the coredns pods to crash with "bind: address already in use" on single-stack IPv6, because the coredns pods run on hostNetwork to leverage the host routes for DNS resolution. This is worked around by running the following commands in postKubeadmCommands:
+```yaml
+ - echo "DNSStubListener=no" >> /etc/systemd/resolved.conf
+ - mv /etc/resolv.conf /etc/resolv.conf.OLD && ln -s /run/systemd/resolve/resolv.conf
+ /etc/resolv.conf
+ - systemctl restart systemd-resolved
+```
+
+- The coredns pod needs to run on the host network, so it can leverage host routes for the v4 network to do the DNS resolution. The workaround is to edit the coredns deployment and add `hostNetwork: true`:
+```bash
+kubectl patch deploy/coredns -n kube-system --type=merge -p '{"spec": {"template": {"spec":{"hostNetwork": true}}}}'
+```
+
+- When using [Calico CNI](https://docs.projectcalico.org/reference/public-cloud/azure), the selected pod’s subnet should be part of your Azure virtual network IP range.
diff --git a/templates/addons/calico-ipv6.yaml b/templates/addons/calico-ipv6.yaml
new file mode 100644
index 00000000000..8cb28c00b8d
--- /dev/null
+++ b/templates/addons/calico-ipv6.yaml
@@ -0,0 +1,791 @@
+---
+# Source: calico/templates/calico-config.yaml
+# This ConfigMap is used to configure a self-hosted Calico installation.
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: calico-config
+ namespace: kube-system
+data:
+ # Typha is disabled.
+ typha_service_name: "none"
+ # Configure the backend to use.
+ calico_backend: "none"
+
+ # The CNI network configuration to install on each node. The special
+ # values in this config will be automatically populated.
+ # https://docs.projectcalico.org/reference/cni-plugin/configuration#using-host-local-ipam
+ cni_network_config: |-
+ {
+ "name": "k8s-pod-network",
+ "cniVersion": "0.3.1",
+ "plugins": [
+ {
+ "type": "calico",
+ "log_level": "info",
+ "datastore_type": "kubernetes",
+ "nodename": "__KUBERNETES_NODE_NAME__",
+ "mtu": 1500,
+ "ipam": {
+ "type": "host-local",
+ "subnet": "usePodCidr"
+ },
+ "policy": {
+ "type": "k8s"
+ },
+ "kubernetes": {
+ "kubeconfig": "__KUBECONFIG_FILEPATH__"
+ }
+ },
+ {
+ "type": "portmap",
+ "snat": true,
+ "capabilities": {"portMappings": true}
+ }
+ ]
+ }
+
+---
+# Source: calico/templates/kdd-crds.yaml
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: bgpconfigurations.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: BGPConfiguration
+ plural: bgpconfigurations
+ singular: bgpconfiguration
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: bgppeers.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: BGPPeer
+ plural: bgppeers
+ singular: bgppeer
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: blockaffinities.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: BlockAffinity
+ plural: blockaffinities
+ singular: blockaffinity
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: clusterinformations.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: ClusterInformation
+ plural: clusterinformations
+ singular: clusterinformation
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: felixconfigurations.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: FelixConfiguration
+ plural: felixconfigurations
+ singular: felixconfiguration
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: globalnetworkpolicies.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: GlobalNetworkPolicy
+ plural: globalnetworkpolicies
+ singular: globalnetworkpolicy
+ shortNames:
+ - gnp
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: globalnetworksets.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: GlobalNetworkSet
+ plural: globalnetworksets
+ singular: globalnetworkset
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: hostendpoints.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: HostEndpoint
+ plural: hostendpoints
+ singular: hostendpoint
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: ipamblocks.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: IPAMBlock
+ plural: ipamblocks
+ singular: ipamblock
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: ipamconfigs.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: IPAMConfig
+ plural: ipamconfigs
+ singular: ipamconfig
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: ipamhandles.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: IPAMHandle
+ plural: ipamhandles
+ singular: ipamhandle
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: ippools.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: IPPool
+ plural: ippools
+ singular: ippool
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: kubecontrollersconfigurations.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: KubeControllersConfiguration
+ plural: kubecontrollersconfigurations
+ singular: kubecontrollersconfiguration
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: networkpolicies.crd.projectcalico.org
+spec:
+ scope: Namespaced
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: NetworkPolicy
+ plural: networkpolicies
+ singular: networkpolicy
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: networksets.crd.projectcalico.org
+spec:
+ scope: Namespaced
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: NetworkSet
+ plural: networksets
+ singular: networkset
+
+---
+---
+# Source: calico/templates/rbac.yaml
+
+# Include a clusterrole for the kube-controllers component,
+# and bind it to the calico-kube-controllers serviceaccount.
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: calico-kube-controllers
+rules:
+ # Nodes are watched to monitor for deletions.
+ - apiGroups: [""]
+ resources:
+ - nodes
+ verbs:
+ - watch
+ - list
+ - get
+ # Pods are queried to check for existence.
+ - apiGroups: [""]
+ resources:
+ - pods
+ verbs:
+ - get
+ # IPAM resources are manipulated when nodes are deleted.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - ippools
+ verbs:
+ - list
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - blockaffinities
+ - ipamblocks
+ - ipamhandles
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ # kube-controllers manages hostendpoints.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - hostendpoints
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ # Needs access to update clusterinformations.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - clusterinformations
+ verbs:
+ - get
+ - create
+ - update
+ # KubeControllersConfiguration is where it gets its config
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - kubecontrollersconfigurations
+ verbs:
+ # read its own config
+ - get
+ # create a default if none exists
+ - create
+ # update status
+ - update
+ # watch for changes
+ - watch
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: calico-kube-controllers
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-kube-controllers
+subjects:
+- kind: ServiceAccount
+ name: calico-kube-controllers
+ namespace: kube-system
+---
+# Include a clusterrole for the calico-node DaemonSet,
+# and bind it to the calico-node serviceaccount.
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: calico-node
+rules:
+ # The CNI plugin needs to get pods, nodes, and namespaces.
+ - apiGroups: [""]
+ resources:
+ - pods
+ - nodes
+ - namespaces
+ verbs:
+ - get
+ - apiGroups: [""]
+ resources:
+ - endpoints
+ - services
+ verbs:
+ # Used to discover service IPs for advertisement.
+ - watch
+ - list
+ # Used to discover Typhas.
+ - get
+ # Pod CIDR auto-detection on kubeadm needs access to config maps.
+ - apiGroups: [""]
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - apiGroups: [""]
+ resources:
+ - nodes/status
+ verbs:
+ # Needed for clearing NodeNetworkUnavailable flag.
+ - patch
+ # Calico stores some configuration information in node annotations.
+ - update
+ # Watch for changes to Kubernetes NetworkPolicies.
+ - apiGroups: ["networking.k8s.io"]
+ resources:
+ - networkpolicies
+ verbs:
+ - watch
+ - list
+ # Used by Calico for policy information.
+ - apiGroups: [""]
+ resources:
+ - pods
+ - namespaces
+ - serviceaccounts
+ verbs:
+ - list
+ - watch
+ # The CNI plugin patches pods/status.
+ - apiGroups: [""]
+ resources:
+ - pods/status
+ verbs:
+ - patch
+ # Calico monitors various CRDs for config.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - globalfelixconfigs
+ - felixconfigurations
+ - bgppeers
+ - globalbgpconfigs
+ - bgpconfigurations
+ - ippools
+ - ipamblocks
+ - globalnetworkpolicies
+ - globalnetworksets
+ - networkpolicies
+ - networksets
+ - clusterinformations
+ - hostendpoints
+ - blockaffinities
+ verbs:
+ - get
+ - list
+ - watch
+ # Calico must create and update some CRDs on startup.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - ippools
+ - felixconfigurations
+ - clusterinformations
+ verbs:
+ - create
+ - update
+ # Calico stores some configuration information on the node.
+ - apiGroups: [""]
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+      # These permissions are only required for upgrade from v2.6, and can
+ # be removed after upgrade or on fresh installations.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - bgpconfigurations
+ - bgppeers
+ verbs:
+ - create
+ - update
+ # These permissions are required for Calico CNI to perform IPAM allocations.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - blockaffinities
+ - ipamblocks
+ - ipamhandles
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - ipamconfigs
+ verbs:
+ - get
+ # Block affinities must also be watchable by confd for route aggregation.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - blockaffinities
+ verbs:
+ - watch
+ # The Calico IPAM migration needs to get daemonsets. These permissions can be
+ # removed if not upgrading from an installation using host-local IPAM.
+ - apiGroups: ["apps"]
+ resources:
+ - daemonsets
+ verbs:
+ - get
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: calico-node
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-node
+subjects:
+- kind: ServiceAccount
+ name: calico-node
+ namespace: kube-system
+
+---
+# Source: calico/templates/calico-node.yaml
+# This manifest installs the calico-node container, as well
+# as the CNI plugins and network config on
+# each master and worker node in a Kubernetes cluster.
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+ name: calico-node
+ namespace: kube-system
+ labels:
+ k8s-app: calico-node
+spec:
+ selector:
+ matchLabels:
+ k8s-app: calico-node
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ template:
+ metadata:
+ labels:
+ k8s-app: calico-node
+ annotations:
+ # This, along with the CriticalAddonsOnly toleration below,
+ # marks the pod as a critical add-on, ensuring it gets
+ # priority scheduling and that its resources are reserved
+ # if it ever gets evicted.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ nodeSelector:
+ kubernetes.io/os: linux
+ hostNetwork: true
+ tolerations:
+ # Make sure calico-node gets scheduled on all nodes.
+ - effect: NoSchedule
+ operator: Exists
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ serviceAccountName: calico-node
+ # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
+ # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
+ terminationGracePeriodSeconds: 0
+ priorityClassName: system-node-critical
+ initContainers:
+ # This container installs the CNI binaries
+ # and CNI network config file on each node.
+ - name: install-cni
+ image: calico/cni:v3.14.1
+ command: ["/install-cni.sh"]
+ env:
+ # Name of the CNI config file to create.
+ - name: CNI_CONF_NAME
+ value: "10-calico.conflist"
+ # The CNI network config to install on each node.
+ - name: CNI_NETWORK_CONFIG
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: cni_network_config
+ # Set the hostname based on the k8s node name.
+ - name: KUBERNETES_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ # Prevents the container from sleeping forever.
+ - name: SLEEP
+ value: "false"
+ volumeMounts:
+ - mountPath: /host/opt/cni/bin
+ name: cni-bin-dir
+ - mountPath: /host/etc/cni/net.d
+ name: cni-net-dir
+ securityContext:
+ privileged: true
+ # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
+ # to communicate with Felix over the Policy Sync API.
+ - name: flexvol-driver
+ image: calico/pod2daemon-flexvol:v3.14.1
+ volumeMounts:
+ - name: flexvol-driver-host
+ mountPath: /host/driver
+ securityContext:
+ privileged: true
+ containers:
+ # Runs calico-node container on each Kubernetes node. This
+ # container programs network policy and routes on each
+ # host.
+ - name: calico-node
+ image: calico/node:v3.14.1
+ env:
+ # Use Kubernetes API as the backing datastore.
+ - name: DATASTORE_TYPE
+ value: "kubernetes"
+ # Wait for the datastore.
+ - name: WAIT_FOR_DATASTORE
+ value: "true"
+ # Set based on the k8s node name.
+ - name: NODENAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ # Choose the backend to use.
+ - name: CALICO_NETWORKING_BACKEND
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: calico_backend
+ # Cluster type to identify the deployment type
+ - name: CLUSTER_TYPE
+ value: "k8s"
+ # Enable or Disable VXLAN on the default IP pool.
+ - name: CALICO_IPV4POOL_VXLAN
+ value: "Never"
+ # The default IPv4 pool to create on startup if none exists. Pod IPs will be
+ # chosen from this range. Changing this value after installation will have
+ # no effect. This should fall within `--cluster-cidr`.
+ # - name: CALICO_IPV4POOL_CIDR
+ # value: "192.168.0.0/16"
+ # https://docs.projectcalico.org/reference/public-cloud/azure#azure-user-defined-routes
+          - name: CALICO_IPV6POOL_CIDR
+ value: "2001:1234:5678:9a40::/58"
+ - name: IP6
+ value: "autodetect"
+ # Disable file logging so `kubectl logs` works.
+ - name: CALICO_DISABLE_FILE_LOGGING
+ value: "true"
+ # Set Felix endpoint to host default action to ACCEPT.
+ - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
+ value: "ACCEPT"
+          # Enable IPv6 on Kubernetes.
+ - name: FELIX_IPV6SUPPORT
+ value: "true"
+ # Set Felix logging to "info"
+ - name: FELIX_LOGSEVERITYSCREEN
+ value: "info"
+ - name: FELIX_HEALTHENABLED
+ value: "true"
+ securityContext:
+ privileged: true
+ resources:
+ requests:
+ cpu: 250m
+ livenessProbe:
+ exec:
+ command:
+ - /bin/calico-node
+ - -felix-live
+ periodSeconds: 10
+ initialDelaySeconds: 10
+ failureThreshold: 6
+ readinessProbe:
+ exec:
+ command:
+ - /bin/calico-node
+ - -felix-ready
+ periodSeconds: 10
+ volumeMounts:
+ - mountPath: /lib/modules
+ name: lib-modules
+ readOnly: true
+ - mountPath: /run/xtables.lock
+ name: xtables-lock
+ readOnly: false
+ - mountPath: /var/run/calico
+ name: var-run-calico
+ readOnly: false
+ - mountPath: /var/lib/calico
+ name: var-lib-calico
+ readOnly: false
+ - name: policysync
+ mountPath: /var/run/nodeagent
+ volumes:
+ # Used by calico-node.
+ - name: lib-modules
+ hostPath:
+ path: /lib/modules
+ - name: var-run-calico
+ hostPath:
+ path: /var/run/calico
+ - name: var-lib-calico
+ hostPath:
+ path: /var/lib/calico
+ - name: xtables-lock
+ hostPath:
+ path: /run/xtables.lock
+ type: FileOrCreate
+ # Used to install CNI.
+ - name: cni-bin-dir
+ hostPath:
+ path: /opt/cni/bin
+ - name: cni-net-dir
+ hostPath:
+ path: /etc/cni/net.d
+ # Mount in the directory for host-local IPAM allocations. This is
+ # used when upgrading from host-local to calico-ipam, and can be removed
+ # if not using the upgrade-ipam init container.
+ - name: host-local-net-dir
+ hostPath:
+ path: /var/lib/cni/networks
+ # Used to create per-pod Unix Domain Sockets
+ - name: policysync
+ hostPath:
+ type: DirectoryOrCreate
+ path: /var/run/nodeagent
+ # Used to install Flex Volume Driver
+ - name: flexvol-driver-host
+ hostPath:
+ type: DirectoryOrCreate
+ path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
+---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-node
+ namespace: kube-system
+
+---
+# Source: calico/templates/calico-kube-controllers.yaml
+# See https://github.com/projectcalico/kube-controllers
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+ labels:
+ k8s-app: calico-kube-controllers
+spec:
+ # The controllers can only have a single active instance.
+ replicas: 1
+ selector:
+ matchLabels:
+ k8s-app: calico-kube-controllers
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+ labels:
+ k8s-app: calico-kube-controllers
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ nodeSelector:
+ kubernetes.io/os: linux
+ tolerations:
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ serviceAccountName: calico-kube-controllers
+ priorityClassName: system-cluster-critical
+ containers:
+ - name: calico-kube-controllers
+ image: calico/kube-controllers:v3.14.1
+ env:
+ # Choose which controllers to run.
+ - name: ENABLED_CONTROLLERS
+ value: node
+ - name: DATASTORE_TYPE
+ value: kubernetes
+ readinessProbe:
+ exec:
+ command:
+ - /usr/bin/check-status
+ - -r
+
+---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+
+---
+# Source: calico/templates/calico-etcd-secrets.yaml
+
+---
+# Source: calico/templates/calico-typha.yaml
+
+---
+# Source: calico/templates/configure-canal.yaml
+
diff --git a/templates/cluster-template-ipv6.yaml b/templates/cluster-template-ipv6.yaml
new file mode 100644
index 00000000000..0aa89b5759e
--- /dev/null
+++ b/templates/cluster-template-ipv6.yaml
@@ -0,0 +1,257 @@
+apiVersion: cluster.x-k8s.io/v1alpha3
+kind: Cluster
+metadata:
+ name: ${CLUSTER_NAME}
+ namespace: default
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks:
+ - 2001:1234:5678:9a40::/58
+ services:
+ cidrBlocks:
+ - fd00::/108
+ controlPlaneRef:
+ apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
+ kind: KubeadmControlPlane
+ name: ${CLUSTER_NAME}-control-plane
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+ kind: AzureCluster
+ name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: AzureCluster
+metadata:
+ name: ${CLUSTER_NAME}
+ namespace: default
+spec:
+ location: ${AZURE_LOCATION}
+ networkSpec:
+ subnets:
+ - cidrBlocks:
+ - 10.0.0.0/16
+ - 2001:1234:5678:9abc::/64
+ name: control-plane-subnet
+ role: control-plane
+ - cidrBlocks:
+ - 10.1.0.0/16
+ - 2001:1234:5678:9abd::/64
+ name: node-subnet
+ role: node
+ vnet:
+ cidrBlocks:
+ - 10.0.0.0/8
+ - 2001:1234:5678:9a00::/56
+ name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet}
+ resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}}
+ subscriptionID: ${AZURE_SUBSCRIPTION_ID}
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
+kind: KubeadmControlPlane
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+ namespace: default
+spec:
+ infrastructureTemplate:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+ kind: AzureMachineTemplate
+ name: ${CLUSTER_NAME}-control-plane
+ kubeadmConfigSpec:
+ clusterConfiguration:
+ apiServer:
+ extraArgs:
+ bind-address: '::'
+ cloud-config: /etc/kubernetes/azure.json
+ cloud-provider: azure
+ extraVolumes:
+ - hostPath: /etc/kubernetes/azure.json
+ mountPath: /etc/kubernetes/azure.json
+ name: cloud-config
+ readOnly: true
+ timeoutForControlPlane: 20m
+ controllerManager:
+ extraArgs:
+ allocate-node-cidrs: "true"
+ bind-address: '::'
+ cloud-config: /etc/kubernetes/azure.json
+ cloud-provider: azure
+ cluster-cidr: 2001:1234:5678:9a40::/58
+ cluster-name: ${CLUSTER_NAME}
+ configure-cloud-routes: "true"
+ extraVolumes:
+ - hostPath: /etc/kubernetes/azure.json
+ mountPath: /etc/kubernetes/azure.json
+ name: cloud-config
+ readOnly: true
+ etcd:
+ local:
+ dataDir: /var/lib/etcddisk/etcd
+ scheduler:
+ extraArgs:
+ bind-address: '::'
+ diskSetup:
+ filesystems:
+ - device: /dev/disk/azure/scsi1/lun0
+ extraOpts:
+ - -E
+ - lazy_itable_init=1,lazy_journal_init=1
+ filesystem: ext4
+ label: etcd_disk
+ - device: ephemeral0.1
+ filesystem: ext4
+ label: ephemeral0
+ replaceFS: ntfs
+ partitions:
+ - device: /dev/disk/azure/scsi1/lun0
+ layout: true
+ overwrite: false
+ tableType: gpt
+ files:
+ - contentFrom:
+ secret:
+ key: control-plane-azure.json
+ name: ${CLUSTER_NAME}-control-plane-azure-json
+ owner: root:root
+ path: /etc/kubernetes/azure.json
+ permissions: "0644"
+ initConfiguration:
+ localAPIEndpoint:
+ advertiseAddress: '::'
+ bindPort: 6443
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-config: /etc/kubernetes/azure.json
+ cloud-provider: azure
+ cluster-dns: fd00::10
+ node-ip: '::'
+ name: '{{ ds.meta_data["local_hostname"] }}'
+ joinConfiguration:
+ controlPlane:
+ localAPIEndpoint:
+ advertiseAddress: '::'
+ bindPort: 6443
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-config: /etc/kubernetes/azure.json
+ cloud-provider: azure
+ cluster-dns: fd00::10
+ node-ip: '::'
+ name: '{{ ds.meta_data["local_hostname"] }}'
+ mounts:
+ - - LABEL=etcd_disk
+ - /var/lib/etcddisk
+ postKubeadmCommands:
+ - sed -i '\#--listen-client-urls#s#$#,https://127.0.0.1:2379#' /etc/kubernetes/manifests/etcd.yaml
+ - echo "DNSStubListener=no" >> /etc/systemd/resolved.conf
+ - mv /etc/resolv.conf /etc/resolv.conf.OLD && ln -s /run/systemd/resolve/resolv.conf
+ /etc/resolv.conf
+ - systemctl restart systemd-resolved
+ useExperimentalRetryJoin: true
+ replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: AzureMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+ namespace: default
+spec:
+ template:
+ spec:
+ dataDisks:
+ - diskSizeGB: 256
+ lun: 0
+ nameSuffix: etcddisk
+ enableIPForwarding: true
+ location: ${AZURE_LOCATION}
+ osDisk:
+ diskSizeGB: 128
+ managedDisk:
+ storageAccountType: Premium_LRS
+ osType: Linux
+ sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""}
+ vmSize: ${AZURE_CONTROL_PLANE_MACHINE_TYPE}
+---
+apiVersion: cluster.x-k8s.io/v1alpha3
+kind: MachineDeployment
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+ namespace: default
+spec:
+ clusterName: ${CLUSTER_NAME}
+ replicas: ${WORKER_MACHINE_COUNT}
+ selector:
+ matchLabels: null
+ template:
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
+ kind: KubeadmConfigTemplate
+ name: ${CLUSTER_NAME}-md-0
+ clusterName: ${CLUSTER_NAME}
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+ kind: AzureMachineTemplate
+ name: ${CLUSTER_NAME}-md-0
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: AzureMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+ namespace: default
+spec:
+ template:
+ spec:
+ enableIPForwarding: true
+ location: ${AZURE_LOCATION}
+ osDisk:
+ diskSizeGB: 30
+ managedDisk:
+ storageAccountType: Premium_LRS
+ osType: Linux
+ sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""}
+ vmSize: ${AZURE_NODE_MACHINE_TYPE}
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
+kind: KubeadmConfigTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+ namespace: default
+spec:
+ template:
+ spec:
+ clusterConfiguration:
+ apiServer:
+ extraArgs:
+ bind-address: '::'
+ controllerManager:
+ extraArgs:
+ bind-address: '::'
+ scheduler:
+ extraArgs:
+ bind-address: '::'
+ files:
+ - contentFrom:
+ secret:
+ key: worker-node-azure.json
+ name: ${CLUSTER_NAME}-md-0-azure-json
+ owner: root:root
+ path: /etc/kubernetes/azure.json
+ permissions: "0644"
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-config: /etc/kubernetes/azure.json
+ cloud-provider: azure
+          cluster-dns: fd00::10
+ node-ip: '::'
+ name: '{{ ds.meta_data["local_hostname"] }}'
+ postKubeadmCommands:
+ - echo "DNSStubListener=no" >> /etc/systemd/resolved.conf
+ - mv /etc/resolv.conf /etc/resolv.conf.OLD && ln -s /run/systemd/resolve/resolv.conf
+ /etc/resolv.conf
+ - systemctl restart systemd-resolved
+ useExperimentalRetryJoin: true
diff --git a/templates/flavors/README.md b/templates/flavors/README.md
index 6813bc8a9dd..7ac605d1167 100644
--- a/templates/flavors/README.md
+++ b/templates/flavors/README.md
@@ -25,7 +25,7 @@ run ```tilt up ${flavors}``` to spin up worker clusters in Azure represented by
Add your desired flavors to tilt_config.json:
```json
{
- "worker-flavors": ["default", "aks", "ephemeral", "external-cloud-provider", "machinepool", "system-assigned-identity", "user-assigned-identity"]
+ "worker-flavors": ["default", "aks", "ephemeral", "external-cloud-provider", "ipv6", "machinepool", "system-assigned-identity", "user-assigned-identity"]
}
```
diff --git a/templates/flavors/ipv6/kustomization.yaml b/templates/flavors/ipv6/kustomization.yaml
new file mode 100644
index 00000000000..7bf633e7ae7
--- /dev/null
+++ b/templates/flavors/ipv6/kustomization.yaml
@@ -0,0 +1,9 @@
+namespace: default
+resources:
+ - ../base
+ - machine-deployment.yaml
+patchesStrategicMerge:
+ - patches/ipv6.yaml
+ - patches/kubeadm-controlplane.yaml
+ - patches/controlplane-azuremachinetemplate.yaml
+
diff --git a/templates/flavors/ipv6/machine-deployment.yaml b/templates/flavors/ipv6/machine-deployment.yaml
new file mode 100644
index 00000000000..67a2607c8fb
--- /dev/null
+++ b/templates/flavors/ipv6/machine-deployment.yaml
@@ -0,0 +1,80 @@
+---
+apiVersion: cluster.x-k8s.io/v1alpha3
+kind: MachineDeployment
+metadata:
+ name: "${CLUSTER_NAME}-md-0"
+spec:
+ clusterName: "${CLUSTER_NAME}"
+ replicas: ${WORKER_MACHINE_COUNT}
+ selector:
+ matchLabels:
+ template:
+ spec:
+ clusterName: "${CLUSTER_NAME}"
+ version: "${KUBERNETES_VERSION}"
+ bootstrap:
+ configRef:
+ name: "${CLUSTER_NAME}-md-0"
+ apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
+ kind: KubeadmConfigTemplate
+ infrastructureRef:
+ name: "${CLUSTER_NAME}-md-0"
+ apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+ kind: AzureMachineTemplate
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: AzureMachineTemplate
+metadata:
+ name: "${CLUSTER_NAME}-md-0"
+spec:
+ template:
+ spec:
+ location: ${AZURE_LOCATION}
+ vmSize: ${AZURE_NODE_MACHINE_TYPE}
+ osDisk:
+ osType: "Linux"
+ diskSizeGB: 30
+ managedDisk:
+ storageAccountType: "Premium_LRS"
+ sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""}
+ enableIPForwarding: true
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
+kind: KubeadmConfigTemplate
+metadata:
+ name: "${CLUSTER_NAME}-md-0"
+spec:
+ template:
+ spec:
+ useExperimentalRetryJoin: true
+ postKubeadmCommands:
+ # This frees up :53 on the host for the coredns pods
+ - echo "DNSStubListener=no" >> /etc/systemd/resolved.conf
+ - mv /etc/resolv.conf /etc/resolv.conf.OLD && ln -s /run/systemd/resolve/resolv.conf /etc/resolv.conf
+ - systemctl restart systemd-resolved
+ joinConfiguration:
+ nodeRegistration:
+ name: '{{ ds.meta_data["local_hostname"] }}'
+ kubeletExtraArgs:
+ cloud-provider: azure
+ cloud-config: /etc/kubernetes/azure.json
+ node-ip: "::"
+              cluster-dns: "fd00::10"
+ clusterConfiguration:
+ apiServer:
+ extraArgs:
+ bind-address: "::"
+ controllerManager:
+ extraArgs:
+ bind-address: "::"
+ scheduler:
+ extraArgs:
+ bind-address: "::"
+ files:
+ - contentFrom:
+ secret:
+ name: ${CLUSTER_NAME}-md-0-azure-json
+ key: worker-node-azure.json
+ owner: root:root
+ path: /etc/kubernetes/azure.json
+ permissions: "0644"
diff --git a/templates/flavors/ipv6/patches/controlplane-azuremachinetemplate.yaml b/templates/flavors/ipv6/patches/controlplane-azuremachinetemplate.yaml
new file mode 100644
index 00000000000..85523b39e3b
--- /dev/null
+++ b/templates/flavors/ipv6/patches/controlplane-azuremachinetemplate.yaml
@@ -0,0 +1,8 @@
+kind: AzureMachineTemplate
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+metadata:
+ name: "${CLUSTER_NAME}-control-plane"
+spec:
+ template:
+ spec:
+ enableIPForwarding: true
diff --git a/templates/flavors/ipv6/patches/ipv6.yaml b/templates/flavors/ipv6/patches/ipv6.yaml
new file mode 100644
index 00000000000..1b6d95c4215
--- /dev/null
+++ b/templates/flavors/ipv6/patches/ipv6.yaml
@@ -0,0 +1,35 @@
+---
+apiVersion: cluster.x-k8s.io/v1alpha3
+kind: Cluster
+metadata:
+ name: ${CLUSTER_NAME}
+spec:
+ clusterNetwork:
+ pods:
+ # this is a part of the virtual network IP range.
+ # See https://docs.projectcalico.org/reference/public-cloud/azure
+ cidrBlocks: ["2001:1234:5678:9a40::/58"]
+ services:
+ cidrBlocks: ["fd00::/108"]
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: AzureCluster
+metadata:
+ name: ${CLUSTER_NAME}
+spec:
+ networkSpec:
+ vnet:
+ cidrBlocks:
+ - "10.0.0.0/8"
+ - "2001:1234:5678:9a00::/56"
+ subnets:
+ - name: control-plane-subnet
+ role: control-plane
+ cidrBlocks:
+ - "10.0.0.0/16"
+ - "2001:1234:5678:9abc::/64"
+ - name: node-subnet
+ role: node
+ cidrBlocks:
+ - "10.1.0.0/16"
+ - "2001:1234:5678:9abd::/64"
diff --git a/templates/flavors/ipv6/patches/kubeadm-controlplane.yaml b/templates/flavors/ipv6/patches/kubeadm-controlplane.yaml
new file mode 100644
index 00000000000..16986168359
--- /dev/null
+++ b/templates/flavors/ipv6/patches/kubeadm-controlplane.yaml
@@ -0,0 +1,55 @@
+apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
+kind: KubeadmControlPlane
+metadata:
+ name: "${CLUSTER_NAME}-control-plane"
+spec:
+ kubeadmConfigSpec:
+ useExperimentalRetryJoin: true
+ postKubeadmCommands:
+ - sed -i '\#--listen-client-urls#s#$#,https://127.0.0.1:2379#' /etc/kubernetes/manifests/etcd.yaml
+ # This frees up :53 on the host for the coredns pods
+ - echo "DNSStubListener=no" >> /etc/systemd/resolved.conf
+ - mv /etc/resolv.conf /etc/resolv.conf.OLD && ln -s /run/systemd/resolve/resolv.conf /etc/resolv.conf
+ - systemctl restart systemd-resolved
+ initConfiguration:
+ nodeRegistration:
+ name: '{{ ds.meta_data["local_hostname"] }}'
+ kubeletExtraArgs:
+ cloud-provider: azure
+ cloud-config: /etc/kubernetes/azure.json
+ node-ip: "::"
+ cluster-dns: "fd00::10"
+ localAPIEndpoint:
+ advertiseAddress: "::"
+ bindPort: 6443
+ joinConfiguration:
+ nodeRegistration:
+ name: '{{ ds.meta_data["local_hostname"] }}'
+ kubeletExtraArgs:
+ cloud-provider: azure
+ cloud-config: /etc/kubernetes/azure.json
+ node-ip: "::"
+ cluster-dns: "fd00::10"
+ controlPlane:
+ localAPIEndpoint:
+ advertiseAddress: "::"
+ bindPort: 6443
+ clusterConfiguration:
+ apiServer:
+ timeoutForControlPlane: 20m
+ extraArgs:
+ cloud-provider: azure
+ cloud-config: /etc/kubernetes/azure.json
+ bind-address: "::"
+ controllerManager:
+ extraArgs:
+ cloud-provider: azure
+ cloud-config: /etc/kubernetes/azure.json
+          # required for IPv6 when using Calico
+ allocate-node-cidrs: "true"
+ cluster-cidr: "2001:1234:5678:9a40::/58"
+ configure-cloud-routes: "true"
+ bind-address: "::"
+ scheduler:
+ extraArgs:
+ bind-address: "::"
diff --git a/templates/test/cluster-template-prow-ipv6.yaml b/templates/test/cluster-template-prow-ipv6.yaml
new file mode 100644
index 00000000000..c2be2050b53
--- /dev/null
+++ b/templates/test/cluster-template-prow-ipv6.yaml
@@ -0,0 +1,283 @@
+apiVersion: cluster.x-k8s.io/v1alpha3
+kind: Cluster
+metadata:
+ labels:
+ cni: ${CLUSTER_NAME}-crs-0
+ name: ${CLUSTER_NAME}
+ namespace: default
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks:
+ - 2001:1234:5678:9a40::/58
+ services:
+ cidrBlocks:
+ - fd00::/108
+ controlPlaneRef:
+ apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
+ kind: KubeadmControlPlane
+ name: ${CLUSTER_NAME}-control-plane
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+ kind: AzureCluster
+ name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: AzureCluster
+metadata:
+ name: ${CLUSTER_NAME}
+ namespace: default
+spec:
+ additionalTags:
+ creationTimestamp: ${TIMESTAMP}
+ jobName: ${JOB_NAME}
+ location: ${AZURE_LOCATION}
+ networkSpec:
+ subnets:
+ - cidrBlocks:
+ - 10.0.0.0/16
+ - 2001:1234:5678:9abc::/64
+ name: control-plane-subnet
+ role: control-plane
+ - cidrBlocks:
+ - 10.1.0.0/16
+ - 2001:1234:5678:9abd::/64
+ name: node-subnet
+ role: node
+ vnet:
+ cidrBlocks:
+ - 10.0.0.0/8
+ - 2001:1234:5678:9a00::/56
+ name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet}
+ resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}}
+ subscriptionID: ${AZURE_SUBSCRIPTION_ID}
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
+kind: KubeadmControlPlane
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+ namespace: default
+spec:
+ infrastructureTemplate:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+ kind: AzureMachineTemplate
+ name: ${CLUSTER_NAME}-control-plane
+ kubeadmConfigSpec:
+ clusterConfiguration:
+ apiServer:
+ extraArgs:
+ bind-address: '::'
+ cloud-config: /etc/kubernetes/azure.json
+ cloud-provider: azure
+ extraVolumes:
+ - hostPath: /etc/kubernetes/azure.json
+ mountPath: /etc/kubernetes/azure.json
+ name: cloud-config
+ readOnly: true
+ timeoutForControlPlane: 20m
+ controllerManager:
+ extraArgs:
+ allocate-node-cidrs: "true"
+ bind-address: '::'
+ cloud-config: /etc/kubernetes/azure.json
+ cloud-provider: azure
+ cluster-cidr: 2001:1234:5678:9a40::/58
+ cluster-name: ${CLUSTER_NAME}
+ configure-cloud-routes: "true"
+ extraVolumes:
+ - hostPath: /etc/kubernetes/azure.json
+ mountPath: /etc/kubernetes/azure.json
+ name: cloud-config
+ readOnly: true
+ etcd:
+ local:
+ dataDir: /var/lib/etcddisk/etcd
+ scheduler:
+ extraArgs:
+ bind-address: '::'
+ diskSetup:
+ filesystems:
+ - device: /dev/disk/azure/scsi1/lun0
+ extraOpts:
+ - -E
+ - lazy_itable_init=1,lazy_journal_init=1
+ filesystem: ext4
+ label: etcd_disk
+ - device: ephemeral0.1
+ filesystem: ext4
+ label: ephemeral0
+ replaceFS: ntfs
+ partitions:
+ - device: /dev/disk/azure/scsi1/lun0
+ layout: true
+ overwrite: false
+ tableType: gpt
+ files:
+ - contentFrom:
+ secret:
+ key: control-plane-azure.json
+ name: ${CLUSTER_NAME}-control-plane-azure-json
+ owner: root:root
+ path: /etc/kubernetes/azure.json
+ permissions: "0644"
+ initConfiguration:
+ localAPIEndpoint:
+ advertiseAddress: '::'
+ bindPort: 6443
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-config: /etc/kubernetes/azure.json
+ cloud-provider: azure
+ cluster-dns: fd00::10
+ node-ip: '::'
+ name: '{{ ds.meta_data["local_hostname"] }}'
+ joinConfiguration:
+ controlPlane:
+ localAPIEndpoint:
+ advertiseAddress: '::'
+ bindPort: 6443
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-config: /etc/kubernetes/azure.json
+ cloud-provider: azure
+ cluster-dns: fd00::10
+ node-ip: '::'
+ name: '{{ ds.meta_data["local_hostname"] }}'
+ mounts:
+ - - LABEL=etcd_disk
+ - /var/lib/etcddisk
+ postKubeadmCommands:
+ - sed -i '\#--listen-client-urls#s#$#,https://127.0.0.1:2379#' /etc/kubernetes/manifests/etcd.yaml
+ - echo "DNSStubListener=no" >> /etc/systemd/resolved.conf
+ - mv /etc/resolv.conf /etc/resolv.conf.OLD && ln -s /run/systemd/resolve/resolv.conf
+ /etc/resolv.conf
+ - systemctl restart systemd-resolved
+ useExperimentalRetryJoin: true
+ replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: AzureMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-control-plane
+ namespace: default
+spec:
+ template:
+ spec:
+ dataDisks:
+ - diskSizeGB: 256
+ lun: 0
+ nameSuffix: etcddisk
+ enableIPForwarding: true
+ location: ${AZURE_LOCATION}
+ osDisk:
+ diskSizeGB: 128
+ managedDisk:
+ storageAccountType: Premium_LRS
+ osType: Linux
+ sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""}
+ vmSize: ${AZURE_CONTROL_PLANE_MACHINE_TYPE}
+---
+apiVersion: cluster.x-k8s.io/v1alpha3
+kind: MachineDeployment
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+ namespace: default
+spec:
+ clusterName: ${CLUSTER_NAME}
+ replicas: ${WORKER_MACHINE_COUNT}
+ selector:
+ matchLabels: null
+ template:
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
+ kind: KubeadmConfigTemplate
+ name: ${CLUSTER_NAME}-md-0
+ clusterName: ${CLUSTER_NAME}
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+ kind: AzureMachineTemplate
+ name: ${CLUSTER_NAME}-md-0
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: AzureMachineTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+ namespace: default
+spec:
+ template:
+ spec:
+ enableIPForwarding: true
+ location: ${AZURE_LOCATION}
+ osDisk:
+ diskSizeGB: 30
+ managedDisk:
+ storageAccountType: Premium_LRS
+ osType: Linux
+ sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""}
+ vmSize: ${AZURE_NODE_MACHINE_TYPE}
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
+kind: KubeadmConfigTemplate
+metadata:
+ name: ${CLUSTER_NAME}-md-0
+ namespace: default
+spec:
+ template:
+ spec:
+ clusterConfiguration:
+ apiServer:
+ extraArgs:
+ bind-address: '::'
+ controllerManager:
+ extraArgs:
+ bind-address: '::'
+ scheduler:
+ extraArgs:
+ bind-address: '::'
+ files:
+ - contentFrom:
+ secret:
+ key: worker-node-azure.json
+ name: ${CLUSTER_NAME}-md-0-azure-json
+ owner: root:root
+ path: /etc/kubernetes/azure.json
+ permissions: "0644"
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cloud-config: /etc/kubernetes/azure.json
+ cloud-provider: azure
+ cluster-dns: '[fd00::10]'
+ node-ip: '::'
+ name: '{{ ds.meta_data["local_hostname"] }}'
+ postKubeadmCommands:
+ - echo "DNSStubListener=no" >> /etc/systemd/resolved.conf
+ - mv /etc/resolv.conf /etc/resolv.conf.OLD && ln -s /run/systemd/resolve/resolv.conf
+ /etc/resolv.conf
+ - systemctl restart systemd-resolved
+ useExperimentalRetryJoin: true
+---
+apiVersion: v1
+data: ${CNI_RESOURCES_IPV6}
+kind: ConfigMap
+metadata:
+ name: cni-${CLUSTER_NAME}-crs-0
+ namespace: default
+---
+apiVersion: addons.cluster.x-k8s.io/v1alpha3
+kind: ClusterResourceSet
+metadata:
+ name: ${CLUSTER_NAME}-crs-0
+ namespace: default
+spec:
+ clusterSelector:
+ matchLabels:
+ cni: ${CLUSTER_NAME}-crs-0
+ resources:
+ - kind: ConfigMap
+ name: cni-${CLUSTER_NAME}-crs-0
+ strategy: ApplyOnce
diff --git a/templates/test/prow-ipv6/cni-resource-set.yaml b/templates/test/prow-ipv6/cni-resource-set.yaml
new file mode 100644
index 00000000000..23b9571b7a7
--- /dev/null
+++ b/templates/test/prow-ipv6/cni-resource-set.yaml
@@ -0,0 +1,21 @@
+---
+apiVersion: v1
+data: ${CNI_RESOURCES_IPV6}
+kind: ConfigMap
+metadata:
+ name: cni-${CLUSTER_NAME}-crs-0
+ namespace: default
+---
+apiVersion: addons.cluster.x-k8s.io/v1alpha3
+kind: ClusterResourceSet
+metadata:
+ name: ${CLUSTER_NAME}-crs-0
+ namespace: default
+spec:
+ clusterSelector:
+ matchLabels:
+ cni: ${CLUSTER_NAME}-crs-0
+ resources:
+ - kind: ConfigMap
+ name: cni-${CLUSTER_NAME}-crs-0
+  strategy: ApplyOnce
diff --git a/templates/test/prow-ipv6/kustomization.yaml b/templates/test/prow-ipv6/kustomization.yaml
new file mode 100644
index 00000000000..7ed4eb9a8a5
--- /dev/null
+++ b/templates/test/prow-ipv6/kustomization.yaml
@@ -0,0 +1,9 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+namespace: default
+resources:
+  - ../../flavors/ipv6
+  - cni-resource-set.yaml
+patchesStrategicMerge:
+  - ../patches/tags.yaml
+  - patches/cluster-cni.yaml
diff --git a/templates/test/prow-ipv6/patches/cluster-cni.yaml b/templates/test/prow-ipv6/patches/cluster-cni.yaml
new file mode 100644
index 00000000000..751f46e6154
--- /dev/null
+++ b/templates/test/prow-ipv6/patches/cluster-cni.yaml
@@ -0,0 +1,8 @@
+---
+apiVersion: cluster.x-k8s.io/v1alpha3
+kind: Cluster
+metadata:
+ name: ${CLUSTER_NAME}
+ namespace: default
+ labels:
+ cni: "${CLUSTER_NAME}-crs-0"
diff --git a/test/e2e/azure_lb.go b/test/e2e/azure_lb.go
index 8aad38a334c..de3958c17a0 100644
--- a/test/e2e/azure_lb.go
+++ b/test/e2e/azure_lb.go
@@ -21,13 +21,13 @@ package e2e
import (
"context"
"fmt"
 	"io/ioutil"
 	"net"
 	"regexp"
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
+	k8snet "k8s.io/utils/net"
-
retryablehttp "github.com/hashicorp/go-retryablehttp"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
@@ -43,6 +43,7 @@ type AzureLBSpecInput struct {
Namespace *corev1.Namespace
ClusterName string
SkipCleanup bool
+ IPv6 bool
}
// AzureLBSpec implements a test that verifies Azure internal and external load balancers can
@@ -64,32 +65,32 @@ func AzureLBSpec(ctx context.Context, inputGetter func() AzureLBSpecInput) {
clientset = clusterProxy.GetClientSet()
Expect(clientset).NotTo(BeNil())
- By("creating an nginx deployment")
+ By("creating an Apache HTTP deployment")
deploymentsClient := clientset.AppsV1().Deployments(corev1.NamespaceDefault)
var replicas int32 = 1
deployment := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
- Name: "ingress-nginx",
+ Name: "httpd",
Namespace: corev1.NamespaceDefault,
},
Spec: appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
- "app": "ingress-nginx",
+ "app": "httpd",
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
- "app": "ingress-nginx",
+ "app": "httpd",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "web",
- Image: "nginx:1.18",
+ Image: "httpd",
Ports: []corev1.ContainerPort{
{
Name: "http",
@@ -112,91 +113,103 @@ func AzureLBSpec(ctx context.Context, inputGetter func() AzureLBSpecInput) {
}
WaitForDeploymentsAvailable(context.TODO(), deployInput, e2eConfig.GetIntervals(specName, "wait-deployment")...)
- By("creating an internal Load Balancer service")
servicesClient := clientset.CoreV1().Services(corev1.NamespaceDefault)
- ilbService := &corev1.Service{
- ObjectMeta: metav1.ObjectMeta{
- Name: "ingress-nginx-ilb",
- Namespace: corev1.NamespaceDefault,
- Annotations: map[string]string{
- "service.beta.kubernetes.io/azure-load-balancer-internal": "true",
+ jobsClient := clientset.BatchV1().Jobs(corev1.NamespaceDefault)
+ jobName := "curl-to-ilb-job"
+ ilbName := "httpd-ilb"
+
+ // TODO: fix and enable this. Internal LBs + IPv6 is currently in preview.
+ // https://docs.microsoft.com/en-us/azure/virtual-network/ipv6-dual-stack-standard-internal-load-balancer-powershell
+ if !input.IPv6 {
+ By("creating an internal Load Balancer service")
+ ilbService := &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: ilbName,
+ Namespace: corev1.NamespaceDefault,
+ Annotations: map[string]string{
+ "service.beta.kubernetes.io/azure-load-balancer-internal": "true",
+ },
},
- },
- Spec: corev1.ServiceSpec{
- Type: corev1.ServiceTypeLoadBalancer,
- Ports: []corev1.ServicePort{
- {
- Name: "http",
- Port: 80,
- Protocol: corev1.ProtocolTCP,
+ Spec: corev1.ServiceSpec{
+ Type: corev1.ServiceTypeLoadBalancer,
+ Ports: []corev1.ServicePort{
+ {
+ Name: "http",
+ Port: 80,
+ Protocol: corev1.ProtocolTCP,
+ },
+ {
+ Name: "https",
+ Port: 443,
+ Protocol: corev1.ProtocolTCP,
+ },
},
- {
- Name: "https",
- Port: 443,
- Protocol: corev1.ProtocolTCP,
+ Selector: map[string]string{
+ "app": "httpd",
},
},
- Selector: map[string]string{
- "app": "ingress-nginx",
- },
- },
- }
- _, err = servicesClient.Create(ilbService)
- Expect(err).NotTo(HaveOccurred())
- ilbSvcInput := WaitForServiceAvailableInput{
- Getter: servicesClientAdapter{client: servicesClient},
- Service: ilbService,
- Clientset: clientset,
- }
- WaitForServiceAvailable(context.TODO(), ilbSvcInput, e2eConfig.GetIntervals(specName, "wait-service")...)
+ }
+ _, err = servicesClient.Create(ilbService)
+ Expect(err).NotTo(HaveOccurred())
+ ilbSvcInput := WaitForServiceAvailableInput{
+ Getter: servicesClientAdapter{client: servicesClient},
+ Service: ilbService,
+ Clientset: clientset,
+ }
+ WaitForServiceAvailable(context.TODO(), ilbSvcInput, e2eConfig.GetIntervals(specName, "wait-service")...)
- By("connecting to the internal LB service from a curl pod")
- jobsClient := clientset.BatchV1().Jobs(corev1.NamespaceDefault)
- svc, err := servicesClient.Get("ingress-nginx-ilb", metav1.GetOptions{})
- Expect(err).NotTo(HaveOccurred())
- var ilbIP string
- for _, i := range svc.Status.LoadBalancer.Ingress {
- if net.ParseIP(i.IP) != nil {
- ilbIP = i.IP
- break
+ By("connecting to the internal LB service from a curl pod")
+
+ svc, err := servicesClient.Get(ilbName, metav1.GetOptions{})
+ Expect(err).NotTo(HaveOccurred())
+ var ilbIP string
+ for _, i := range svc.Status.LoadBalancer.Ingress {
+ if net.ParseIP(i.IP) != nil {
+ if k8snet.IsIPv6String(i.IP) {
+ ilbIP = fmt.Sprintf("[%s]", i.IP)
+ break
+ }
+ ilbIP = i.IP
+ break
+ }
}
- }
- ilbJob := &batchv1.Job{
- ObjectMeta: metav1.ObjectMeta{
- Name: "curl-to-ilb-job",
- Namespace: corev1.NamespaceDefault,
- },
- Spec: batchv1.JobSpec{
- Template: corev1.PodTemplateSpec{
- Spec: corev1.PodSpec{
- RestartPolicy: corev1.RestartPolicyNever,
- Containers: []corev1.Container{
- {
- Name: "curl",
- Image: "curlimages/curl",
- Command: []string{
- "curl",
- ilbIP,
+ ilbJob := &batchv1.Job{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: jobName,
+ Namespace: corev1.NamespaceDefault,
+ },
+ Spec: batchv1.JobSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ RestartPolicy: corev1.RestartPolicyNever,
+ Containers: []corev1.Container{
+ {
+ Name: "curl",
+ Image: "curlimages/curl",
+ Command: []string{
+ "curl",
+ ilbIP,
+ },
},
},
},
},
},
- },
- }
- _, err = jobsClient.Create(ilbJob)
- Expect(err).NotTo(HaveOccurred())
- ilbJobInput := WaitForJobCompleteInput{
- Getter: jobsClientAdapter{client: jobsClient},
- Job: ilbJob,
- Clientset: clientset,
+ }
+ _, err = jobsClient.Create(ilbJob)
+ Expect(err).NotTo(HaveOccurred())
+ ilbJobInput := WaitForJobCompleteInput{
+ Getter: jobsClientAdapter{client: jobsClient},
+ Job: ilbJob,
+ Clientset: clientset,
+ }
+ WaitForJobComplete(context.TODO(), ilbJobInput, e2eConfig.GetIntervals(specName, "wait-job")...)
}
- WaitForJobComplete(context.TODO(), ilbJobInput, e2eConfig.GetIntervals(specName, "wait-job")...)
By("creating an external Load Balancer service")
elbService := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
- Name: "ingress-nginx-elb",
+ Name: "httpd-elb",
Namespace: corev1.NamespaceDefault,
},
Spec: corev1.ServiceSpec{
@@ -214,7 +227,7 @@ func AzureLBSpec(ctx context.Context, inputGetter func() AzureLBSpecInput) {
},
},
Selector: map[string]string{
- "app": "ingress-nginx",
+ "app": "httpd",
},
},
}
@@ -228,11 +241,15 @@ func AzureLBSpec(ctx context.Context, inputGetter func() AzureLBSpecInput) {
WaitForServiceAvailable(context.TODO(), elbSvcInput, e2eConfig.GetIntervals(specName, "wait-service")...)
By("connecting to the external LB service from a curl pod")
- svc, err = servicesClient.Get("ingress-nginx-elb", metav1.GetOptions{})
+ svc, err := servicesClient.Get("httpd-elb", metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
var elbIP string
for _, i := range svc.Status.LoadBalancer.Ingress {
if net.ParseIP(i.IP) != nil {
+ if k8snet.IsIPv6String(i.IP) {
+ elbIP = fmt.Sprintf("[%s]", i.IP)
+ break
+ }
elbIP = i.IP
break
}
@@ -269,31 +286,35 @@ func AzureLBSpec(ctx context.Context, inputGetter func() AzureLBSpecInput) {
}
WaitForJobComplete(context.TODO(), elbJobInput, e2eConfig.GetIntervals(specName, "wait-job")...)
- By("connecting directly to the external LB service")
- url := fmt.Sprintf("http://%s", elbIP)
- resp, err := retryablehttp.Get(url)
- if resp != nil {
- defer resp.Body.Close()
+ if !input.IPv6 {
+ By("connecting directly to the external LB service")
+ url := fmt.Sprintf("http://%s", elbIP)
+ resp, err := retryablehttp.Get(url)
+ if resp != nil {
+ defer resp.Body.Close()
+ }
+ Expect(err).NotTo(HaveOccurred())
+ body, err := ioutil.ReadAll(resp.Body)
+ Expect(err).NotTo(HaveOccurred())
+ matched, err := regexp.MatchString("It works!", string(body))
+ Expect(err).NotTo(HaveOccurred())
+ Expect(matched).To(BeTrue())
}
- Expect(err).NotTo(HaveOccurred())
- body, err := ioutil.ReadAll(resp.Body)
- Expect(err).NotTo(HaveOccurred())
- matched, err := regexp.MatchString("(Welcome to nginx)", string(body))
- Expect(err).NotTo(HaveOccurred())
- Expect(matched).To(BeTrue())
if input.SkipCleanup {
return
}
By("deleting the test resources")
- err = servicesClient.Delete(ilbService.Name, &metav1.DeleteOptions{})
- Expect(err).NotTo(HaveOccurred())
+ if !input.IPv6 {
+ err = servicesClient.Delete(ilbName, &metav1.DeleteOptions{})
+ Expect(err).NotTo(HaveOccurred())
+ err = jobsClient.Delete(jobName, &metav1.DeleteOptions{})
+ Expect(err).NotTo(HaveOccurred())
+ }
err = servicesClient.Delete(elbService.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
err = deploymentsClient.Delete(deployment.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
- err = jobsClient.Delete(ilbJob.Name, &metav1.DeleteOptions{})
- Expect(err).NotTo(HaveOccurred())
err = jobsClient.Delete(elbJob.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
}
diff --git a/test/e2e/azure_test.go b/test/e2e/azure_test.go
index d38d324490b..158b4102c37 100644
--- a/test/e2e/azure_test.go
+++ b/test/e2e/azure_test.go
@@ -161,4 +161,39 @@ var _ = Describe("Workload cluster creation", func() {
})
})
})
+
+	Context("Creating an IPv6 control-plane cluster", func() {
+ It("With ipv6 worker node", func() {
+ cluster, _, _ = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
+ ClusterProxy: bootstrapClusterProxy,
+ ConfigCluster: clusterctl.ConfigClusterInput{
+ LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()),
+ ClusterctlConfigPath: clusterctlConfigPath,
+ KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(),
+ InfrastructureProvider: clusterctl.DefaultInfrastructureProvider,
+ Flavor: "ipv6",
+ Namespace: namespace.Name,
+ ClusterName: clusterName,
+ KubernetesVersion: e2eConfig.GetVariable(capi_e2e.KubernetesVersion),
+ ControlPlaneMachineCount: pointer.Int64Ptr(3),
+ WorkerMachineCount: pointer.Int64Ptr(1),
+ },
+ WaitForClusterIntervals: e2eConfig.GetIntervals(specName, "wait-cluster"),
+ WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
+ WaitForMachineDeployments: e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
+ })
+
+ Context("Creating an accessible ipv6 load balancer", func() {
+ AzureLBSpec(ctx, func() AzureLBSpecInput {
+ return AzureLBSpecInput{
+ BootstrapClusterProxy: bootstrapClusterProxy,
+ Namespace: namespace,
+ ClusterName: clusterName,
+ SkipCleanup: skipCleanup,
+ IPv6: true,
+ }
+ })
+ })
+ })
+ })
})
diff --git a/test/e2e/common.go b/test/e2e/common.go
index c029b692c40..286cb5f9a7f 100644
--- a/test/e2e/common.go
+++ b/test/e2e/common.go
@@ -43,6 +43,8 @@ const (
RedactLogScriptPath = "REDACT_LOG_SCRIPT"
AzureResourceGroup = "AZURE_RESOURCE_GROUP"
AzureVNetName = "AZURE_VNET_NAME"
+ CNIPathIPv6 = "CNI_IPV6"
+ CNIResourcesIPv6 = "CNI_RESOURCES_IPV6"
)
func Byf(format string, a ...interface{}) {
diff --git a/test/e2e/config/azure-dev.yaml b/test/e2e/config/azure-dev.yaml
index 66391b73bb0..23a33cd7449 100644
--- a/test/e2e/config/azure-dev.yaml
+++ b/test/e2e/config/azure-dev.yaml
@@ -48,6 +48,8 @@ providers:
targetName: "cluster-template.yaml"
- sourcePath: "../data/infrastructure-azure/cluster-template-kcp-adoption.yaml"
targetName: "cluster-template-kcp-adoption.yaml"
+ - sourcePath: "${PWD}/templates/test/cluster-template-prow-ipv6.yaml"
+ targetName: "cluster-template-ipv6.yaml"
variables:
KUBERNETES_VERSION: "${KUBERNETES_VERSION:-v1.18.8}"
@@ -56,6 +58,7 @@ variables:
KUBERNETES_VERSION_UPGRADE_TO: "${KUBERNETES_VERSION_UPGRADE_TO:-v1.18.8}"
KUBERNETES_VERSION_UPGRADE_FROM: "${KUBERNETES_VERSION_UPGRADE_FROM:-v1.17.11}"
CNI: "${PWD}/templates/addons/calico.yaml"
+ CNI_IPV6: "${PWD}/templates/addons/calico-ipv6.yaml"
REDACT_LOG_SCRIPT: "${PWD}/hack/log/redact.sh"
EXP_AKS: "true"
EXP_MACHINE_POOL: "true"
diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go
index d61b17ef3ed..cd5c46732e3 100644
--- a/test/e2e/e2e_suite_test.go
+++ b/test/e2e/e2e_suite_test.go
@@ -160,6 +160,10 @@ func loadE2EConfig(configPath string) *clusterctl.E2EConfig {
Expect(config.Variables).To(HaveKey(capi_e2e.CNIPath), "Missing %s variable in the config", capi_e2e.CNIPath)
clusterctl.SetCNIEnvVar(config.GetVariable(capi_e2e.CNIPath), capi_e2e.CNIResources)
+	// Read the CNI_IPV6 file and set the CNI_RESOURCES_IPV6 environment variable.
+ Expect(config.Variables).To(HaveKey(CNIPathIPv6), "Missing %s variable in the config", CNIPathIPv6)
+ clusterctl.SetCNIEnvVar(config.GetVariable(CNIPathIPv6), CNIResourcesIPv6)
+
return config
}