@@ -657,6 +657,8 @@ fields:
api_field: 'nodePools.config.windowsNodeConfig.osVersion'
- field: 'node_pool.node_config.workload_metadata_config.mode'
api_field: 'nodePools.config.workloadMetadataConfig.mode'
- field: 'node_pool.node_drain_config.respect_pdb_during_node_pool_deletion'
api_field: 'nodePools.nodeDrainConfig.respectPdbDuringNodePoolDeletion'
- field: 'node_pool.node_count'
provider_only: true
- field: 'node_pool.node_locations'
@@ -3379,6 +3379,36 @@ func TestAccContainerCluster_withNodePoolNodeConfig(t *testing.T) {
})
}

func TestAccContainerCluster_withNodePoolNodeDrainConfig(t *testing.T) {
t.Parallel()

cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10))
np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10))
networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster")
subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName)

acctest.VcrTest(t, resource.TestCase{
PreCheck: func() { acctest.AccTestPreCheck(t) },
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testAccContainerCluster_withNodePoolNodeDrainConfig(cluster, np, networkName, subnetworkName),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("google_container_cluster.with_node_pool_node_drain_config",
"node_pool.0.node_drain_config.0.respect_pdb_during_node_pool_deletion", "true"),
),
},
{
ResourceName: "google_container_cluster.with_node_pool_node_drain_config",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"},
},
},
})
}

func TestAccContainerCluster_withMaintenanceWindow(t *testing.T) {
t.Parallel()

@@ -10693,6 +10723,31 @@ resource "google_container_cluster" "with_node_pool_node_config" {
`, cluster, np, networkName, subnetworkName)
}

func testAccContainerCluster_withNodePoolNodeDrainConfig(cluster, np, networkName, subnetworkName string) string {
return fmt.Sprintf(`
data "google_container_engine_versions" "central1a" {
location = "us-central1-a"
}

resource "google_container_cluster" "with_node_pool_node_drain_config" {
name = "%s"
location = "us-central1-a"
min_master_version = data.google_container_engine_versions.central1a.latest_master_version
node_pool {
name = "%s"
initial_node_count = 1
node_drain_config {
respect_pdb_during_node_pool_deletion = true
}
}

network = "%s"
subnetwork = "%s"
deletion_protection = false
}
`, cluster, np, networkName, subnetworkName)
}

func testAccContainerCluster_withMaintenanceWindow(clusterName, startTime, networkName, subnetworkName string) string {
maintenancePolicy := ""
if len(startTime) > 0 {
@@ -662,6 +662,22 @@ var schemaNodePool = map[string]*schema.Schema{
},
},

"node_drain_config": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Description: `Node drain configuration for this NodePool.`,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"respect_pdb_during_node_pool_deletion": {
Type: schema.TypeBool,
Optional: true,
Description: `Whether to respect PodDisruptionBudget policy during node pool deletion.`,
},
},
},
},

}

type NodePoolInformation struct {
@@ -1304,6 +1320,15 @@ func expandNodePool(d *schema.ResourceData, prefix string) (*container.NodePool,
}
}

if v, ok := d.GetOk(prefix + "node_drain_config"); ok {
nodeDrainConfig := v.([]interface{})[0].(map[string]interface{})
np.NodeDrainConfig = &container.NodeDrainConfig{}

if v, ok := nodeDrainConfig["respect_pdb_during_node_pool_deletion"]; ok {
np.NodeDrainConfig.RespectPdbDuringNodePoolDeletion = v.(bool)
}
}

return np, nil
}

@@ -1365,6 +1390,17 @@ func flattenNodePoolUpgradeSettings(us *container.UpgradeSettings) []map[string]
return []map[string]interface{}{upgradeSettings}
}

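// flattenNodePoolNodeDrainConfig converts the API NodeDrainConfig into the
// single-element list used by the node_drain_config schema block.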
func flattenNodePoolNodeDrainConfig(ndc *container.NodeDrainConfig) []map[string]interface{} {
if ndc == nil {
return nil
}

nodeDrainConfig := make(map[string]interface{})

nodeDrainConfig["respect_pdb_during_node_pool_deletion"] = ndc.RespectPdbDuringNodePoolDeletion
return []map[string]interface{}{nodeDrainConfig}
}

func flattenNodePool(d *schema.ResourceData, config *transport_tpg.Config, np *container.NodePool, prefix string) (map[string]interface{}, error) {
userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
if err != nil {
@@ -1480,6 +1516,10 @@ func flattenNodePool(d *schema.ResourceData, config *transport_tpg.Config, np *c
delete(nodePool, "upgrade_settings")
}

if np.NodeDrainConfig != nil {
nodePool["node_drain_config"] = flattenNodePoolNodeDrainConfig(np.NodeDrainConfig)
}

return nodePool, nil
}

@@ -1925,6 +1965,43 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node
}
}

if d.HasChange(prefix + "node_drain_config") {
nodeDrainConfig := &container.NodeDrainConfig{}
if v, ok := d.GetOk(prefix + "node_drain_config"); ok {
nodeDrain := v.([]interface{})[0].(map[string]interface{})
if v, ok := nodeDrain["respect_pdb_during_node_pool_deletion"]; ok {
nodeDrainConfig.RespectPdbDuringNodePoolDeletion = v.(bool)
}
}
req := &container.UpdateNodePoolRequest{
NodeDrainConfig: nodeDrainConfig,
}

updateF := func() error {
clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req)

if config.UserProjectOverride {
clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project)
}
op, err := clusterNodePoolsUpdateCall.Do()

if err != nil {
return err
}

// Wait until it's updated
return ContainerOperationWait(config, op,
nodePoolInfo.project,
nodePoolInfo.location,
"updating GKE node pool node_drain_config", userAgent, timeout)
}

if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil {
return err
}
log.Printf("[INFO] Updated node_drain_config in Node Pool %s", name)
}

return nil
}

@@ -270,6 +270,8 @@ fields:
api_field: 'config.windowsNodeConfig.osVersion'
- field: 'node_config.workload_metadata_config.mode'
api_field: 'config.workloadMetadataConfig.mode'
- field: 'node_drain_config.respect_pdb_during_node_pool_deletion'
api_field: 'nodeDrainConfig.respectPdbDuringNodePoolDeletion'
- field: 'node_count'
provider_only: true
- field: 'node_locations'
@@ -1594,6 +1594,34 @@ func TestAccContainerNodePool_withManagement(t *testing.T) {
})
}

func TestAccContainerNodePool_withNodeDrainConfig(t *testing.T) {
t.Parallel()

cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10))
nodePool := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10))
networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster")
subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName)

acctest.VcrTest(t, resource.TestCase{
PreCheck: func() { acctest.AccTestPreCheck(t) },
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testAccContainerNodePool_withNodeDrainConfig(cluster, nodePool, networkName, subnetworkName),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("google_container_node_pool.np_with_node_drain_config", "node_drain_config.0.respect_pdb_during_node_pool_deletion", "true"),
),
},
{
ResourceName: "google_container_node_pool.np_with_node_drain_config",
ImportState: true,
ImportStateVerify: true,
},
},
})
}

func TestAccContainerNodePool_withNodeConfigScopeAlias(t *testing.T) {
t.Parallel()

@@ -4752,6 +4780,34 @@ resource "google_container_node_pool" "np_with_node_config_scope_alias" {
`, cluster, networkName, subnetworkName, np)
}

func testAccContainerNodePool_withNodeDrainConfig(cluster, np, networkName, subnetworkName string) string {
return fmt.Sprintf(`
data "google_container_engine_versions" "central1a" {
location = "us-central1-a"
}

resource "google_container_cluster" "cluster" {
name = "%s"
location = "us-central1-a"
initial_node_count = 1
min_master_version = data.google_container_engine_versions.central1a.latest_master_version
deletion_protection = false
network = "%s"
subnetwork = "%s"
}

resource "google_container_node_pool" "np_with_node_drain_config" {
name = "%s"
location = "us-central1-a"
cluster = google_container_cluster.cluster.name
initial_node_count = 1
node_drain_config {
respect_pdb_during_node_pool_deletion = true
}
}
`, cluster, networkName, subnetworkName, np)
}

func testAccContainerNodePool_version(cluster, np, networkName, subnetworkName string) string {
return fmt.Sprintf(`
data "google_container_engine_versions" "central1a" {
@@ -154,6 +154,8 @@ cluster.
* `node_count` - (Optional) The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside `autoscaling`.

* `node_drain_config` - (Optional) The node drain configuration of the pool. Structure is [documented below](#nested_node_drain_config).

* `project` - (Optional) The ID of the project in which to create the node pool. If blank,
the provider-configured project will be used.

@@ -240,12 +242,15 @@ cluster.
<a name="network_performance_config"></a>The `network_performance_config` block supports:

* `total_egress_bandwidth_tier` (Required) - Specifies the total network bandwidth tier for the NodePool. [Valid values](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters.nodePools#NodePool.Tier) include: "TIER_1" and "TIER_UNSPECIFIED".

<a name="pod_cidr_overprovision_config"></a>The `pod_cidr_overprovision_config` block supports:

* `disabled` (Required) - Whether pod cidr overprovision is disabled.

<a name="nested_node_drain_config"></a>The `node_drain_config` block supports:

* `respect_pdb_during_node_pool_deletion` - (Optional) Whether to respect PodDisruptionBudget policy during node pool deletion.

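A minimal sketch of a standalone node pool using this block, with placeholder resource names mirroring the acceptance test configuration added in this change:

```hcl
resource "google_container_node_pool" "example" {
  name               = "example-pool"
  location           = "us-central1-a"
  cluster            = google_container_cluster.example.name
  initial_node_count = 1

  node_drain_config {
    respect_pdb_during_node_pool_deletion = true
  }
}
```
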
<a name="nested_upgrade_settings"></a>The `upgrade_settings` block supports:

* `max_surge` - (Optional) The number of additional nodes that can be added to the node pool during