
feat(baremetal): support easy partitioning #3010


Merged · 16 commits · Apr 17, 2025
78 changes: 78 additions & 0 deletions docs/data-sources/baremetal_easy_partitioning.md
@@ -0,0 +1,78 @@
---
subcategory: "Elastic Metal"
page_title: "Scaleway: scaleway_baremetal_easy_partitioning"
---

# scaleway_baremetal_easy_partitioning

The `scaleway_baremetal_easy_partitioning` data source allows you to retrieve a ready-to-use partitioning schema for a BareMetal server. The schema can be used for custom installations, with an optional swap partition and an optional extra data partition.

This data source simplifies the process of generating valid partitioning configurations, which is especially useful when dealing with OS and offer compatibility requirements.

## Example Usage

```hcl
data "scaleway_baremetal_easy_partitioning" "default" {
  offer_id         = "11111111-1111-1111-1111-111111111111"
  os_id            = "22222222-2222-2222-2222-222222222222"
  swap             = true
  extra_partition  = true
  ext_4_mountpoint = "/data"
}
```

```hcl
data "scaleway_baremetal_offer" "my_offer" {
  zone = "fr-par-1"
  name = "EM-B220E-NVME"
}

data "scaleway_baremetal_os" "my_os" {
  zone    = "fr-par-1"
  name    = "Ubuntu"
  version = "22.04 LTS (Jammy Jellyfish)"
}

resource "scaleway_iam_ssh_key" "main" {
  name       = "my-ssh-key"
  public_key = "my-ssh-key-public"
}

data "scaleway_baremetal_easy_partitioning" "test" {
  offer_id         = data.scaleway_baremetal_offer.my_offer.offer_id
  os_id            = data.scaleway_baremetal_os.my_os.os_id
  swap             = false
  ext_4_mountpoint = "/hello"
}

resource "scaleway_baremetal_server" "base" {
  name         = "my-baremetal-server"
  zone         = "fr-par-1"
  description  = "a test description"
  offer        = data.scaleway_baremetal_offer.my_offer.offer_id
  os           = data.scaleway_baremetal_os.my_os.os_id
  partitioning = data.scaleway_baremetal_easy_partitioning.test.json_partition
  tags         = ["terraform-test", "scaleway_baremetal_server", "minimal", "edited"]
  ssh_key_ids  = [scaleway_iam_ssh_key.main.id]
}
```

## Argument Reference

- `offer_id` (Required) The UUID of the BareMetal offer.

- `os_id` (Required) The UUID of the OS image.

- `swap` (Optional, Default: true) Whether to include a swap partition.

- `extra_partition` (Optional, Default: true) Whether to add an extra ext4 data partition.

- `ext_4_mountpoint` (Optional, Default: "/data") The mount point for the extra partition. Must be an absolute path using alphanumeric characters and underscores.

## Attributes Reference

In addition to all above arguments, the following attributes are exported:

- `id` — A composite identifier derived from `offer_id` and `os_id`.

- `json_partition` — A validated partitioning schema in JSON format that can be directly used for BareMetal server deployment.
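
For illustration, a `json_partition` value follows the shape of the Scaleway partitioning schema. The excerpt below is abridged and hypothetical; the actual devices, sizes, and RAID layout depend on the selected offer and OS:

```json
{
  "disks": [
    {
      "device": "/dev/nvme0n1",
      "partitions": [
        { "label": "uefi", "number": 1, "size": 536870912 },
        { "label": "swap", "number": 2, "size": 4294967296 },
        { "label": "root", "number": 3, "size": 20000000000 },
        { "label": "data", "number": 4, "size": 0, "use_all_available_space": true }
      ]
    }
  ],
  "raids": [],
  "filesystems": [
    { "device": "/dev/md2", "format": "ext4", "mountpoint": "/data" }
  ]
}
```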
1 change: 1 addition & 0 deletions internal/provider/provider.go
@@ -247,6 +247,7 @@ func Provider(config *Config) plugin.ProviderFunc {
"scaleway_account_ssh_key": iam.DataSourceSSHKey(),
"scaleway_availability_zones": az.DataSourceAvailabilityZones(),
"scaleway_baremetal_offer": baremetal.DataSourceOffer(),
"scaleway_baremetal_easy_partitioning": baremetal.DataEasyPartitioning(),
"scaleway_baremetal_option": baremetal.DataSourceOption(),
"scaleway_baremetal_os": baremetal.DataSourceOS(),
"scaleway_baremetal_server": baremetal.DataSourceServer(),
237 changes: 237 additions & 0 deletions internal/services/baremetal/easy_partitioning_data_source.go
@@ -0,0 +1,237 @@
package baremetal

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/scaleway/scaleway-sdk-go/api/baremetal/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
	"github.com/scaleway/terraform-provider-scaleway/v2/internal/dsf"
	"github.com/scaleway/terraform-provider-scaleway/v2/internal/locality/zonal"
	"github.com/scaleway/terraform-provider-scaleway/v2/internal/verify"
)

const (
	// Fixed size (20 GB) given to the root partition when an extra data partition is requested.
	partitionSize = 20000000000
)

func DataEasyPartitioning() *schema.Resource {
	return &schema.Resource{
		ReadContext: dataEasyPartitioningRead,
		Schema: map[string]*schema.Schema{
			"offer_id": {
				Type:        schema.TypeString,
				Required:    true,
				Description: "ID of the server offer",
			},
			"os_id": {
				Type:             schema.TypeString,
				Required:         true,
				Description:      "The base image of the server",
				DiffSuppressFunc: dsf.Locality,
				ValidateDiagFunc: verify.IsUUIDorUUIDWithLocality(),
			},
			"swap": {
				Type:        schema.TypeBool,
				Optional:    true,
				Default:     true,
				Description: "Whether to add a swap partition",
			},
			"extra_partition": {
				Type:        schema.TypeBool,
				Optional:    true,
				Default:     true,
				Description: "Whether to add an extra ext4 data partition",
			},
			"ext_4_mountpoint": {
				Type:        schema.TypeString,
				Optional:    true,
				Default:     "/data",
				Description: "Mount point of the extra partition; must be an absolute path using alphanumeric characters and underscores",
			},
			"json_partition": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "The partitioning schema in JSON format",
			},
		},
	}
}

func dataEasyPartitioningRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	api, fallBackZone, err := newAPIWithZone(d, m)
	if err != nil {
		return diag.FromErr(err)
	}

	osID := zonal.ExpandID(d.Get("os_id").(string))

	os, err := api.GetOS(&baremetal.GetOSRequest{
		Zone: fallBackZone,
		OsID: osID.ID,
	}, scw.WithContext(ctx))
	if err != nil {
		return diag.FromErr(err)
	}

	if !os.CustomPartitioningSupported {
		return diag.FromErr(errors.New("custom partitioning is not supported with this OS"))
	}

	offerID := zonal.ExpandID(d.Get("offer_id").(string))

	offer, err := api.GetOffer(&baremetal.GetOfferRequest{
		Zone:    fallBackZone,
		OfferID: offerID.ID,
	}, scw.WithContext(ctx))
	if err != nil {
		return diag.FromErr(err)
	}

	if !isOSCompatible(offer, os) {
		return diag.FromErr(errors.New("OS and offer are not compatible"))
	}

	defaultPartitioningSchema, err := api.GetDefaultPartitioningSchema(&baremetal.GetDefaultPartitioningSchemaRequest{
		Zone:    fallBackZone,
		OfferID: offerID.ID,
		OsID:    osID.ID,
	}, scw.WithContext(ctx))
	if err != nil {
		return diag.FromErr(err)
	}

	extraPart := d.Get("extra_partition").(bool)
	swap := d.Get("swap").(bool)

	// Swap with no extra partition is exactly the default schema: return it unchanged.
	if swap && !extraPart {
		jsonSchema, err := json.Marshal(defaultPartitioningSchema)
		if err != nil {
			return diag.FromErr(err)
		}

		d.SetId(fmt.Sprintf("%s-%s", offerID, osID))
		_ = d.Set("json_partition", string(jsonSchema))

		return nil
	}

	resizeRootPartition(defaultPartitioningSchema.Disks, swap, extraPart)
	defaultPartitioningSchema.Disks = handleSwapPartitions(defaultPartitioningSchema.Disks, extraPart, swap)

	mountpoint := d.Get("ext_4_mountpoint").(string)
	addExtraExt4Partition(mountpoint, defaultPartitioningSchema, extraPart)

	// Without swap or an extra partition, drop the last filesystem and RAID entries,
	// which are left over from the swap partitions removed above.
	if !extraPart && !swap {
		defaultPartitioningSchema.Filesystems = defaultPartitioningSchema.Filesystems[:len(defaultPartitioningSchema.Filesystems)-1]
		defaultPartitioningSchema.Raids = defaultPartitioningSchema.Raids[:len(defaultPartitioningSchema.Raids)-1]
	}

	err = api.ValidatePartitioningSchema(&baremetal.ValidatePartitioningSchemaRequest{
		Zone:               fallBackZone,
		OfferID:            offerID.ID,
		OsID:               osID.ID,
		PartitioningSchema: defaultPartitioningSchema,
	})
	if err != nil {
		return diag.FromErr(err)
	}

	jsonSchema, err := json.Marshal(defaultPartitioningSchema)
	if err != nil {
		return diag.FromErr(err)
	}

	d.SetId(fmt.Sprintf("%s-%s", offerID, osID))
	_ = d.Set("json_partition", string(jsonSchema))

	return nil
}

// handleSwapPartitions removes the swap partitions and renumbers the remaining
// partitions on each disk. Without an extra partition, the root partition takes
// all remaining space; otherwise it is capped at partitionSize.
func handleSwapPartitions(originalDisks []*baremetal.SchemaDisk, withExtraPartition bool, swap bool) []*baremetal.SchemaDisk {
	if swap {
		return originalDisks
	}

	result := make([]*baremetal.SchemaDisk, 0, len(originalDisks))

	for _, disk := range originalDisks {
		i := 1
		newPartitions := []*baremetal.SchemaPartition{}

		for _, p := range disk.Partitions {
			if p.Label == "swap" {
				continue
			}

			if p.Label == "root" {
				if !withExtraPartition {
					p.Size = 0
					p.UseAllAvailableSpace = true
				} else {
					p.Size = partitionSize
				}
			}

			p.Number = uint32(i)
			i++

			newPartitions = append(newPartitions, p)
		}

		result = append(result, &baremetal.SchemaDisk{
			Device:     disk.Device,
			Partitions: newPartitions,
		})
	}

	return result
}

// addExtraExt4Partition appends a "data" partition that uses all remaining space
// on every disk, and registers an ext4 filesystem on /dev/md2 for the given mount point.
func addExtraExt4Partition(mountpoint string, defaultPartitionSchema *baremetal.Schema, extraPart bool) {
	if !extraPart {
		return
	}

	for _, disk := range defaultPartitionSchema.Disks {
		partIndex := uint32(len(disk.Partitions)) + 1
		data := &baremetal.SchemaPartition{
			Label:                baremetal.SchemaPartitionLabel("data"),
			Number:               partIndex,
			Size:                 0,
			UseAllAvailableSpace: true,
		}
		disk.Partitions = append(disk.Partitions, data)
	}

	filesystem := &baremetal.SchemaFilesystem{
		Device:     "/dev/md2",
		Format:     "ext4",
		Mountpoint: mountpoint,
	}
	defaultPartitionSchema.Filesystems = append(defaultPartitionSchema.Filesystems, filesystem)
}

// resizeRootPartition adjusts the root partition: without swap and without an
// extra partition it grows to use all available space; with an extra partition
// it is capped at partitionSize so the data partition can use the rest.
func resizeRootPartition(originalDisks []*baremetal.SchemaDisk, withSwap bool, withExtraPartition bool) {
	for _, disk := range originalDisks {
		for _, partition := range disk.Partitions {
			if partition.Label == "root" {
				if !withSwap && !withExtraPartition {
					partition.Size = 0
					partition.UseAllAvailableSpace = true
				}

				if withExtraPartition {
					partition.Size = partitionSize
				}
			}
		}
	}
}
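
The helpers above are pure functions over the SDK schema types, so they lend themselves to unit testing. A minimal sketch, assuming the package layout in this PR; the test file and cases are illustrative, not part of the change:

```go
package baremetal

import (
	"testing"

	"github.com/scaleway/scaleway-sdk-go/api/baremetal/v1"
)

func TestHandleSwapPartitionsRemovesSwap(t *testing.T) {
	disks := []*baremetal.SchemaDisk{{
		Device: "/dev/nvme0n1",
		Partitions: []*baremetal.SchemaPartition{
			{Label: "uefi", Number: 1},
			{Label: "swap", Number: 2},
			{Label: "root", Number: 3},
		},
	}}

	// Swap disabled, extra partition requested: the swap partition is dropped,
	// the remaining partitions are renumbered, and root is capped at partitionSize.
	got := handleSwapPartitions(disks, true, false)

	if n := len(got[0].Partitions); n != 2 {
		t.Fatalf("expected 2 partitions, got %d", n)
	}

	root := got[0].Partitions[1]
	if root.Label != "root" || root.Number != 2 || root.Size != partitionSize {
		t.Fatalf("unexpected root partition: %+v", root)
	}
}

func TestResizeRootPartitionUsesAllSpace(t *testing.T) {
	disks := []*baremetal.SchemaDisk{{
		Device:     "/dev/nvme0n1",
		Partitions: []*baremetal.SchemaPartition{{Label: "root", Size: 1000}},
	}}

	// Neither swap nor an extra partition: root absorbs all remaining space.
	resizeRootPartition(disks, false, false)

	if root := disks[0].Partitions[0]; root.Size != 0 || !root.UseAllAvailableSpace {
		t.Fatalf("root was not resized: %+v", root)
	}
}
```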