diff --git a/api/v1alpha1/databasecluster_types.go b/api/v1alpha1/databasecluster_types.go
index b864a8203..d8181e33e 100644
--- a/api/v1alpha1/databasecluster_types.go
+++ b/api/v1alpha1/databasecluster_types.go
@@ -49,6 +49,8 @@ const (
 	AppStateRestoring AppState = "restoring"
 	// AppStateDeleting is a deleting state.
 	AppStateDeleting AppState = "deleting"
+	// AppStateResizingVolumes is the state when PVCs are being resized.
+	AppStateResizingVolumes AppState = "resizingVolumes"
 	// AppStateNew represents a newly created cluster that has not yet been reconciled.
 	AppStateNew AppState = ""
@@ -355,6 +357,20 @@ type DatabaseClusterSpec struct {
 	Sharding *Sharding `json:"sharding,omitempty"`
 }
 
+const (
+	// ConditionTypeCannotResizeVolume is a condition type that indicates that the volume cannot be resized.
+	ConditionTypeCannotResizeVolume = "CannotResizeVolume"
+)
+
+const (
+	// ReasonStorageClassDoesNotSupportExpansion is a reason for condition ConditionTypeCannotResizeVolume
+	// when the storage class does not support volume expansion.
+	ReasonStorageClassDoesNotSupportExpansion = "StorageClassDoesNotSupportExpansion"
+	// ReasonCannotShrinkVolume is a reason for condition ConditionTypeCannotResizeVolume
+	// when the volume cannot be shrunk.
+	ReasonCannotShrinkVolume = "CannotShrinkVolume"
+)
+
 // DatabaseClusterStatus defines the observed state of DatabaseCluster.
 type DatabaseClusterStatus struct {
 	// ObservedGeneration is the most recent generation observed for this DatabaseCluster.
@@ -381,6 +397,8 @@ type DatabaseClusterStatus struct {
 	RecommendedCRVersion *string `json:"recommendedCRVersion,omitempty"`
 	// Details provides full status of the upstream cluster as a plain text.
 	Details string `json:"details,omitempty"`
+	// Conditions contains the observed conditions of the DatabaseCluster.
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
 }
 
 //+kubebuilder:object:root=true
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index 01571074c..4bfec509e 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -20,6 +20,7 @@ package v1alpha1
 
 import (
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )
 
@@ -585,6 +586,13 @@ func (in *DatabaseClusterStatus) DeepCopyInto(out *DatabaseClusterStatus) {
 		*out = new(string)
 		**out = **in
 	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]v1.Condition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseClusterStatus.
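Reviewer note: the following is a minimal sketch (not part of this diff) of how a caller could interpret the new status fields added above, i.e. the CannotResizeVolume condition and the resizingVolumes app state. The helper name explainResizeState and the package name are hypothetical; only meta.FindStatusCondition and the types introduced in this change are assumed.

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	everestv1alpha1 "github.com/percona/everest-operator/api/v1alpha1"
)

// explainResizeState (hypothetical helper) summarizes the volume-resize related
// status fields introduced by this change.
func explainResizeState(db *everestv1alpha1.DatabaseCluster) string {
	// common.ConfigureStorage sets this condition when a requested size change cannot be applied.
	cond := meta.FindStatusCondition(db.Status.Conditions, everestv1alpha1.ConditionTypeCannotResizeVolume)
	if cond != nil && cond.Status == metav1.ConditionTrue {
		switch cond.Reason {
		case everestv1alpha1.ReasonCannotShrinkVolume:
			return "requested size is smaller than the current volume; shrinking is not supported"
		case everestv1alpha1.ReasonStorageClassDoesNotSupportExpansion:
			return "the storage class does not allow volume expansion"
		default:
			return fmt.Sprintf("volume cannot be resized: %s", cond.Reason)
		}
	}
	// An in-progress PVC resize is surfaced through the new resizingVolumes state.
	if db.Status.Status == everestv1alpha1.AppStateResizingVolumes {
		return "PVC resize is in progress"
	}
	return "no volume resize issues reported"
}

Note that ConfigureStorage (added later in this diff) clears the condition at the start of every reconcile, so consumers only ever see the condition produced for the latest requested size.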
diff --git a/bundle/manifests/everest-operator.clusterserviceversion.yaml b/bundle/manifests/everest-operator.clusterserviceversion.yaml
index 90a7e7128..d8d8544a2 100644
--- a/bundle/manifests/everest-operator.clusterserviceversion.yaml
+++ b/bundle/manifests/everest-operator.clusterserviceversion.yaml
@@ -78,7 +78,7 @@ metadata:
         }
       ]
     capabilities: Basic Install
-    createdAt: "2025-02-14T15:05:36Z"
+    createdAt: "2025-03-12T11:23:36Z"
     operators.operatorframework.io/builder: operator-sdk-v1.38.0
     operators.operatorframework.io/project_layout: go.kubebuilder.io/v4
   name: everest-operator.v0.0.0
diff --git a/bundle/manifests/everest.percona.com_databaseclusters.yaml b/bundle/manifests/everest.percona.com_databaseclusters.yaml
index fa4725d6d..606d5f774 100644
--- a/bundle/manifests/everest.percona.com_databaseclusters.yaml
+++ b/bundle/manifests/everest.percona.com_databaseclusters.yaml
@@ -411,6 +411,63 @@ spec:
               activeStorage:
                 description: ActiveStorage is the storage used in cluster (psmdb only)
                 type: string
+              conditions:
+                description: Conditions contains the observed conditions of the DatabaseCluster.
+                items:
+                  description: Condition contains details for one aspect of the current
+                    state of this API Resource.
+                  properties:
+                    lastTransitionTime:
+                      description: |-
+                        lastTransitionTime is the last time the condition transitioned from one status to another.
+                        This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+                      format: date-time
+                      type: string
+                    message:
+                      description: |-
+                        message is a human readable message indicating details about the transition.
+                        This may be an empty string.
+                      maxLength: 32768
+                      type: string
+                    observedGeneration:
+                      description: |-
+                        observedGeneration represents the .metadata.generation that the condition was set based upon.
+                        For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+                        with respect to the current state of the instance.
+                      format: int64
+                      minimum: 0
+                      type: integer
+                    reason:
+                      description: |-
+                        reason contains a programmatic identifier indicating the reason for the condition's last transition.
+                        Producers of specific condition types may define expected values and meanings for this field,
+                        and whether the values are considered a guaranteed API.
+                        The value should be a CamelCase string.
+                        This field may not be empty.
+                      maxLength: 1024
+                      minLength: 1
+                      pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+                      type: string
+                    status:
+                      description: status of the condition, one of True, False, Unknown.
+                      enum:
+                      - "True"
+                      - "False"
+                      - Unknown
+                      type: string
+                    type:
+                      description: type of condition in CamelCase or in foo.example.com/CamelCase.
+                      maxLength: 316
+                      pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+                      type: string
+                  required:
+                  - lastTransitionTime
+                  - message
+                  - reason
+                  - status
+                  - type
+                  type: object
+                type: array
               crVersion:
                 description: CRVersion is the observed version of the CR used with
                   the underlying operator.
diff --git a/config/crd/bases/everest.percona.com_databaseclusters.yaml b/config/crd/bases/everest.percona.com_databaseclusters.yaml
index a34abe801..345d66284 100644
--- a/config/crd/bases/everest.percona.com_databaseclusters.yaml
+++ b/config/crd/bases/everest.percona.com_databaseclusters.yaml
@@ -411,6 +411,63 @@ spec:
               activeStorage:
                 description: ActiveStorage is the storage used in cluster (psmdb only)
                 type: string
+              conditions:
+                description: Conditions contains the observed conditions of the DatabaseCluster.
+                items:
+                  description: Condition contains details for one aspect of the current
+                    state of this API Resource.
+                  properties:
+                    lastTransitionTime:
+                      description: |-
+                        lastTransitionTime is the last time the condition transitioned from one status to another.
+                        This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+                      format: date-time
+                      type: string
+                    message:
+                      description: |-
+                        message is a human readable message indicating details about the transition.
+                        This may be an empty string.
+                      maxLength: 32768
+                      type: string
+                    observedGeneration:
+                      description: |-
+                        observedGeneration represents the .metadata.generation that the condition was set based upon.
+                        For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+                        with respect to the current state of the instance.
+                      format: int64
+                      minimum: 0
+                      type: integer
+                    reason:
+                      description: |-
+                        reason contains a programmatic identifier indicating the reason for the condition's last transition.
+                        Producers of specific condition types may define expected values and meanings for this field,
+                        and whether the values are considered a guaranteed API.
+                        The value should be a CamelCase string.
+                        This field may not be empty.
+                      maxLength: 1024
+                      minLength: 1
+                      pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+                      type: string
+                    status:
+                      description: status of the condition, one of True, False, Unknown.
+                      enum:
+                      - "True"
+                      - "False"
+                      - Unknown
+                      type: string
+                    type:
+                      description: type of condition in CamelCase or in foo.example.com/CamelCase.
+                      maxLength: 316
+                      pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+                      type: string
+                  required:
+                  - lastTransitionTime
+                  - message
+                  - reason
+                  - status
+                  - type
+                  type: object
+                type: array
               crVersion:
                 description: CRVersion is the observed version of the CR used with
                   the underlying operator.
diff --git a/deploy/bundle.yaml b/deploy/bundle.yaml
index 4b21d1be6..85a659375 100644
--- a/deploy/bundle.yaml
+++ b/deploy/bundle.yaml
@@ -754,6 +754,62 @@ spec:
               activeStorage:
                 description: ActiveStorage is the storage used in cluster (psmdb only)
                 type: string
+              conditions:
+                description: Conditions contains the observed conditions of the DatabaseCluster.
+                items:
+                  description: Condition contains details for one aspect of the current state of this API Resource.
+                  properties:
+                    lastTransitionTime:
+                      description: |-
+                        lastTransitionTime is the last time the condition transitioned from one status to another.
+                        This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+                      format: date-time
+                      type: string
+                    message:
+                      description: |-
+                        message is a human readable message indicating details about the transition.
+                        This may be an empty string.
+                      maxLength: 32768
+                      type: string
+                    observedGeneration:
+                      description: |-
+                        observedGeneration represents the .metadata.generation that the condition was set based upon.
+                        For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+                        with respect to the current state of the instance.
+                      format: int64
+                      minimum: 0
+                      type: integer
+                    reason:
+                      description: |-
+                        reason contains a programmatic identifier indicating the reason for the condition's last transition.
+                        Producers of specific condition types may define expected values and meanings for this field,
+                        and whether the values are considered a guaranteed API.
+                        The value should be a CamelCase string.
+                        This field may not be empty.
+                      maxLength: 1024
+                      minLength: 1
+                      pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+                      type: string
+                    status:
+                      description: status of the condition, one of True, False, Unknown.
+                      enum:
+                      - "True"
+                      - "False"
+                      - Unknown
+                      type: string
+                    type:
+                      description: type of condition in CamelCase or in foo.example.com/CamelCase.
+                      maxLength: 316
+                      pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+                      type: string
+                  required:
+                  - lastTransitionTime
+                  - message
+                  - reason
+                  - status
+                  - type
+                  type: object
+                type: array
               crVersion:
                 description: CRVersion is the observed version of the CR used with the underlying operator.
                 type: string
diff --git a/internal/controller/common/helper.go b/internal/controller/common/helper.go
index 6189c80e2..b56de4042 100644
--- a/internal/controller/common/helper.go
+++ b/internal/controller/common/helper.go
@@ -35,6 +35,7 @@ import (
 	storagev1 "k8s.io/api/storage/v1"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/fields"
@@ -793,3 +794,90 @@ func toUnstructured(obj runtime.Object) (*unstructured.Unstructured, error) {
 	}
 	return ud, nil
 }
+
+const (
+	storageClassDefaultAnnotation = "storageclass.kubernetes.io/is-default-class"
+)
+
+func storageClassSupportsVolumeExpansion(ctx context.Context, c client.Client, className *string) (bool, error) {
+	storageClass, err := getStorageClassOrDefault(ctx, c, className)
+	if err != nil {
+		return false, fmt.Errorf("getStorageClassOrDefault failed: %w", err)
+	}
+	return storageClass.AllowVolumeExpansion != nil && *storageClass.AllowVolumeExpansion, nil
+}
+
+func getStorageClassOrDefault(ctx context.Context, c client.Client, scName *string) (*storagev1.StorageClass, error) {
+	storageClass := &storagev1.StorageClass{}
+	if scName == nil {
+		storageClasses := &storagev1.StorageClassList{}
+		if err := c.List(ctx, storageClasses); err != nil {
+			return nil, err
+		}
+		for _, sc := range storageClasses.Items {
+			if sc.Annotations[storageClassDefaultAnnotation] == "true" {
+				return &sc, nil
+			}
+		}
+		return nil, errors.New("no default storage class found")
+	}
+	if err := c.Get(ctx, types.NamespacedName{Name: *scName}, storageClass); err != nil {
+		return nil, err
+	}
+	return storageClass, nil
+}
+
+// ConfigureStorage handles storage configuration and volume expansion checks for the given database cluster.
+func ConfigureStorage(
+	ctx context.Context,
+	c client.Client,
+	db *everestv1alpha1.DatabaseCluster,
+	currentSize resource.Quantity,
+	setStorageSizeFunc func(resource.Quantity),
+) error {
+	meta.RemoveStatusCondition(&db.Status.Conditions, everestv1alpha1.ConditionTypeCannotResizeVolume)
+
+	desiredSize := db.Spec.Engine.Storage.Size
+	storageClass := db.Spec.Engine.Storage.Class
+
+	// We cannot shrink the volume size.
+	hasStorageShrunk := currentSize.Cmp(desiredSize) > 0 && !currentSize.IsZero()
+	if hasStorageShrunk {
+		meta.SetStatusCondition(&db.Status.Conditions, metav1.Condition{
+			Type:               everestv1alpha1.ConditionTypeCannotResizeVolume,
+			Status:             metav1.ConditionTrue,
+			Reason:             everestv1alpha1.ReasonCannotShrinkVolume,
+			LastTransitionTime: metav1.Now(),
+			ObservedGeneration: db.GetGeneration(),
+		})
+		setStorageSizeFunc(currentSize)
+		return nil
+	}
+
+	// Check if storage size is being expanded. If not, set the desired size and return early.
+	hasStorageExpanded := currentSize.Cmp(desiredSize) < 0 && !currentSize.IsZero()
+	if !hasStorageExpanded {
+		setStorageSizeFunc(desiredSize)
+		return nil
+	}
+
+	allowedByStorageClass, err := storageClassSupportsVolumeExpansion(ctx, c, storageClass)
+	if err != nil {
+		return fmt.Errorf("failed to check if storage class supports volume expansion: %w", err)
+	}
+
+	if !allowedByStorageClass {
+		meta.SetStatusCondition(&db.Status.Conditions, metav1.Condition{
+			Type:               everestv1alpha1.ConditionTypeCannotResizeVolume,
+			Status:             metav1.ConditionTrue,
+			Reason:             everestv1alpha1.ReasonStorageClassDoesNotSupportExpansion,
+			LastTransitionTime: metav1.Now(),
+			ObservedGeneration: db.GetGeneration(),
+		})
+		setStorageSizeFunc(currentSize)
+		return nil
+	}
+
+	setStorageSizeFunc(desiredSize)
+	return nil
+}
diff --git a/internal/controller/common/helper_test.go b/internal/controller/common/helper_test.go
index 3d43cacbf..e8cbbf94e 100644
--- a/internal/controller/common/helper_test.go
+++ b/internal/controller/common/helper_test.go
@@ -18,10 +18,14 @@ package common
 import (
 	"testing"
 
+	"github.com/AlekSi/pointer"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
+	storagev1 "k8s.io/api/storage/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/kubernetes/scheme"
@@ -153,3 +157,181 @@ func TestMergeMapError(t *testing.T) {
 	err := mergeMap(testDst, src)
 	require.Error(t, err)
 }
+
+func TestConfigureStorage(t *testing.T) {
+	t.Parallel()
+
+	tests := []struct {
+		name                       string
+		db                         *everestv1alpha1.DatabaseCluster
+		currentSize                resource.Quantity
+		storageClassExists         bool
+		storageClassAllowExpansion bool
+		wantSize                   resource.Quantity
+		expectFailureCond          bool
+		wantFailureCondReason      string
+		expectErr                  bool
+	}{
+		{
+			name: "initial storage setup",
+			db: &everestv1alpha1.DatabaseCluster{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "test-db",
+					Namespace:  "default",
+					Generation: 1,
+				},
+				Spec: everestv1alpha1.DatabaseClusterSpec{
+					Engine: everestv1alpha1.Engine{
+						Storage: everestv1alpha1.Storage{
+							Size:  resource.MustParse("10Gi"),
+							Class: pointer.To("standard"),
+						},
+					},
+				},
+			},
+			currentSize:                resource.MustParse("0"),
+			storageClassExists:         true,
+			storageClassAllowExpansion: true,
+			wantSize:                   resource.MustParse("10Gi"),
+		},
+		{
+			name: "successful volume expansion",
+			db: &everestv1alpha1.DatabaseCluster{
+				ObjectMeta: metav1.ObjectMeta{
"test-db", + Namespace: "default", + Generation: 1, + }, + Spec: everestv1alpha1.DatabaseClusterSpec{ + Engine: everestv1alpha1.Engine{ + Storage: everestv1alpha1.Storage{ + Size: resource.MustParse("20Gi"), + Class: pointer.To("standard"), + }, + }, + }, + }, + currentSize: resource.MustParse("10Gi"), + storageClassExists: true, + storageClassAllowExpansion: true, + wantSize: resource.MustParse("20Gi"), + }, + { + name: "volume shrink not allowed", + db: &everestv1alpha1.DatabaseCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-db", + Namespace: "default", + Generation: 1, + }, + Spec: everestv1alpha1.DatabaseClusterSpec{ + Engine: everestv1alpha1.Engine{ + Storage: everestv1alpha1.Storage{ + Size: resource.MustParse("10Gi"), + Class: pointer.To("standard"), + }, + }, + }, + }, + currentSize: resource.MustParse("20Gi"), + storageClassExists: true, + storageClassAllowExpansion: true, + wantSize: resource.MustParse("20Gi"), + expectFailureCond: true, + wantFailureCondReason: everestv1alpha1.ReasonCannotShrinkVolume, + }, + { + name: "storage class doesn't support expansion", + db: &everestv1alpha1.DatabaseCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-db", + Namespace: "default", + Generation: 1, + }, + Spec: everestv1alpha1.DatabaseClusterSpec{ + Engine: everestv1alpha1.Engine{ + Storage: everestv1alpha1.Storage{ + Size: resource.MustParse("20Gi"), + Class: pointer.To("standard"), + }, + }, + }, + }, + currentSize: resource.MustParse("10Gi"), + storageClassExists: true, + storageClassAllowExpansion: false, + wantSize: resource.MustParse("10Gi"), + expectFailureCond: true, + wantFailureCondReason: everestv1alpha1.ReasonStorageClassDoesNotSupportExpansion, + }, + { + name: "storage class not found", + db: &everestv1alpha1.DatabaseCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-db", + Namespace: "default", + Generation: 1, + }, + Spec: everestv1alpha1.DatabaseClusterSpec{ + Engine: everestv1alpha1.Engine{ + Storage: everestv1alpha1.Storage{ + Size: resource.MustParse("20Gi"), + Class: pointer.To("non-existent"), + }, + }, + }, + }, + currentSize: resource.MustParse("10Gi"), + storageClassExists: false, + expectErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + // Setup test objects + var actualSize resource.Quantity + setSize := func(size resource.Quantity) { + actualSize = size + } + + // Setup fake client with storage class if needed + builder := fake.NewClientBuilder().WithScheme(scheme.Scheme) + if tt.storageClassExists && tt.db.Spec.Engine.Storage.Class != nil { + sc := &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: *tt.db.Spec.Engine.Storage.Class, + }, + AllowVolumeExpansion: &tt.storageClassAllowExpansion, + } + builder.WithObjects(sc) + } + client := builder.Build() + + // Run the test + err := ConfigureStorage(t.Context(), client, tt.db, tt.currentSize, setSize) + + // Verify results + if tt.expectErr { + require.Error(t, err) + return + } + require.NoError(t, err) + + // Check if the size was set correctly + assert.Equal(t, tt.wantSize, actualSize, "unexpected storage size") + + // Check conditions if expected + if tt.expectFailureCond { + cond := meta.FindStatusCondition(tt.db.Status.Conditions, everestv1alpha1.ConditionTypeCannotResizeVolume) + assert.Equal(t, tt.wantFailureCondReason, cond.Reason) + assert.Equal(t, tt.db.Generation, cond.ObservedGeneration) + assert.NotEmpty(t, cond.LastTransitionTime) + } else { + assert.Empty(t, tt.db.Status.Conditions, "expected no conditions") 
+ } + }) + } +} diff --git a/internal/controller/databasecluster_controller.go b/internal/controller/databasecluster_controller.go index 03bb1e1b9..31d14584a 100644 --- a/internal/controller/databasecluster_controller.go +++ b/internal/controller/databasecluster_controller.go @@ -180,10 +180,10 @@ func (r *DatabaseClusterReconciler) reconcileDB( // Running the applier can possibly also mutate the DatabaseCluster, // so we should make sure we push those changes to the API server. - updatedDB := db.DeepCopy() - if _, err := controllerutil.CreateOrUpdate(ctx, r.Client, db, func() error { - db.ObjectMeta = updatedDB.ObjectMeta - db.Spec = updatedDB.Spec + dbCopy := db.DeepCopy() + if _, err := controllerutil.CreateOrUpdate(ctx, r.Client, dbCopy, func() error { + dbCopy.ObjectMeta = db.ObjectMeta + dbCopy.Spec = db.Spec return nil }); err != nil { return ctrl.Result{}, err diff --git a/internal/controller/providers/pxc/applier.go b/internal/controller/providers/pxc/applier.go index 2e247d8a0..6154cf914 100644 --- a/internal/controller/providers/pxc/applier.go +++ b/internal/controller/providers/pxc/applier.go @@ -29,6 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" everestv1alpha1 "github.com/percona/everest-operator/api/v1alpha1" @@ -73,6 +74,34 @@ func (p *applier) AllowUnsafeConfig() { } } +func configureStorage( + ctx context.Context, + c client.Client, + desired *pxcv1.PerconaXtraDBClusterSpec, + current *pxcv1.PerconaXtraDBClusterSpec, + db *everestv1alpha1.DatabaseCluster, +) error { + var currentSize resource.Quantity + if db.Status.Status != everestv1alpha1.AppStateNew { + currentSize = current.PXC.PodSpec.VolumeSpec.PersistentVolumeClaim.Resources.Requests[corev1.ResourceStorage] + } + + setStorageSize := func(size resource.Quantity) { + desired.PXC.PodSpec.VolumeSpec = &pxcv1.VolumeSpec{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimSpec{ + StorageClassName: db.Spec.Engine.Storage.Class, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: size, + }, + }, + }, + } + } + + return common.ConfigureStorage(ctx, c, db, currentSize, setStorageSize) +} + func (p *applier) Engine() error { engine := p.DBEngine if p.DB.Spec.Engine.Version == "" { @@ -97,15 +126,10 @@ func (p *applier) Engine() error { } pxc.Spec.PXC.Image = pxcEngineVersion.ImagePath - pxc.Spec.PXC.PodSpec.VolumeSpec = &pxcv1.VolumeSpec{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimSpec{ - StorageClassName: p.DB.Spec.Engine.Storage.Class, - Resources: corev1.VolumeResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceStorage: p.DB.Spec.Engine.Storage.Size, - }, - }, - }, + pxc.Spec.VolumeExpansionEnabled = true + + if err := configureStorage(p.ctx, p.C, &pxc.Spec, &p.currentPerconaXtraDBClusterSpec, p.DB); err != nil { + return err } if !p.DB.Spec.Engine.Resources.CPU.IsZero() { diff --git a/internal/controller/providers/pxc/provider.go b/internal/controller/providers/pxc/provider.go index d505889f0..682fca703 100644 --- a/internal/controller/providers/pxc/provider.go +++ b/internal/controller/providers/pxc/provider.go @@ -220,6 +220,12 @@ func (p *Provider) Status(ctx context.Context) (everestv1alpha1.DatabaseClusterS return status, err } status.RecommendedCRVersion = recCRVer + + annotations := pxc.GetAnnotations() + _, pvcResizing := 
+	_, pvcResizing := annotations[pxcv1.AnnotationPVCResizeInProgress]
+	if pvcResizing {
+		status.Status = everestv1alpha1.AppStateResizingVolumes
+	}
 	return status, nil
 }
diff --git a/tests/e2e/core/pxc/62-assert.yaml b/tests/e2e/core/pxc/62-assert.yaml
new file mode 100644
index 000000000..283b43478
--- /dev/null
+++ b/tests/e2e/core/pxc/62-assert.yaml
@@ -0,0 +1,27 @@
+apiVersion: kuttl.dev/v1
+kind: TestAssert
+timeout: 600
+---
+apiVersion: everest.percona.com/v1alpha1
+kind: DatabaseCluster
+metadata:
+  name: test-pxc-cluster
+status:
+  status: ready
+---
+apiVersion: pxc.percona.com/v1
+kind: PerconaXtraDBCluster
+metadata:
+  name: test-pxc-cluster
+status:
+  haproxy:
+    ready: 1
+    size: 1
+    status: ready
+  pxc:
+    ready: 1
+    size: 1
+    status: ready
+  ready: 2
+  size: 2
+  state: ready
diff --git a/tests/e2e/core/pxc/62-create-cluster.yaml b/tests/e2e/core/pxc/62-create-cluster.yaml
new file mode 100644
index 000000000..1ba99ab64
--- /dev/null
+++ b/tests/e2e/core/pxc/62-create-cluster.yaml
@@ -0,0 +1,27 @@
+apiVersion: kuttl.dev/v1
+kind: TestStep
+timeout: 10
+---
+apiVersion: everest.percona.com/v1alpha1
+kind: DatabaseCluster
+metadata:
+  name: test-pxc-cluster
+spec:
+  engine:
+    type: pxc
+    userSecretsName: pxc-sample-secrets
+    config: |
+      [mysqld]
+      wsrep_provider_options="debug=1;gcache.size=1G"
+      wsrep_debug=1
+      wsrep_trx_fragment_unit='bytes'
+      wsrep_trx_fragment_size=3670016
+    replicas: 1
+    storage:
+      size: 15G
+    resources:
+      cpu: 600m
+      memory: 1G
+  proxy:
+    replicas: 1
+    type: haproxy
diff --git a/tests/e2e/core/pxc/63-assert.yaml b/tests/e2e/core/pxc/63-assert.yaml
new file mode 100644
index 000000000..7685d71cc
--- /dev/null
+++ b/tests/e2e/core/pxc/63-assert.yaml
@@ -0,0 +1,24 @@
+apiVersion: kuttl.dev/v1
+kind: TestAssert
+timeout: 600
+---
+apiVersion: everest.percona.com/v1alpha1
+kind: DatabaseCluster
+metadata:
+  name: test-pxc-cluster
+status:
+  status: resizingVolumes
+---
+apiVersion: pxc.percona.com/v1
+kind: PerconaXtraDBCluster
+metadata:
+  name: test-pxc-cluster
+spec:
+  enableVolumeExpansion: true
+  pxc:
+    size: 1
+    volumeSpec:
+      persistentVolumeClaim:
+        resources:
+          requests:
+            storage: 20G
\ No newline at end of file
diff --git a/tests/e2e/core/pxc/63-expand-storage.yaml b/tests/e2e/core/pxc/63-expand-storage.yaml
new file mode 100644
index 000000000..773364e03
--- /dev/null
+++ b/tests/e2e/core/pxc/63-expand-storage.yaml
@@ -0,0 +1,5 @@
+apiVersion: kuttl.dev/v1
+kind: TestStep
+timeout: 10
+commands:
+  - command: kubectl patch db test-pxc-cluster -n $NAMESPACE -p '{"spec":{"engine":{"storage":{"size":"20G"}}}}' --type merge
diff --git a/tests/e2e/core/pxc/64-assert.yaml b/tests/e2e/core/pxc/64-assert.yaml
new file mode 100644
index 000000000..3a7b481da
--- /dev/null
+++ b/tests/e2e/core/pxc/64-assert.yaml
@@ -0,0 +1,10 @@
+apiVersion: kuttl.dev/v1
+kind: TestAssert
+timeout: 600
+---
+apiVersion: everest.percona.com/v1alpha1
+kind: DatabaseCluster
+metadata:
+  name: test-pxc-cluster
+status:
+  status: ready
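Reviewer note: a minimal usage sketch (not part of this diff) of the new common.ConfigureStorage helper from a provider other than PXC. The function applyPVCSize and the currentPVC/desiredPVC parameters are hypothetical placeholders for provider-specific CR fields; only APIs introduced in this change plus core Kubernetes types are assumed.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"sigs.k8s.io/controller-runtime/pkg/client"

	everestv1alpha1 "github.com/percona/everest-operator/api/v1alpha1"
	"github.com/percona/everest-operator/internal/controller/common"
)

// applyPVCSize (hypothetical) mirrors the PXC wiring above: it derives the
// currently applied size, then lets ConfigureStorage decide which size to set.
func applyPVCSize(
	ctx context.Context,
	c client.Client,
	db *everestv1alpha1.DatabaseCluster,
	currentPVC *corev1.PersistentVolumeClaimSpec, // PVC spec currently applied by the upstream operator
	desiredPVC *corev1.PersistentVolumeClaimSpec, // PVC spec being built for this reconcile
) error {
	// Pass a zero Quantity for a cluster that has not been reconciled yet, so
	// ConfigureStorage treats the call as initial setup rather than a resize.
	var currentSize resource.Quantity
	if db.Status.Status != everestv1alpha1.AppStateNew && currentPVC != nil {
		currentSize = currentPVC.Resources.Requests[corev1.ResourceStorage]
	}

	// ConfigureStorage enforces the no-shrink rule and the storage class
	// expansion check, recording refusals on db.Status.Conditions; the callback
	// only writes the size that should actually be applied.
	return common.ConfigureStorage(ctx, c, db, currentSize, func(size resource.Quantity) {
		if desiredPVC.Resources.Requests == nil {
			desiredPVC.Resources.Requests = corev1.ResourceList{}
		}
		desiredPVC.Resources.Requests[corev1.ResourceStorage] = size
	})
}

As in the PXC applier, the provider stays responsible for mapping the chosen size onto its upstream CR; all resize policy lives in the shared helper.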