From d89e726b56d87765a29b3965554c56205f8a34d6 Mon Sep 17 00:00:00 2001 From: RavinaChidambaram Date: Fri, 2 Aug 2024 13:32:46 +0530 Subject: [PATCH 1/4] changes to postgresql CR status Signed-off-by: RavinaChidambaram --- manifests/minimal-postgres-manifest.yaml | 2 + manifests/postgresql.crd.yaml | 35 ++++++++- pkg/apis/acid.zalan.do/v1/crds.go | 66 ++++++++++++++-- pkg/apis/acid.zalan.do/v1/postgresql_type.go | 42 +++++++++- pkg/apis/acid.zalan.do/v1/util.go | 6 +- pkg/cluster/cluster.go | 18 +++-- pkg/cluster/sync.go | 6 +- pkg/controller/postgresql.go | 12 ++- pkg/util/k8sutil/k8sutil.go | 83 +++++++++++++++++++- 9 files changed, 243 insertions(+), 27 deletions(-) diff --git a/manifests/minimal-postgres-manifest.yaml b/manifests/minimal-postgres-manifest.yaml index d22327905..f3ed3768d 100644 --- a/manifests/minimal-postgres-manifest.yaml +++ b/manifests/minimal-postgres-manifest.yaml @@ -2,6 +2,8 @@ apiVersion: "acid.zalan.do/v1" kind: postgresql metadata: name: acid-minimal-cluster + labels: + cluster-name: acid-minimal-cluster spec: teamId: "acid" volume: diff --git a/manifests/postgresql.crd.yaml b/manifests/postgresql.crd.yaml index 4bd757f38..e9ae3acf6 100644 --- a/manifests/postgresql.crd.yaml +++ b/manifests/postgresql.crd.yaml @@ -20,6 +20,10 @@ spec: storage: true subresources: status: {} + scale: + specReplicasPath: .spec.numberOfInstances + statusReplicasPath: .status.numberOfInstances + labelSelectorPath: .status.labelSelector additionalPrinterColumns: - name: Team type: string @@ -51,7 +55,7 @@ spec: - name: Status type: string description: Current sync status of postgresql resource - jsonPath: .status.PostgresClusterStatus + jsonPath: .status.postgresClusterStatus schema: openAPIV3Schema: type: object @@ -677,5 +681,30 @@ spec: type: integer status: type: object - additionalProperties: - type: string + properties: + postgresClusterStatus: + type: string + numberOfInstances: + format: int32 + type: integer + labelSelector: + type: string + 
observedGeneration: + format: int64 + type: integer + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + lastTransitionTime: + type: string + format: date-time + reason: + type: string + message: + type: string diff --git a/pkg/apis/acid.zalan.do/v1/crds.go b/pkg/apis/acid.zalan.do/v1/crds.go index 9e65869e7..4aee60104 100644 --- a/pkg/apis/acid.zalan.do/v1/crds.go +++ b/pkg/apis/acid.zalan.do/v1/crds.go @@ -25,6 +25,12 @@ const ( OperatorConfigCRDResourceShort = "opconfig" ) +var ( + specReplicasPath = ".spec.numberOfInstances" + statusReplicasPath = ".status.numberOfInstances" + labelSelectorPath = ".status.labelSelector" +) + // PostgresCRDResourceColumns definition of AdditionalPrinterColumns for postgresql CRD var PostgresCRDResourceColumns = []apiextv1.CustomResourceColumnDefinition{ { @@ -72,7 +78,7 @@ var PostgresCRDResourceColumns = []apiextv1.CustomResourceColumnDefinition{ Name: "Status", Type: "string", Description: "Current sync status of postgresql resource", - JSONPath: ".status.PostgresClusterStatus", + JSONPath: ".status.postgresClusterStatus", }, } @@ -1106,10 +1112,47 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ }, "status": { Type: "object", - AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ - Schema: &apiextv1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ + "postgresClusterStatus": { Type: "string", }, + "numberOfInstances": { + Type: "integer", + Format: "int32", + }, + "labelSelector": { + Type: "string", + }, + "observedGeneration": { + Type: "integer", + Format: "int64", + }, + "conditions": { + Type: "array", + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "object", + Properties: map[string]apiextv1.JSONSchemaProps{ + "type": { + Type: "string", + }, + "status": { + Type: "string", + }, + "lastTransitionTime": { + Type: "string", + Format: "date-time", + }, + "reason": { + Type: 
"string", + }, + "message": { + Type: "string", + }, + }, + }, + }, + }, }, }, }, @@ -1983,7 +2026,7 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{ func buildCRD(name, kind, plural, list, short string, categories []string, columns []apiextv1.CustomResourceColumnDefinition, - validation apiextv1.CustomResourceValidation) *apiextv1.CustomResourceDefinition { + validation apiextv1.CustomResourceValidation, specReplicasPath string, statusReplicasPath string, labelSelectorPath string) *apiextv1.CustomResourceDefinition { return &apiextv1.CustomResourceDefinition{ TypeMeta: metav1.TypeMeta{ APIVersion: fmt.Sprintf("%s/%s", apiextv1.GroupName, apiextv1.SchemeGroupVersion.Version), @@ -2010,6 +2053,11 @@ func buildCRD(name, kind, plural, list, short string, Storage: true, Subresources: &apiextv1.CustomResourceSubresources{ Status: &apiextv1.CustomResourceSubresourceStatus{}, + Scale: &apiextv1.CustomResourceSubresourceScale{ + SpecReplicasPath: specReplicasPath, + StatusReplicasPath: statusReplicasPath, + LabelSelectorPath: &labelSelectorPath, + }, }, AdditionalPrinterColumns: columns, Schema: &validation, @@ -2028,7 +2076,10 @@ func PostgresCRD(crdCategories []string) *apiextv1.CustomResourceDefinition { PostgresCRDResourceShort, crdCategories, PostgresCRDResourceColumns, - PostgresCRDResourceValidation) + PostgresCRDResourceValidation, + specReplicasPath, + statusReplicasPath, + labelSelectorPath) } // ConfigurationCRD returns CustomResourceDefinition built from OperatorConfigCRDResource @@ -2040,5 +2091,8 @@ func ConfigurationCRD(crdCategories []string) *apiextv1.CustomResourceDefinition OperatorConfigCRDResourceShort, crdCategories, OperatorConfigCRDResourceColumns, - OperatorConfigCRDResourceValidation) + OperatorConfigCRDResourceValidation, + specReplicasPath, + statusReplicasPath, + labelSelectorPath) } diff --git a/pkg/apis/acid.zalan.do/v1/postgresql_type.go b/pkg/apis/acid.zalan.do/v1/postgresql_type.go index 612cf7041..9759134cf 
100644 --- a/pkg/apis/acid.zalan.do/v1/postgresql_type.go +++ b/pkg/apis/acid.zalan.do/v1/postgresql_type.go @@ -4,6 +4,7 @@ package v1 import ( "time" + "k8s.io/apimachinery/pkg/api/equality" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -225,9 +226,48 @@ type Sidecar struct { // UserFlags defines flags (such as superuser, nologin) that could be assigned to individual users type UserFlags []string +type Conditions []Condition + +type ConditionType string +type VolatileTime struct { + Inner metav1.Time `json:",inline"` +} + +// MarshalJSON implements the json.Marshaler interface. +func (t VolatileTime) MarshalJSON() ([]byte, error) { + return t.Inner.MarshalJSON() +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (t *VolatileTime) UnmarshalJSON(b []byte) error { + return t.Inner.UnmarshalJSON(b) +} + +func init() { + equality.Semantic.AddFunc( + // Always treat VolatileTime fields as equivalent. + func(VolatileTime, VolatileTime) bool { + return true + }, + ) +} + +// Condition contains the conditions of the PostgreSQL cluster +type Condition struct { + Type ConditionType `json:"type" description:"type of status condition"` + Status v1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"` + LastTransitionTime VolatileTime `json:"lastTransitionTime,omitempty" description:"last time the condition transit from one status to another"` + Reason string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"` + Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"` +} + // PostgresStatus contains status of the PostgreSQL cluster (running, creation failed etc.) 
type PostgresStatus struct { - PostgresClusterStatus string `json:"PostgresClusterStatus"` + PostgresClusterStatus string `json:"postgresClusterStatus"` + NumberOfInstances int32 `json:"numberOfInstances"` + LabelSelector string `json:"labelSelector"` + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + Conditions Conditions `json:"conditions,omitempty"` } // ConnectionPooler Options for connection pooler diff --git a/pkg/apis/acid.zalan.do/v1/util.go b/pkg/apis/acid.zalan.do/v1/util.go index 719defe93..8d9b4ebc9 100644 --- a/pkg/apis/acid.zalan.do/v1/util.go +++ b/pkg/apis/acid.zalan.do/v1/util.go @@ -101,6 +101,6 @@ func (postgresStatus PostgresStatus) Creating() bool { return postgresStatus.PostgresClusterStatus == ClusterStatusCreating } -func (postgresStatus PostgresStatus) String() string { - return postgresStatus.PostgresClusterStatus -} +//func (postgresStatus PostgresStatus) String() string { +// return postgresStatus.PostgresClusterStatus +//} diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 86aaa4788..c7bd089dc 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -260,11 +260,13 @@ func (c *Cluster) Create() (err error) { pgUpdatedStatus *acidv1.Postgresql errStatus error ) + labelstring := fmt.Sprintf("%s=%s", "cluster-name", c.Postgresql.ObjectMeta.Labels["cluster-name"]) + existingCondition := c.Postgresql.Status.Conditions if err == nil { - pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning) //TODO: are you sure it's running? + pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning, c.Postgresql.Spec.NumberOfInstances, labelstring, c.Postgresql.Generation, existingCondition, "") //TODO: are you sure it's running? 
} else { c.logger.Warningf("cluster created failed: %v", err) - pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusAddFailed) + pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusAddFailed, 0, labelstring, 0, existingCondition, err.Error()) } if errStatus != nil { c.logger.Warningf("could not set cluster status: %v", errStatus) @@ -274,7 +276,9 @@ func (c *Cluster) Create() (err error) { } }() - pgCreateStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusCreating) + labelstring := fmt.Sprintf("%s=%s", "cluster-name", c.Postgresql.ObjectMeta.Labels["cluster-name"]) + existingCondition := c.Postgresql.Status.Conditions + pgCreateStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusCreating, 0, labelstring, 0, existingCondition, "") if err != nil { return fmt.Errorf("could not set cluster status: %v", err) } @@ -927,7 +931,9 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { c.mu.Lock() defer c.mu.Unlock() - c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdating) + labelstring := fmt.Sprintf("%s=%s", "cluster-name", c.Postgresql.ObjectMeta.Labels["cluster-name"]) + existingCondition := c.Postgresql.Status.Conditions + c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdating, c.Postgresql.Status.NumberOfInstances, labelstring, c.Postgresql.Status.ObservedGeneration, existingCondition, "") c.setSpec(newSpec) defer func() { @@ -936,9 +942,9 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { err error ) if updateFailed { - pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdateFailed) + pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdateFailed, c.Postgresql.Status.NumberOfInstances, labelstring, 
c.Postgresql.Status.ObservedGeneration, existingCondition, err.Error()) } else { - pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning) + pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning, newSpec.Spec.NumberOfInstances, labelstring, c.Postgresql.Generation, existingCondition, "") } if err != nil { c.logger.Warningf("could not set cluster status: %v", err) diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index b106fc722..cb64e2f84 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -46,11 +46,13 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error { pgUpdatedStatus *acidv1.Postgresql errStatus error ) + labelstring := fmt.Sprintf("%s=%s", "cluster-name", c.Postgresql.ObjectMeta.Labels["cluster-name"]) + existingCondition := c.Postgresql.Status.Conditions if err != nil { c.logger.Warningf("error while syncing cluster state: %v", err) - pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusSyncFailed) + pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusSyncFailed, newSpec.Status.NumberOfInstances, labelstring, c.Postgresql.Status.ObservedGeneration, existingCondition, errStatus.Error()) } else if !c.Status.Running() { - pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning) + pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning, newSpec.Spec.NumberOfInstances, labelstring, c.Postgresql.Generation, existingCondition, "") } if errStatus != nil { c.logger.Warningf("could not set cluster status: %v", errStatus) diff --git a/pkg/controller/postgresql.go b/pkg/controller/postgresql.go index accc345ad..e4661f2a0 100644 --- a/pkg/controller/postgresql.go +++ b/pkg/controller/postgresql.go @@ -161,7 +161,9 @@ func (c *Controller) 
acquireInitialListOfClusters() error { func (c *Controller) addCluster(lg *logrus.Entry, clusterName spec.NamespacedName, pgSpec *acidv1.Postgresql) (*cluster.Cluster, error) { if c.opConfig.EnableTeamIdClusternamePrefix { if _, err := acidv1.ExtractClusterName(clusterName.Name, pgSpec.Spec.TeamID); err != nil { - c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusInvalid) + labelstring := fmt.Sprintf("%s=%s", "cluster-name", pgSpec.ObjectMeta.Labels["cluster-name"]) + existingCondition := pgSpec.Status.Conditions + c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusInvalid, pgSpec.Status.NumberOfInstances, labelstring, pgSpec.Status.ObservedGeneration, existingCondition, err.Error()) return nil, err } } @@ -472,16 +474,18 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *acidv1. if clusterError != "" && eventType != EventDelete { c.logger.WithField("cluster-name", clusterName).Debugf("skipping %q event for the invalid cluster: %s", eventType, clusterError) + labelstring := fmt.Sprintf("%s=%s", "cluster-name", informerNewSpec.ObjectMeta.Labels["cluster-name"]) + existingCondition := informerNewSpec.Status.Conditions switch eventType { case EventAdd: - c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusAddFailed) + c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusAddFailed, informerNewSpec.Status.NumberOfInstances, labelstring, informerNewSpec.Status.ObservedGeneration, existingCondition, clusterError) c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Create", "%v", clusterError) case EventUpdate: - c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusUpdateFailed) + c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusUpdateFailed, informerNewSpec.Status.NumberOfInstances, labelstring, informerNewSpec.Status.ObservedGeneration, existingCondition, clusterError) c.eventRecorder.Eventf(c.GetReference(informerNewSpec), 
v1.EventTypeWarning, "Update", "%v", clusterError) default: - c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusSyncFailed) + c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusSyncFailed, informerNewSpec.Status.NumberOfInstances, labelstring, informerNewSpec.Status.ObservedGeneration, existingCondition, clusterError) c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Sync", "%v", clusterError) } diff --git a/pkg/util/k8sutil/k8sutil.go b/pkg/util/k8sutil/k8sutil.go index 7ae402fe3..abefe8cbd 100644 --- a/pkg/util/k8sutil/k8sutil.go +++ b/pkg/util/k8sutil/k8sutil.go @@ -3,6 +3,7 @@ package k8sutil import ( "context" "fmt" + "time" b64 "encoding/base64" "encoding/json" @@ -192,10 +193,17 @@ func NewFromConfig(cfg *rest.Config) (KubernetesClient, error) { } // SetPostgresCRDStatus of Postgres cluster -func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.NamespacedName, status string) (*apiacidv1.Postgresql, error) { +func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.NamespacedName, status string, numberOfInstances int32, labelSelector string, observedGeneration int64, existingCondition apiacidv1.Conditions, message string) (*apiacidv1.Postgresql, error) { var pg *apiacidv1.Postgresql - var pgStatus apiacidv1.PostgresStatus + pgStatus := apiacidv1.PostgresStatus{} pgStatus.PostgresClusterStatus = status + pgStatus.NumberOfInstances = numberOfInstances + pgStatus.LabelSelector = labelSelector + pgStatus.ObservedGeneration = observedGeneration + + newConditions := updateConditions(existingCondition, status, message) + pgStatus.Conditions = newConditions + patch, err := json.Marshal(struct { PgStatus interface{} `json:"status"` @@ -217,6 +225,77 @@ func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.Namespaced return pg, nil } +func updateConditions(existingConditions apiacidv1.Conditions, currentStatus string, message string) apiacidv1.Conditions { + now := 
apiacidv1.VolatileTime{Inner: metav1.NewTime(time.Now())} + var readyCondition, reconciliationCondition *apiacidv1.Condition + + // Find existing conditions + for i := range existingConditions { + if existingConditions[i].Type == "Ready" { + readyCondition = &existingConditions[i] + } else if existingConditions[i].Type == "ReconciliationSuccessful" { + reconciliationCondition = &existingConditions[i] + } + } + + // Initialize conditions if not present + switch currentStatus { + case "Creating": + if reconciliationCondition == nil { + existingConditions = append(existingConditions, apiacidv1.Condition{Type: "ReconciliationSuccessful"}) + reconciliationCondition = &existingConditions[len(existingConditions)-1] + + } + default: + if readyCondition == nil { + existingConditions = append(existingConditions, apiacidv1.Condition{Type: "Ready"}) + readyCondition = &existingConditions[len(existingConditions)-1] + } + } + + // Update Ready condition + switch currentStatus { + case "Running": + readyCondition.Status = v1.ConditionTrue + readyCondition.LastTransitionTime = now + case "CreateFailed": + readyCondition.Status = v1.ConditionFalse + readyCondition.LastTransitionTime = now + case "UpdateFailed", "SyncFailed", "Invalid": + if readyCondition.Status == v1.ConditionFalse { + readyCondition.LastTransitionTime = now + } + case "Updating": + // not updating time, just setting the status + if readyCondition.Status == v1.ConditionFalse { + readyCondition.Status = v1.ConditionFalse + } else { + readyCondition.Status = v1.ConditionTrue + } + } + + // Update ReconciliationSuccessful condition + reconciliationCondition.LastTransitionTime = now + reconciliationCondition.Message = message + if currentStatus == "Running" { + reconciliationCondition.Status = v1.ConditionTrue + reconciliationCondition.Reason = "" + } else { + reconciliationCondition.Status = v1.ConditionFalse + reconciliationCondition.Reason = currentStatus + } + // Directly modify elements in the existingConditions
slice + for i := range existingConditions { + if existingConditions[i].Type == "Ready" && readyCondition != nil { + existingConditions[i] = *readyCondition + } else if existingConditions[i].Type == "ReconciliationSuccessful" && reconciliationCondition != nil { + existingConditions[i] = *reconciliationCondition + } + } + + return existingConditions +} + // SetFinalizer of Postgres cluster func (client *KubernetesClient) SetFinalizer(clusterName spec.NamespacedName, pg *apiacidv1.Postgresql, finalizers []string) (*apiacidv1.Postgresql, error) { var ( From 30ed723c95664862dff7d89b54064aef2338662e Mon Sep 17 00:00:00 2001 From: RavinaChidambaram Date: Fri, 2 Aug 2024 18:12:02 +0530 Subject: [PATCH 2/4] minor changes Signed-off-by: RavinaChidambaram --- pkg/apis/acid.zalan.do/v1/postgresql_type.go | 4 ++-- pkg/apis/acid.zalan.do/v1/util.go | 4 ---- pkg/cluster/cluster.go | 23 ++++++++++---------- pkg/cluster/sync.go | 6 ++--- pkg/controller/postgresql.go | 12 +++++----- pkg/util/k8sutil/k8sutil.go | 5 ++--- 6 files changed, 25 insertions(+), 29 deletions(-) diff --git a/pkg/apis/acid.zalan.do/v1/postgresql_type.go b/pkg/apis/acid.zalan.do/v1/postgresql_type.go index 9759134cf..4985ee3bb 100644 --- a/pkg/apis/acid.zalan.do/v1/postgresql_type.go +++ b/pkg/apis/acid.zalan.do/v1/postgresql_type.go @@ -3,8 +3,8 @@ package v1 // Postgres CRD definition, please use CamelCase for field names. import ( - "time" "k8s.io/apimachinery/pkg/api/equality" + "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -263,7 +263,7 @@ type Condition struct { // PostgresStatus contains status of the PostgreSQL cluster (running, creation failed etc.) 
type PostgresStatus struct { - PostgresClusterStatus string `json:"postgresClusterStatus"` + PostgresClusterStatus string `json:"postgresClusterStatus"` NumberOfInstances int32 `json:"numberOfInstances"` LabelSelector string `json:"labelSelector"` ObservedGeneration int64 `json:"observedGeneration,omitempty"` diff --git a/pkg/apis/acid.zalan.do/v1/util.go b/pkg/apis/acid.zalan.do/v1/util.go index 8d9b4ebc9..3f22f9f44 100644 --- a/pkg/apis/acid.zalan.do/v1/util.go +++ b/pkg/apis/acid.zalan.do/v1/util.go @@ -100,7 +100,3 @@ func (postgresStatus PostgresStatus) Running() bool { func (postgresStatus PostgresStatus) Creating() bool { return postgresStatus.PostgresClusterStatus == ClusterStatusCreating } - -//func (postgresStatus PostgresStatus) String() string { -// return postgresStatus.PostgresClusterStatus -//} diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index c7bd089dc..25cdd3cc8 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -255,18 +255,20 @@ func (c *Cluster) Create() (err error) { ss *appsv1.StatefulSet ) + //Even though its possible to propogate other CR labels to the pods, picking the default label here since its propogated to all the pods by default. But this means that in order for the scale subresource to work properly, user must set the "cluster-name" key in their CRs with value matching the CR name. + labelstring := fmt.Sprintf("%s=%s", "cluster-name", c.Postgresql.ObjectMeta.Labels["cluster-name"]) //TODO: make this configurable. 
+ defer func() { var ( pgUpdatedStatus *acidv1.Postgresql errStatus error ) - labelstring := fmt.Sprintf("%s=%s", "cluster-name", c.Postgresql.ObjectMeta.Labels["cluster-name"]) - existingCondition := c.Postgresql.Status.Conditions + existingConditions := c.Postgresql.Status.Conditions if err == nil { - pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning, c.Postgresql.Spec.NumberOfInstances, labelstring, c.Postgresql.Generation, existingCondition, "") //TODO: are you sure it's running? + pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning, c.Postgresql.Spec.NumberOfInstances, labelstring, c.Postgresql.Generation, existingConditions, "") //TODO: are you sure it's running? } else { c.logger.Warningf("cluster created failed: %v", err) - pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusAddFailed, 0, labelstring, 0, existingCondition, err.Error()) + pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusAddFailed, 0, labelstring, 0, existingConditions, err.Error()) } if errStatus != nil { c.logger.Warningf("could not set cluster status: %v", errStatus) @@ -276,9 +278,8 @@ func (c *Cluster) Create() (err error) { } }() - labelstring := fmt.Sprintf("%s=%s", "cluster-name", c.Postgresql.ObjectMeta.Labels["cluster-name"]) - existingCondition := c.Postgresql.Status.Conditions - pgCreateStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusCreating, 0, labelstring, 0, existingCondition, "") + existingConditions := c.Postgresql.Status.Conditions + pgCreateStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusCreating, 0, labelstring, 0, existingConditions, "") if err != nil { return fmt.Errorf("could not set cluster status: %v", err) } @@ -932,8 +933,8 @@ func (c *Cluster) Update(oldSpec, newSpec 
*acidv1.Postgresql) error { defer c.mu.Unlock() labelstring := fmt.Sprintf("%s=%s", "cluster-name", c.Postgresql.ObjectMeta.Labels["cluster-name"]) - existingCondition := c.Postgresql.Status.Conditions - c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdating, c.Postgresql.Status.NumberOfInstances, labelstring, c.Postgresql.Status.ObservedGeneration, existingCondition, "") + existingConditions := c.Postgresql.Status.Conditions + c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdating, c.Postgresql.Status.NumberOfInstances, labelstring, c.Postgresql.Status.ObservedGeneration, existingConditions, "") c.setSpec(newSpec) defer func() { @@ -942,9 +943,9 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { err error ) if updateFailed { - pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdateFailed, c.Postgresql.Status.NumberOfInstances, labelstring, c.Postgresql.Status.ObservedGeneration, existingCondition, err.Error()) + pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdateFailed, c.Postgresql.Status.NumberOfInstances, labelstring, c.Postgresql.Status.ObservedGeneration, existingConditions, err.Error()) } else { - pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning, newSpec.Spec.NumberOfInstances, labelstring, c.Postgresql.Generation, existingCondition, "") + pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning, newSpec.Spec.NumberOfInstances, labelstring, c.Postgresql.Generation, existingConditions, "") } if err != nil { c.logger.Warningf("could not set cluster status: %v", err) diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index cb64e2f84..380baedbf 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -47,12 +47,12 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error { errStatus error ) 
labelstring := fmt.Sprintf("%s=%s", "cluster-name", c.Postgresql.ObjectMeta.Labels["cluster-name"]) - existingCondition := c.Postgresql.Status.Conditions + existingConditions := c.Postgresql.Status.Conditions if err != nil { c.logger.Warningf("error while syncing cluster state: %v", err) - pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusSyncFailed, newSpec.Status.NumberOfInstances, labelstring, c.Postgresql.Status.ObservedGeneration, existingCondition, errStatus.Error()) + pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusSyncFailed, newSpec.Status.NumberOfInstances, labelstring, c.Postgresql.Status.ObservedGeneration, existingConditions, errStatus.Error()) } else if !c.Status.Running() { - pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning, newSpec.Spec.NumberOfInstances, labelstring, c.Postgresql.Generation, existingCondition, "") + pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning, newSpec.Spec.NumberOfInstances, labelstring, c.Postgresql.Generation, existingConditions, "") } if errStatus != nil { c.logger.Warningf("could not set cluster status: %v", errStatus) diff --git a/pkg/controller/postgresql.go b/pkg/controller/postgresql.go index e4661f2a0..c70419dae 100644 --- a/pkg/controller/postgresql.go +++ b/pkg/controller/postgresql.go @@ -162,8 +162,8 @@ func (c *Controller) addCluster(lg *logrus.Entry, clusterName spec.NamespacedNam if c.opConfig.EnableTeamIdClusternamePrefix { if _, err := acidv1.ExtractClusterName(clusterName.Name, pgSpec.Spec.TeamID); err != nil { labelstring := fmt.Sprintf("%s=%s", "cluster-name", pgSpec.ObjectMeta.Labels["cluster-name"]) - existingCondition := pgSpec.Status.Conditions - c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusInvalid, pgSpec.Status.NumberOfInstances, labelstring, 
pgSpec.Status.ObservedGeneration, existingCondition, err.Error()) + existingConditions := pgSpec.Status.Conditions + c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusInvalid, pgSpec.Status.NumberOfInstances, labelstring, pgSpec.Status.ObservedGeneration, existingConditions, err.Error()) return nil, err } } @@ -475,17 +475,17 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *acidv1. if clusterError != "" && eventType != EventDelete { c.logger.WithField("cluster-name", clusterName).Debugf("skipping %q event for the invalid cluster: %s", eventType, clusterError) labelstring := fmt.Sprintf("%s=%s", "cluster-name", informerNewSpec.ObjectMeta.Labels["cluster-name"]) - existingCondition := informerNewSpec.Status.Conditions + existingConditions := informerNewSpec.Status.Conditions switch eventType { case EventAdd: - c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusAddFailed, informerNewSpec.Status.NumberOfInstances, labelstring, informerNewSpec.Status.ObservedGeneration, existingCondition, clusterError) + c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusAddFailed, informerNewSpec.Status.NumberOfInstances, labelstring, informerNewSpec.Status.ObservedGeneration, existingConditions, clusterError) c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Create", "%v", clusterError) case EventUpdate: - c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusUpdateFailed, informerNewSpec.Status.NumberOfInstances, labelstring, informerNewSpec.Status.ObservedGeneration, existingCondition, clusterError) + c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusUpdateFailed, informerNewSpec.Status.NumberOfInstances, labelstring, informerNewSpec.Status.ObservedGeneration, existingConditions, clusterError) c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Update", "%v", clusterError) default: - 
c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusSyncFailed, informerNewSpec.Status.NumberOfInstances, labelstring, informerNewSpec.Status.ObservedGeneration, existingCondition, clusterError) + c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusSyncFailed, informerNewSpec.Status.NumberOfInstances, labelstring, informerNewSpec.Status.ObservedGeneration, existingConditions, clusterError) c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Sync", "%v", clusterError) } diff --git a/pkg/util/k8sutil/k8sutil.go b/pkg/util/k8sutil/k8sutil.go index abefe8cbd..8477f2867 100644 --- a/pkg/util/k8sutil/k8sutil.go +++ b/pkg/util/k8sutil/k8sutil.go @@ -193,7 +193,7 @@ func NewFromConfig(cfg *rest.Config) (KubernetesClient, error) { } // SetPostgresCRDStatus of Postgres cluster -func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.NamespacedName, status string, numberOfInstances int32, labelSelector string, observedGeneration int64, existingCondition apiacidv1.Conditions, message string) (*apiacidv1.Postgresql, error) { +func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.NamespacedName, status string, numberOfInstances int32, labelSelector string, observedGeneration int64, existingConditions apiacidv1.Conditions, message string) (*apiacidv1.Postgresql, error) { var pg *apiacidv1.Postgresql pgStatus := apiacidv1.PostgresStatus{} pgStatus.PostgresClusterStatus = status @@ -201,10 +201,9 @@ func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.Namespaced pgStatus.LabelSelector = labelSelector pgStatus.ObservedGeneration = observedGeneration - newConditions := updateConditions(existingCondition, status, message) + newConditions := updateConditions(existingConditions, status, message) pgStatus.Conditions = newConditions - patch, err := json.Marshal(struct { PgStatus interface{} `json:"status"` }{&pgStatus}) From 0269432560916fbdf12589654ec8f76b0775d0fa Mon Sep 17 00:00:00 
2001 From: RavinaChidambaram Date: Thu, 29 Aug 2024 18:25:51 +0530 Subject: [PATCH 3/4] minor changes and fixes for unit test Signed-off-by: RavinaChidambaram --- pkg/apis/acid.zalan.do/v1/crds.go | 2 - pkg/apis/acid.zalan.do/v1/postgresql_type.go | 12 ++-- pkg/cluster/cluster.go | 61 ++++++++++++++++---- pkg/cluster/sync.go | 21 +++++-- pkg/controller/postgresql.go | 34 +++++++---- pkg/util/k8sutil/k8sutil.go | 20 ++++--- 6 files changed, 110 insertions(+), 40 deletions(-) diff --git a/pkg/apis/acid.zalan.do/v1/crds.go b/pkg/apis/acid.zalan.do/v1/crds.go index 4aee60104..f56bd0a89 100644 --- a/pkg/apis/acid.zalan.do/v1/crds.go +++ b/pkg/apis/acid.zalan.do/v1/crds.go @@ -23,9 +23,7 @@ const ( OperatorConfigCRDResourceList = OperatorConfigCRDResouceKind + "List" OperatorConfigCRDResourceName = OperatorConfigCRDResourcePlural + "." + acidzalando.GroupName OperatorConfigCRDResourceShort = "opconfig" -) -var ( specReplicasPath = ".spec.numberOfInstances" statusReplicasPath = ".status.numberOfInstances" labelSelectorPath = ".status.labelSelector" diff --git a/pkg/apis/acid.zalan.do/v1/postgresql_type.go b/pkg/apis/acid.zalan.do/v1/postgresql_type.go index 4985ee3bb..8576e4c2f 100644 --- a/pkg/apis/acid.zalan.do/v1/postgresql_type.go +++ b/pkg/apis/acid.zalan.do/v1/postgresql_type.go @@ -228,7 +228,7 @@ type UserFlags []string type Conditions []Condition -type ConditionType string +type PostgresqlConditionType string type VolatileTime struct { Inner metav1.Time `json:",inline"` } @@ -254,11 +254,11 @@ func init() { // Condition contains the conditions of the PostgreSQL cluster type Condition struct { - Type ConditionType `json:"type" description:"type of status condition"` - Status v1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"` - LastTransitionTime VolatileTime `json:"lastTransitionTime,omitempty" description:"last time the condition transit from one status to another"` - Reason string `json:"reason,omitempty" 
description:"one-word CamelCase reason for the condition's last transition"` - Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"` + Type PostgresqlConditionType `json:"type" description:"type of status condition"` + Status v1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"` + LastTransitionTime VolatileTime `json:"lastTransitionTime,omitempty" description:"last time the condition transit from one status to another"` + Reason string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"` + Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"` } // PostgresStatus contains status of the PostgreSQL cluster (running, creation failed etc.) diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 25cdd3cc8..8d212da4f 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -256,19 +256,32 @@ func (c *Cluster) Create() (err error) { ) //Even though its possible to propogate other CR labels to the pods, picking the default label here since its propogated to all the pods by default. But this means that in order for the scale subresource to work properly, user must set the "cluster-name" key in their CRs with value matching the CR name. - labelstring := fmt.Sprintf("%s=%s", "cluster-name", c.Postgresql.ObjectMeta.Labels["cluster-name"]) //TODO: make this configurable. 
+ labelstring := fmt.Sprintf("%s=%s", c.OpConfig.ClusterNameLabel, c.Postgresql.ObjectMeta.Labels[c.OpConfig.ClusterNameLabel]) defer func() { var ( pgUpdatedStatus *acidv1.Postgresql errStatus error ) - existingConditions := c.Postgresql.Status.Conditions if err == nil { - pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning, c.Postgresql.Spec.NumberOfInstances, labelstring, c.Postgresql.Generation, existingConditions, "") //TODO: are you sure it's running? + ClusterStatus := acidv1.PostgresStatus{ + PostgresClusterStatus: acidv1.ClusterStatusRunning, + NumberOfInstances: c.Postgresql.Spec.NumberOfInstances, + LabelSelector: labelstring, + ObservedGeneration: c.Postgresql.Generation, + Conditions: c.Postgresql.Status.Conditions, + } + pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), ClusterStatus, "") //TODO: are you sure it's running? } else { c.logger.Warningf("cluster created failed: %v", err) - pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusAddFailed, 0, labelstring, 0, existingConditions, err.Error()) + ClusterStatus := acidv1.PostgresStatus{ + PostgresClusterStatus: acidv1.ClusterStatusAddFailed, + NumberOfInstances: 0, + LabelSelector: labelstring, + ObservedGeneration: 0, + Conditions: c.Postgresql.Status.Conditions, + } + pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), ClusterStatus, err.Error()) } if errStatus != nil { c.logger.Warningf("could not set cluster status: %v", errStatus) @@ -278,8 +291,14 @@ func (c *Cluster) Create() (err error) { } }() - existingConditions := c.Postgresql.Status.Conditions - pgCreateStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusCreating, 0, labelstring, 0, existingConditions, "") + ClusterStatus := acidv1.PostgresStatus{ + PostgresClusterStatus: acidv1.ClusterStatusCreating, + NumberOfInstances: 0, + LabelSelector: 
labelstring, + ObservedGeneration: 0, + Conditions: c.Postgresql.Status.Conditions, + } + pgCreateStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), ClusterStatus, "") if err != nil { return fmt.Errorf("could not set cluster status: %v", err) } @@ -932,9 +951,15 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { c.mu.Lock() defer c.mu.Unlock() - labelstring := fmt.Sprintf("%s=%s", "cluster-name", c.Postgresql.ObjectMeta.Labels["cluster-name"]) - existingConditions := c.Postgresql.Status.Conditions - c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdating, c.Postgresql.Status.NumberOfInstances, labelstring, c.Postgresql.Status.ObservedGeneration, existingConditions, "") + labelstring := fmt.Sprintf("%s=%s", c.OpConfig.ClusterNameLabel, c.Postgresql.ObjectMeta.Labels[c.OpConfig.ClusterNameLabel]) + ClusterStatus := acidv1.PostgresStatus{ + PostgresClusterStatus: acidv1.ClusterStatusUpdating, + NumberOfInstances: c.Postgresql.Status.NumberOfInstances, + LabelSelector: labelstring, + ObservedGeneration: c.Postgresql.Status.ObservedGeneration, + Conditions: c.Postgresql.Status.Conditions, + } + c.KubeClient.SetPostgresCRDStatus(c.clusterName(), ClusterStatus, "") c.setSpec(newSpec) defer func() { @@ -943,9 +968,23 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { err error ) if updateFailed { - pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdateFailed, c.Postgresql.Status.NumberOfInstances, labelstring, c.Postgresql.Status.ObservedGeneration, existingConditions, err.Error()) + ClusterStatus := acidv1.PostgresStatus{ + PostgresClusterStatus: acidv1.ClusterStatusUpdateFailed, + NumberOfInstances: c.Postgresql.Status.NumberOfInstances, + LabelSelector: labelstring, + ObservedGeneration: c.Postgresql.Status.ObservedGeneration, + Conditions: c.Postgresql.Status.Conditions, + } + pgUpdatedStatus, err = 
c.KubeClient.SetPostgresCRDStatus(c.clusterName(), ClusterStatus, err.Error()) } else { - pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning, newSpec.Spec.NumberOfInstances, labelstring, c.Postgresql.Generation, existingConditions, "") + ClusterStatus := acidv1.PostgresStatus{ + PostgresClusterStatus: acidv1.ClusterStatusRunning, + NumberOfInstances: newSpec.Spec.NumberOfInstances, + LabelSelector: labelstring, + ObservedGeneration: c.Postgresql.Generation, + Conditions: c.Postgresql.Status.Conditions, + } + pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), ClusterStatus, "") } if err != nil { c.logger.Warningf("could not set cluster status: %v", err) diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index 380baedbf..f038cb03c 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -46,13 +46,26 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error { pgUpdatedStatus *acidv1.Postgresql errStatus error ) - labelstring := fmt.Sprintf("%s=%s", "cluster-name", c.Postgresql.ObjectMeta.Labels["cluster-name"]) - existingConditions := c.Postgresql.Status.Conditions + labelstring := fmt.Sprintf("%s=%s", c.OpConfig.ClusterNameLabel, c.Postgresql.ObjectMeta.Labels[c.OpConfig.ClusterNameLabel]) if err != nil { c.logger.Warningf("error while syncing cluster state: %v", err) - pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusSyncFailed, newSpec.Status.NumberOfInstances, labelstring, c.Postgresql.Status.ObservedGeneration, existingConditions, errStatus.Error()) + ClusterStatus := acidv1.PostgresStatus{ + PostgresClusterStatus: acidv1.ClusterStatusSyncFailed, + NumberOfInstances: newSpec.Status.NumberOfInstances, + LabelSelector: labelstring, + ObservedGeneration: c.Postgresql.Status.ObservedGeneration, + Conditions: c.Postgresql.Status.Conditions, + } + pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), 
ClusterStatus, errStatus.Error()) } else if !c.Status.Running() { - pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning, newSpec.Spec.NumberOfInstances, labelstring, c.Postgresql.Generation, existingConditions, "") + ClusterStatus := acidv1.PostgresStatus{ + PostgresClusterStatus: acidv1.ClusterStatusRunning, + NumberOfInstances: newSpec.Spec.NumberOfInstances, + LabelSelector: labelstring, + ObservedGeneration: c.Postgresql.Generation, + Conditions: c.Postgresql.Status.Conditions, + } + pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), ClusterStatus, "") } if errStatus != nil { c.logger.Warningf("could not set cluster status: %v", errStatus) diff --git a/pkg/controller/postgresql.go b/pkg/controller/postgresql.go index c70419dae..77e9e1585 100644 --- a/pkg/controller/postgresql.go +++ b/pkg/controller/postgresql.go @@ -161,9 +161,15 @@ func (c *Controller) acquireInitialListOfClusters() error { func (c *Controller) addCluster(lg *logrus.Entry, clusterName spec.NamespacedName, pgSpec *acidv1.Postgresql) (*cluster.Cluster, error) { if c.opConfig.EnableTeamIdClusternamePrefix { if _, err := acidv1.ExtractClusterName(clusterName.Name, pgSpec.Spec.TeamID); err != nil { - labelstring := fmt.Sprintf("%s=%s", "cluster-name", pgSpec.ObjectMeta.Labels["cluster-name"]) - existingConditions := pgSpec.Status.Conditions - c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusInvalid, pgSpec.Status.NumberOfInstances, labelstring, pgSpec.Status.ObservedGeneration, existingConditions, err.Error()) + labelstring := fmt.Sprintf("%s=%s", c.opConfig.ClusterNameLabel, pgSpec.ObjectMeta.Labels[c.opConfig.ClusterNameLabel]) + ClusterStatus := acidv1.PostgresStatus{ + PostgresClusterStatus: acidv1.ClusterStatusInvalid, + NumberOfInstances: pgSpec.Status.NumberOfInstances, + LabelSelector: labelstring, + ObservedGeneration: pgSpec.Status.ObservedGeneration, + Conditions: 
pgSpec.Status.Conditions, + } + c.KubeClient.SetPostgresCRDStatus(clusterName, ClusterStatus, err.Error()) return nil, err } } @@ -209,10 +215,10 @@ func (c *Controller) processEvent(event ClusterEvent) { if event.EventType == EventRepair { runRepair, lastOperationStatus := cl.NeedsRepair() if !runRepair { - lg.Debugf("observed cluster status %s, repair is not required", lastOperationStatus) + lg.Debugf("observed cluster status %#v, repair is not required", lastOperationStatus) return } - lg.Debugf("observed cluster status %s, running sync scan to repair the cluster", lastOperationStatus) + lg.Debugf("observed cluster status %#v, running sync scan to repair the cluster", lastOperationStatus) event.EventType = EventSync } @@ -474,18 +480,26 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *acidv1. if clusterError != "" && eventType != EventDelete { c.logger.WithField("cluster-name", clusterName).Debugf("skipping %q event for the invalid cluster: %s", eventType, clusterError) - labelstring := fmt.Sprintf("%s=%s", "cluster-name", informerNewSpec.ObjectMeta.Labels["cluster-name"]) - existingConditions := informerNewSpec.Status.Conditions + labelstring := fmt.Sprintf("%s=%s", c.opConfig.ClusterNameLabel, informerNewSpec.ObjectMeta.Labels[c.opConfig.ClusterNameLabel]) + ClusterStatus := acidv1.PostgresStatus{ + NumberOfInstances: informerNewSpec.Status.NumberOfInstances, + LabelSelector: labelstring, + ObservedGeneration: informerNewSpec.Status.ObservedGeneration, + Conditions: informerNewSpec.Status.Conditions, + } switch eventType { case EventAdd: - c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusAddFailed, informerNewSpec.Status.NumberOfInstances, labelstring, informerNewSpec.Status.ObservedGeneration, existingConditions, clusterError) + ClusterStatus.PostgresClusterStatus = acidv1.ClusterStatusAddFailed + c.KubeClient.SetPostgresCRDStatus(clusterName, ClusterStatus, clusterError) 
c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Create", "%v", clusterError) case EventUpdate: - c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusUpdateFailed, informerNewSpec.Status.NumberOfInstances, labelstring, informerNewSpec.Status.ObservedGeneration, existingConditions, clusterError) + ClusterStatus.PostgresClusterStatus = acidv1.ClusterStatusUpdateFailed + c.KubeClient.SetPostgresCRDStatus(clusterName, ClusterStatus, clusterError) c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Update", "%v", clusterError) default: - c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusSyncFailed, informerNewSpec.Status.NumberOfInstances, labelstring, informerNewSpec.Status.ObservedGeneration, existingConditions, clusterError) + ClusterStatus.PostgresClusterStatus = acidv1.ClusterStatusSyncFailed + c.KubeClient.SetPostgresCRDStatus(clusterName, ClusterStatus, clusterError) c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Sync", "%v", clusterError) } diff --git a/pkg/util/k8sutil/k8sutil.go b/pkg/util/k8sutil/k8sutil.go index 8477f2867..48d31b77a 100644 --- a/pkg/util/k8sutil/k8sutil.go +++ b/pkg/util/k8sutil/k8sutil.go @@ -193,15 +193,10 @@ func NewFromConfig(cfg *rest.Config) (KubernetesClient, error) { } // SetPostgresCRDStatus of Postgres cluster -func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.NamespacedName, status string, numberOfInstances int32, labelSelector string, observedGeneration int64, existingConditions apiacidv1.Conditions, message string) (*apiacidv1.Postgresql, error) { +func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.NamespacedName, pgStatus apiacidv1.PostgresStatus, message string) (*apiacidv1.Postgresql, error) { var pg *apiacidv1.Postgresql - pgStatus := apiacidv1.PostgresStatus{} - pgStatus.PostgresClusterStatus = status - pgStatus.NumberOfInstances = numberOfInstances - 
pgStatus.LabelSelector = labelSelector - pgStatus.ObservedGeneration = observedGeneration - newConditions := updateConditions(existingConditions, status, message) + newConditions := updateConditions(pgStatus.Conditions, pgStatus.PostgresClusterStatus, message) pgStatus.Conditions = newConditions patch, err := json.Marshal(struct { @@ -252,6 +247,17 @@ func updateConditions(existingConditions apiacidv1.Conditions, currentStatus str } } + // Safety checks to avoid nil pointer dereference + if readyCondition == nil { + readyCondition = &apiacidv1.Condition{Type: "Ready"} + existingConditions = append(existingConditions, *readyCondition) + } + + if reconciliationCondition == nil { + reconciliationCondition = &apiacidv1.Condition{Type: "ReconciliationSuccessful"} + existingConditions = append(existingConditions, *reconciliationCondition) + } + // Update Ready condition switch currentStatus { case "Running": From 061b051d806d65f3d10c4ce70976caec0cea1ef1 Mon Sep 17 00:00:00 2001 From: RavinaChidambaram Date: Thu, 29 Aug 2024 19:40:49 +0530 Subject: [PATCH 4/4] updated zz_generated.deepcopy.go Signed-off-by: RavinaChidambaram --- .../acid.zalan.do/v1/zz_generated.deepcopy.go | 65 ++++++++++++++++++- 1 file changed, 64 insertions(+), 1 deletion(-) diff --git a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go index 80bc7b34d..bddf4de90 100644 --- a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go +++ b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go @@ -98,6 +98,45 @@ func (in *CloneDescription) DeepCopy() *CloneDescription { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Condition) DeepCopyInto(out *Condition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. 
+func (in *Condition) DeepCopy() *Condition { + if in == nil { + return nil + } + out := new(Condition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Conditions) DeepCopyInto(out *Conditions) { + { + in := &in + *out = make(Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Conditions. +func (in Conditions) DeepCopy() Conditions { + if in == nil { + return nil + } + out := new(Conditions) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConnectionPooler) DeepCopyInto(out *ConnectionPooler) { *out = *in @@ -892,6 +931,13 @@ func (in *PostgresSpec) DeepCopy() *PostgresSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PostgresStatus) DeepCopyInto(out *PostgresStatus) { *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -1053,7 +1099,7 @@ func (in *Postgresql) DeepCopyInto(out *Postgresql) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) return } @@ -1444,6 +1490,23 @@ func (in UserFlags) DeepCopy() UserFlags { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolatileTime) DeepCopyInto(out *VolatileTime) { + *out = *in + in.Inner.DeepCopyInto(&out.Inner) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolatileTime. +func (in *VolatileTime) DeepCopy() *VolatileTime { + if in == nil { + return nil + } + out := new(VolatileTime) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Volume) DeepCopyInto(out *Volume) { *out = *in