Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 17 additions & 0 deletions internal/helmutil/restgetter.go
Original file line number Diff line number Diff line change
@@ -1,3 +1,20 @@
// Package helmutil provides small helpers and adapters for integrating Helm
// into a Kubernetes operator.
//
// Why RemoteRESTClientGetter exists:
// - Helm's action.Configuration.Init expects a RESTClientGetter interface to
// construct API clients, discovery, and REST mappers.
// - In an operator we already hold a fully constructed *rest.Config for the
// target (often remote/edge) cluster via controller-runtime. Reading a local
// kubeconfig file is undesirable or impossible inside the operator pod.
// - This adapter bridges Helm's expectations with the operator's reality:
// it wraps the existing *rest.Config, sets a sensible default namespace for
// objects that omit metadata.namespace, and lazily constructs discovery and
// RESTMapper components.
// - Practical outcome: The operator can install/upgrade/uninstall Helm charts
// directly against remote clusters using in-memory configuration, without
// depending on kubeconfig files. See usage in the deploy/uninstall paths
// of the multicluster operator.
package helmutil

import (
Expand Down
38 changes: 38 additions & 0 deletions internal/helmutil/restgetter_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
package helmutil

import (
"testing"

"k8s.io/client-go/rest"
)

// TestRemoteRESTClientGetter_ToRESTConfig verifies that the getter returns a
// defensive copy of the wrapped *rest.Config — non-nil, a distinct pointer,
// and carrying the same Host — rather than handing back the original.
func TestRemoteRESTClientGetter_ToRESTConfig(t *testing.T) {
	original := &rest.Config{Host: "https://example.invalid"}
	getter := NewRemoteRESTClientGetter(original)

	got, err := getter.ToRESTConfig()
	switch {
	case err != nil:
		t.Fatalf("ToRESTConfig error: %v", err)
	case got == nil:
		t.Fatalf("ToRESTConfig returned nil config")
	case got == original:
		t.Fatalf("ToRESTConfig should return a copy, got same pointer")
	case got.Host != original.Host:
		t.Fatalf("Host mismatch: got %s want %s", got.Host, original.Host)
	}
}

func TestRemoteRESTClientGetter_ToRawKubeConfigLoader_Namespace(t *testing.T) {
base := &rest.Config{Host: "https://example.invalid"}
g := NewRemoteRESTClientGetterForNamespace(base, "foo")
loader := g.ToRawKubeConfigLoader()
ns, _, err := loader.Namespace()
if err != nil {
t.Fatalf("Namespace error: %v", err)
}
if ns != "foo" {
t.Fatalf("Namespace mismatch: got %s want %s", ns, "foo")
}
}
5 changes: 5 additions & 0 deletions internal/multicluster/crd_watch.go
Original file line number Diff line number Diff line change
@@ -1,5 +1,10 @@
package multicluster

// File overview: Ensures the KryptonDeployment CRD is present on the watch (home)
// cluster at startup. If missing, the embedded CRD manifest is applied with
// simple retries to tolerate transient API errors. This avoids a manual
// bootstrap step and improves resilience across environments.

import (
"context"
"errors"
Expand Down
29 changes: 29 additions & 0 deletions internal/multicluster/crd_watch_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
package multicluster

import (
"context"
"testing"

"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"

clientgoscheme "k8s.io/client-go/kubernetes/scheme"
)

// TestEnsureKryptonDeploymentCRD_Create ensures the CRD is created when missing.
// TestEnsureKryptonDeploymentCRD_Create ensures the CRD is created when missing.
func TestEnsureKryptonDeploymentCRD_Create(t *testing.T) {
	scheme := runtime.NewScheme()
	_ = clientgoscheme.AddToScheme(scheme)
	cl := fake.NewClientBuilder().WithScheme(scheme).Build()

	ctx := context.Background()
	if err := EnsureKryptonDeploymentCRD(ctx, cl); err != nil {
		t.Fatalf("EnsureKryptonDeploymentCRD returned error: %v", err)
	}

	// Idempotency: a second call against the same client must succeed without
	// error. NOTE(review): the CRD object's existence is not asserted directly
	// here — the fake client's scheme appears to lack apiextensions types;
	// consider registering them and adding a Get check to strengthen the test.
	if err := EnsureKryptonDeploymentCRD(ctx, cl); err != nil {
		t.Fatalf("EnsureKryptonDeploymentCRD second call should be no-op, got error: %v", err)
	}
}
11 changes: 11 additions & 0 deletions internal/multicluster/krypton_operator.go
Original file line number Diff line number Diff line change
@@ -1,5 +1,16 @@
package multicluster

// File overview: Core operator logic to reconcile KryptonDeployment resources and
// orchestrate work across a home cluster and remote edge clusters.
//
// Entrypoint responsibilities:
// - Parse flags/env to determine watch scope, kubeconfig sources, and chart info.
// - Build managers: a home manager (watching KryptonDeployment) and a multicluster
// manager that uses a secret-based Provider for edge clusters.
// - Route each deployment to a target edge cluster, ensure namespace, deploy/upgrade
// Helm charts, and update status conditions for readiness.
// - Provide health endpoints and optional CRD bootstrap at startup for resilience.

import (
"context"
"flag"
Expand Down
119 changes: 119 additions & 0 deletions internal/multicluster/krypton_operator_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,119 @@
package multicluster

import (
"context"
"os"
"path/filepath"
"sync"
"testing"
"time"

platformv1alpha1 "github.com/openkcm/krypton-operator/api/v1alpha1"
)

// TestTruncatePath exercises the path-shortening helper on empty, short, and
// over-long inputs.
func TestTruncatePath(t *testing.T) {
	if truncatePath("") != "" {
		t.Fatalf("empty path should return empty string")
	}

	const short = "abc"
	if truncatePath(short) != short {
		t.Fatalf("short path mismatch")
	}

	const long = "this/is/a/very/long/path/that/should/get/truncated/by/the/helper/function/because/it/exceeds/sixtyfour/chars"
	got := truncatePath(long)
	// A truncated result must be strictly shorter and must no longer begin
	// with the original's first character (an ellipsis prefix is expected).
	if len(got) >= len(long) || got[0] == 't' {
		t.Fatalf("expected truncated string starting with ellipsis, got: %s", got)
	}
}

// TestSplitYAMLDocuments verifies that a stream holding two YAML documents
// separated by "---" is split into exactly two parts.
func TestSplitYAMLDocuments(t *testing.T) {
	const stream = "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: a\n---\napiVersion: v1\nkind: Secret\nmetadata:\n name: b\n"

	docs := splitYAMLDocuments(stream)
	if got := len(docs); got != 2 {
		t.Fatalf("expected 2 parts, got %d", got)
	}
}

// TestParseManifestToObjects verifies that the two valid Kubernetes documents
// in a three-document stream are parsed into objects while the middle
// non-Kubernetes document is skipped.
func TestParseManifestToObjects(t *testing.T) {
	const manifest = "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: cm1\n namespace: ns1\n---\n# note\nThis is not k8s\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: d1\n namespace: ns1\nspec:\n selector:\n matchLabels:\n app: x\n template:\n metadata:\n labels:\n app: x\n spec:\n containers:\n - name: c\n image: i\n"

	objects, err := parseManifestToObjects(manifest)
	if err != nil {
		t.Fatalf("parseManifestToObjects error: %v", err)
	}
	if got := len(objects); got != 2 {
		t.Fatalf("expected 2 k8s objects, got %d", got)
	}
}

// TestGetCheckInterval verifies that the check interval is read from the
// KRYPTON_CHECK_INTERVAL environment variable.
func TestGetCheckInterval(t *testing.T) {
	// Reset the package-level once so this test observes the env var, and
	// restore it on cleanup: without the cleanup, the cached 5s value would
	// leak into every later test in the package even after t.Setenv restores
	// the original environment.
	checkIntervalOnce = sync.Once{}
	t.Cleanup(func() { checkIntervalOnce = sync.Once{} })

	t.Setenv("KRYPTON_CHECK_INTERVAL", "5s")
	d := getCheckInterval()
	if d != 5*time.Second {
		t.Fatalf("expected 5s, got %v", d)
	}
}

// TestBuildWatchConfig_File verifies that buildWatchConfig loads a kubeconfig
// from an explicit file path, yielding a non-nil config, a non-empty host, and
// the context name that was selected.
func TestBuildWatchConfig_File(t *testing.T) {
	path := filepath.Join(t.TempDir(), "kubeconfig")
	kubeconfig := []byte(`apiVersion: v1
clusters:
- cluster:
    server: https://example.invalid
  name: c1
contexts:
- context:
    cluster: c1
    user: u1
  name: ctx1
current-context: ctx1
kind: Config
preferences: {}
users:
- name: u1
  user:
    token: dummy
`)
	if err := os.WriteFile(path, kubeconfig, 0o600); err != nil {
		t.Fatalf("write kubeconfig: %v", err)
	}

	cfg, host, used, err := buildWatchConfig(context.Background(), path, "", "", "", "")
	if err != nil {
		t.Fatalf("buildWatchConfig error: %v", err)
	}
	if cfg == nil || host == "" || used != "ctx1" {
		t.Fatalf("unexpected results: cfg nil? %v host=%s used=%s", cfg == nil, host, used)
	}
}

// TestRcedResolveTarget walks the secret-key resolution precedence in order:
// explicit namespaced kubeconfig ref, ref with defaulted namespace, the
// deprecated secret-name field, and finally a name derived from the region.
func TestRcedResolveTarget(t *testing.T) {
	ctx := context.Background()
	dep := &platformv1alpha1.KryptonDeployment{}
	dep.Spec.Region.Name = "eu-west1"

	// Explicit kubeconfig ref carrying its own namespace wins outright.
	dep.Spec.Region.Kubeconfig = &platformv1alpha1.KubeconfigRef{Secret: platformv1alpha1.SecretRef{Name: "edge", Namespace: "opns"}}
	key, region := rcedResolveTarget(ctx, nil, "discover", dep)
	if key != "opns/edge" || region != "eu-west1" {
		t.Fatalf("unexpected key/region: %s %s", key, region)
	}

	// A ref without a namespace falls back to the discovery namespace.
	dep.Spec.Region.Kubeconfig.Secret.Namespace = ""
	if key, _ = rcedResolveTarget(ctx, nil, "discover", dep); key != "discover/edge" {
		t.Fatalf("unexpected key defaulting: %s", key)
	}

	// The deprecated KubeconfigSecretName is honored when the ref is absent.
	dep.Spec.Region.Kubeconfig = nil
	dep.Spec.Region.KubeconfigSecretName = "legacy"
	if key, _ = rcedResolveTarget(ctx, nil, "discover", dep); key != "discover/legacy" {
		t.Fatalf("unexpected legacy key: %s", key)
	}

	// With nothing configured, the secret name is derived from the region.
	dep.Spec.Region.KubeconfigSecretName = ""
	if key, _ = rcedResolveTarget(ctx, nil, "discover", dep); key != "discover/eu-west1-kubeconfig" {
		t.Fatalf("unexpected derived key: %s", key)
	}
}
12 changes: 12 additions & 0 deletions multicluster/secretprovider/provider.go
Original file line number Diff line number Diff line change
@@ -1,3 +1,15 @@
// Package secretprovider implements a multicluster Provider that resolves
// edge clusters from kubeconfig Secrets. It lets the operator construct
// controller-runtime Cluster instances on demand, keyed by secret name
// (optionally prefixed with a namespace as "ns/name").
//
// Why this Provider exists:
// - In multi-cluster scenarios, edge cluster credentials are typically stored
// as Secrets. The operator needs to turn those into usable clients quickly.
// - This Provider lazily creates and caches Cluster objects from embedded
// kubeconfigs, avoiding global kubeconfig files and minimizing startup work.
// - It supports namespaced keys and a default namespace to keep secret lookup
// flexible while remaining simple.
package secretprovider

import (
Expand Down
84 changes: 84 additions & 0 deletions multicluster/secretprovider/provider_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,84 @@
package secretprovider

import (
"context"
"slices"
"testing"

"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client/fake"

corev1 "k8s.io/api/core/v1"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

// makeKubeconfig builds a minimal single-context kubeconfig (cluster "test",
// user "u1") and returns it serialized as YAML bytes.
func makeKubeconfig() []byte {
	kc := clientcmdapi.NewConfig()
	kc.Clusters["test"] = &clientcmdapi.Cluster{Server: "https://example.invalid"}
	kc.AuthInfos["u1"] = &clientcmdapi.AuthInfo{Token: "dummy"}
	kc.Contexts["test"] = &clientcmdapi.Context{Cluster: "test", AuthInfo: "u1", Namespace: "default"}
	kc.CurrentContext = "test"
	// Write error is deliberately ignored: serializing this fixed, well-formed
	// in-memory config is not expected to fail in a test helper.
	out, _ := clientcmd.Write(*kc)
	return out
}

// TestProvider_Get_DefaultNamespace verifies that a bare key ("edge") resolves
// a kubeconfig Secret in the provider's default namespace, and that the
// resulting cluster name subsequently appears in List.
func TestProvider_Get_DefaultNamespace(t *testing.T) {
	scheme := runtime.NewScheme()
	_ = clientgoscheme.AddToScheme(scheme)

	// Kubeconfig secret placed in the provider's default namespace "opns".
	secret := &corev1.Secret{}
	secret.Namespace = "opns"
	secret.Name = "edge"
	secret.Data = map[string][]byte{"kubeconfig": makeKubeconfig()}

	cl := fake.NewClientBuilder().WithScheme(scheme).WithObjects(secret).Build()
	provider := New(cl, scheme, "opns")
	ctx := context.Background()

	cluster, err := provider.Get(ctx, "edge")
	if err != nil {
		t.Fatalf("Provider.Get returned error: %v", err)
	}
	if cluster == nil {
		t.Fatalf("Provider.Get returned nil cluster")
	}

	// The constructed cluster must be listed under its key.
	names, err := provider.List(ctx)
	if err != nil {
		t.Fatalf("Provider.List returned error: %v", err)
	}
	if !slices.Contains(names, "edge") {
		t.Fatalf("constructed cluster name not found in list")
	}
}

// TestProvider_Get_NamespacedKey verifies that an "ns/name" key resolves the
// kubeconfig Secret from that namespace rather than the provider default.
func TestProvider_Get_NamespacedKey(t *testing.T) {
	scheme := runtime.NewScheme()
	_ = clientgoscheme.AddToScheme(scheme)

	// Secret lives in "otherns", not in the provider default "opns".
	secret := &corev1.Secret{}
	secret.Namespace = "otherns"
	secret.Name = "edge"
	secret.Data = map[string][]byte{"kubeconfig": makeKubeconfig()}

	cl := fake.NewClientBuilder().WithScheme(scheme).WithObjects(secret).Build()
	provider := New(cl, scheme, "opns")
	ctx := context.Background()

	cluster, err := provider.Get(ctx, "otherns/edge")
	if err != nil {
		t.Fatalf("Provider.Get returned error: %v", err)
	}
	if cluster == nil {
		t.Fatalf("Provider.Get returned nil cluster")
	}

	// Sanity check that the secret is still present in "otherns".
	// NOTE(review): this only confirms the fixture exists; it does not prove
	// the provider read it from that namespace — the successful Get above is
	// the real signal, since no secret exists in the default namespace.
	got := &corev1.Secret{}
	if err := cl.Get(ctx, types.NamespacedName{Namespace: "otherns", Name: "edge"}, got); err != nil {
		t.Fatalf("client Get failed: %v", err)
	}
}