From 8721eecf55e640164a7ba5d0c32c5f6833bea108 Mon Sep 17 00:00:00 2001 From: Artem Shcherbatiuk Date: Mon, 29 Sep 2025 13:58:37 +0200 Subject: [PATCH 01/13] ensured that we have ClusterAccess CRD installed --- scripts/create-clusteraccess.sh | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/scripts/create-clusteraccess.sh b/scripts/create-clusteraccess.sh index 78cba30..c9d7e78 100755 --- a/scripts/create-clusteraccess.sh +++ b/scripts/create-clusteraccess.sh @@ -99,6 +99,36 @@ if [[ -z "$CLUSTER_NAME" ]]; then fi log_info "Cluster name: $CLUSTER_NAME" +ensure_crd_installed() { + log_info "Checking if ClusterAccess CRD is installed in management cluster..." + + # Check if CRD exists + if ! KUBECONFIG="$MANAGEMENT_KUBECONFIG" kubectl get crd clusteraccesses.gateway.platform-mesh.io &>/dev/null; then + log_info "ClusterAccess CRD not found. Installing CRD..." + + # Get the directory where this script is located + SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + CRD_PATH="$SCRIPT_DIR/../config/crd/gateway.platform-mesh.io_clusteraccesses.yaml" + + # Check if CRD file exists + if [[ ! -f "$CRD_PATH" ]]; then + log_error "CRD file not found at: $CRD_PATH" + log_error "Please ensure the CRD file exists in the expected location" + exit 1 + fi + + # Install the CRD + if KUBECONFIG="$MANAGEMENT_KUBECONFIG" kubectl apply -f "$CRD_PATH"; then + log_info "ClusterAccess CRD installed successfully" + else + log_error "Failed to install ClusterAccess CRD" + exit 1 + fi + else + log_info "ClusterAccess CRD is already installed" + fi +} + cleanup_existing_resources() { log_info "Checking for existing ClusterAccess resource '$CLUSTER_NAME'..." @@ -174,6 +204,9 @@ if ! KUBECONFIG="$MANAGEMENT_KUBECONFIG" kubectl cluster-info &>/dev/null; then fi log_info "Management cluster is accessible" +# Ensure CRD is installed in management cluster +ensure_crd_installed + # Create kubeconfig secret in management cluster log_info "Creating admin kubeconfig secret in management cluster..." KUBECONFIG="$MANAGEMENT_KUBECONFIG" kubectl create secret generic "${CLUSTER_NAME}-admin-kubeconfig" \ From e78ee20c1973432d0004ef73c926fdbd82a606ad Mon Sep 17 00:00:00 2001 From: Artem Shcherbatiuk Date: Mon, 29 Sep 2025 15:14:42 +0200 Subject: [PATCH 02/13] wait for CRD to be fully registered --- scripts/create-clusteraccess.sh | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/scripts/create-clusteraccess.sh b/scripts/create-clusteraccess.sh index c9d7e78..b869bc3 100755 --- a/scripts/create-clusteraccess.sh +++ b/scripts/create-clusteraccess.sh @@ -120,6 +120,15 @@ ensure_crd_installed() { # Install the CRD if KUBECONFIG="$MANAGEMENT_KUBECONFIG" kubectl apply -f "$CRD_PATH"; then log_info "ClusterAccess CRD installed successfully" + + # Wait for CRD to reach Established condition + log_info "Waiting for ClusterAccess CRD to become established..." 
+ if KUBECONFIG="$MANAGEMENT_KUBECONFIG" kubectl wait --for=condition=Established crd/clusteraccesses.gateway.platform-mesh.io --timeout=60s; then + log_info "ClusterAccess CRD is now established and ready" + else + log_error "ClusterAccess CRD failed to reach Established condition within 60 seconds" + exit 1 + fi else log_error "Failed to install ClusterAccess CRD" exit 1 From d13952d0f1eebc493e44c4b2a3ce7dc81fa502c3 Mon Sep 17 00:00:00 2001 From: Artem Shcherbatiuk Date: Tue, 30 Sep 2025 11:11:13 +0200 Subject: [PATCH 03/13] addressed comments --- hack/create-clusteraccess.md | 14 ++++++++++++++ {scripts => hack}/create-clusteraccess.sh | 0 .../test-clusteraccess-integration.sh | 0 3 files changed, 14 insertions(+) create mode 100644 hack/create-clusteraccess.md rename {scripts => hack}/create-clusteraccess.sh (100%) rename {scripts => hack}/test-clusteraccess-integration.sh (100%) diff --git a/hack/create-clusteraccess.md b/hack/create-clusteraccess.md new file mode 100644 index 0000000..5056457 --- /dev/null +++ b/hack/create-clusteraccess.md @@ -0,0 +1,14 @@ +# Create ClusterAccess script + +This script is used to create a ClusterAccess resource, which is needed for kubernetes-graphql-gateway to work with Standard K8S cluster. + +More details about it you can find at [this readme](../docs/clusteraccess.md) + +## Usage + +```shell +./hack/create-clusteraccess.sh --target-kubeconfig $TARGET_CLUSTER_KUBECONFIG --management-kubeconfig $MANAGEMENT_CLUSTER_KUBECONFIG +``` +Where +- TARGET_CLUSTER_KUBECONFIG - path to the kubeconfig of the cluster we want the gateway to generate graphql schema +- MANAGEMENT_CLUSTER_KUBECONFIG - path to the kubeconfig of the cluster where ClusterAccess object will be created. It can be the same cluster as TARGET_CLUSTER_KUBECONFIG. \ No newline at end of file diff --git a/scripts/create-clusteraccess.sh b/hack/create-clusteraccess.sh similarity index 100% rename from scripts/create-clusteraccess.sh rename to hack/create-clusteraccess.sh diff --git a/scripts/test-clusteraccess-integration.sh b/hack/test-clusteraccess-integration.sh similarity index 100% rename from scripts/test-clusteraccess-integration.sh rename to hack/test-clusteraccess-integration.sh From b5d41878af8379a274764ad41af863c1942ade52 Mon Sep 17 00:00:00 2001 From: Artem Shcherbatiuk Date: Thu, 2 Oct 2025 17:42:41 +0200 Subject: [PATCH 04/13] removed TOCTOU race --- hack/create-clusteraccess.sh | 55 ++++++++++++++---------------------- 1 file changed, 21 insertions(+), 34 deletions(-) diff --git a/hack/create-clusteraccess.sh b/hack/create-clusteraccess.sh index b869bc3..8686613 100755 --- a/hack/create-clusteraccess.sh +++ b/hack/create-clusteraccess.sh @@ -100,42 +100,29 @@ fi log_info "Cluster name: $CLUSTER_NAME" ensure_crd_installed() { - log_info "Checking if ClusterAccess CRD is installed in management cluster..." + log_info "Ensuring ClusterAccess CRD is installed in management cluster..." - # Check if CRD exists - if ! KUBECONFIG="$MANAGEMENT_KUBECONFIG" kubectl get crd clusteraccesses.gateway.platform-mesh.io &>/dev/null; then - log_info "ClusterAccess CRD not found. Installing CRD..." - - # Get the directory where this script is located - SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" - CRD_PATH="$SCRIPT_DIR/../config/crd/gateway.platform-mesh.io_clusteraccesses.yaml" - - # Check if CRD file exists - if [[ ! 
-f "$CRD_PATH" ]]; then - log_error "CRD file not found at: $CRD_PATH" - log_error "Please ensure the CRD file exists in the expected location" - exit 1 - fi - - # Install the CRD - if KUBECONFIG="$MANAGEMENT_KUBECONFIG" kubectl apply -f "$CRD_PATH"; then - log_info "ClusterAccess CRD installed successfully" - - # Wait for CRD to reach Established condition - log_info "Waiting for ClusterAccess CRD to become established..." - if KUBECONFIG="$MANAGEMENT_KUBECONFIG" kubectl wait --for=condition=Established crd/clusteraccesses.gateway.platform-mesh.io --timeout=60s; then - log_info "ClusterAccess CRD is now established and ready" - else - log_error "ClusterAccess CRD failed to reach Established condition within 60 seconds" - exit 1 - fi - else - log_error "Failed to install ClusterAccess CRD" - exit 1 - fi - else - log_info "ClusterAccess CRD is already installed" + SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + CRD_PATH="$SCRIPT_DIR/../config/crd/gateway.platform-mesh.io_clusteraccesses.yaml" + + if [[ ! -f "$CRD_PATH" ]]; then + log_error "CRD file not found at: $CRD_PATH" + log_error "Please ensure the CRD file exists in the expected location" + exit 1 + fi + + if ! KUBECONFIG="$MANAGEMENT_KUBECONFIG" kubectl apply -f "$CRD_PATH"; then + log_error "Failed to apply ClusterAccess CRD" + exit 1 fi + + log_info "Waiting for ClusterAccess CRD to become established..." + if ! KUBECONFIG="$MANAGEMENT_KUBECONFIG" kubectl wait --for=condition=Established crd/clusteraccesses.gateway.platform-mesh.io --timeout=60s; then + log_error "ClusterAccess CRD failed to reach Established condition within 60 seconds" + exit 1 + fi + + log_info "ClusterAccess CRD is established and ready" } cleanup_existing_resources() { From 483f358d1a477cd07dc7e257f8b1de31e7d993f9 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 6 Oct 2025 22:45:28 +0000 Subject: [PATCH 05/13] fix(deps): update module sigs.k8s.io/controller-runtime to v0.22.2 (#67) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 0051df3..ab2f3a2 100644 --- a/go.mod +++ b/go.mod @@ -42,7 +42,7 @@ require ( k8s.io/apimachinery v0.33.3 k8s.io/client-go v0.33.3 k8s.io/kube-openapi v0.0.0-20250701173324-9bd5c66d9911 - sigs.k8s.io/controller-runtime v0.22.1 + sigs.k8s.io/controller-runtime v0.22.2 ) require ( From 44bd887a6482f8d9d74271acd18c3bdc888dca4b Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 8 Oct 2025 17:46:55 +0000 Subject: [PATCH 06/13] fix(deps): update module golang.org/x/text to v0.30.0 (#70) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ab2f3a2..b71e93b 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( go.opentelemetry.io/otel v1.38.0 go.opentelemetry.io/otel/trace v1.38.0 golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9 - golang.org/x/text v0.29.0 + golang.org/x/text v0.30.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.33.3 k8s.io/apiextensions-apiserver v0.33.3 diff --git a/go.sum b/go.sum index 67f7d29..f049476 100644 --- a/go.sum +++ b/go.sum @@ -290,8 +290,8 @@ golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= 
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= -golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= +golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= +golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= From 06d9d8c0ca2e1c1b1aefc8157a59e53ef67c119f Mon Sep 17 00:00:00 2001 From: Artem Shcherbatiuk Date: Thu, 9 Oct 2025 08:52:39 +0200 Subject: [PATCH 07/13] Fix: relations nil pointer check (#68) * added nil checks in relation resovling On-behalf-of: @SAP a.shcherbatiuk@sap.com Signed-off-by: Artem Shcherbatiuk --- gateway/schema/recursion_test.go | 386 ++++++++++++++++++ gateway/schema/relations.go | 6 +- gateway/schema/schema.go | 11 +- .../crd/core.platform-mesh.io_accounts.yaml | 21 +- 4 files changed, 403 insertions(+), 21 deletions(-) create mode 100644 gateway/schema/recursion_test.go diff --git a/gateway/schema/recursion_test.go b/gateway/schema/recursion_test.go new file mode 100644 index 0000000..45871e6 --- /dev/null +++ b/gateway/schema/recursion_test.go @@ -0,0 +1,386 @@ +package schema_test + +import ( + "testing" + + "github.com/go-openapi/spec" + "github.com/graphql-go/graphql" + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/platform-mesh/golang-commons/logger/testlogger" + "github.com/platform-mesh/kubernetes-graphql-gateway/gateway/resolver" + gatewaySchema "github.com/platform-mesh/kubernetes-graphql-gateway/gateway/schema" +) + +func TestConvertSwaggerTypeToGraphQL_WithNilInCache(t *testing.T) { + log := testlogger.New().Logger + mockResolver := &mockResolverProvider{} + + tests := []struct { + name string + definitions spec.Definitions + setupCache func(*gatewaySchema.Gateway) + expectedNoPanic bool + expectedReturnType bool + }{ + { + name: "handles_nil_in_cache_for_recursive_ref", + definitions: spec.Definitions{ + "io.test.v1.RecursiveType": spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + }, + }, + "parent": { + SchemaProps: spec.SchemaProps{ + Ref: spec.MustCreateRef("#/definitions/io.test.v1.RecursiveType"), + }, + }, + }, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: map[string]interface{}{ + "x-kubernetes-group-version-kind": []interface{}{ + map[string]interface{}{ + "group": "test", + "version": "v1", + "kind": "RecursiveType", + }, + }, + }, + }, + }, + }, + expectedNoPanic: true, + expectedReturnType: true, + }, + { + name: "handles_nested_object_with_nil_in_cache", + definitions: spec.Definitions{ + "io.test.v1.NestedType": spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "spec": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "nested": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "field": { + SchemaProps: spec.SchemaProps{ + Type: 
[]string{"string"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: map[string]interface{}{ + "x-kubernetes-group-version-kind": []interface{}{ + map[string]interface{}{ + "group": "test", + "version": "v1", + "kind": "NestedType", + }, + }, + }, + }, + }, + }, + expectedNoPanic: true, + expectedReturnType: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + defer func() { + if r := recover(); r != nil { + if tt.expectedNoPanic { + t.Errorf("Test panicked when it shouldn't: %v", r) + } + } + }() + + gateway, err := gatewaySchema.New(log, tt.definitions, mockResolver) + if err != nil && tt.expectedNoPanic { + t.Errorf("Schema creation failed: %v", err) + return + } + + if gateway != nil && tt.expectedReturnType { + schemaObj := gateway.GetSchema() + if schemaObj == nil { + t.Error("Expected schema to be created but got nil") + } + } + }) + } +} + +func TestHandleObjectFieldSpecType_WithNilInCache(t *testing.T) { + log := testlogger.New().Logger + mockResolver := &mockResolverProvider{} + + definitions := spec.Definitions{ + "io.test.v1.SelfReferencing": spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "metadata": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + }, + }, + "labels": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + }, + }, + }, + }, + }, + }, + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "template": { + SchemaProps: spec.SchemaProps{ + Ref: spec.MustCreateRef("#/definitions/io.test.v1.SelfReferencing"), + }, + }, + }, + }, + }, + }, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: map[string]interface{}{ + "x-kubernetes-group-version-kind": []interface{}{ + map[string]interface{}{ + "group": "test", + "version": "v1", + "kind": "SelfReferencing", + }, + }, + }, + }, + }, + } + + t.Run("no_panic_with_self_referencing_object", func(t *testing.T) { + defer func() { + if r := recover(); r != nil { + t.Errorf("Test panicked: %v", r) + } + }() + + gateway, err := gatewaySchema.New(log, definitions, mockResolver) + if err != nil { + t.Errorf("Schema creation failed: %v", err) + return + } + + if gateway == nil { + t.Error("Expected gateway to be created but got nil") + return + } + + schemaObj := gateway.GetSchema() + if schemaObj == nil { + t.Error("Expected schema to be created but got nil") + } + }) +} + +func TestFindRelationTarget_WithNilInCache(t *testing.T) { + log := testlogger.New().Logger + mockResolver := &mockResolverProvider{} + + definitions := spec.Definitions{ + "io.test.v1.Parent": spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "childRef": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + }, + }, + }, + }, + }, + }, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: map[string]interface{}{ + "x-kubernetes-group-version-kind": []interface{}{ + map[string]interface{}{ + "group": "test", + "version": "v1", + "kind": "Parent", + }, + }, + 
"x-kubernetes-resource-scope": "Namespaced", + }, + }, + }, + "io.test.v1.Child": spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "parentRef": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + }, + }, + }, + }, + }, + }, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: map[string]interface{}{ + "x-kubernetes-group-version-kind": []interface{}{ + map[string]interface{}{ + "group": "test", + "version": "v1", + "kind": "Child", + }, + }, + "x-kubernetes-resource-scope": "Namespaced", + }, + }, + }, + } + + t.Run("no_panic_with_cross_references", func(t *testing.T) { + defer func() { + if r := recover(); r != nil { + t.Errorf("Test panicked: %v", r) + } + }() + + gateway, err := gatewaySchema.New(log, definitions, mockResolver) + if err != nil { + t.Errorf("Schema creation failed: %v", err) + return + } + + if gateway == nil { + t.Error("Expected gateway to be created but got nil") + return + } + + schemaObj := gateway.GetSchema() + if schemaObj == nil { + t.Error("Expected schema to be created but got nil") + } + }) +} + +type mockResolverProvider struct{} + +func (m *mockResolverProvider) CommonResolver() graphql.FieldResolveFn { + return func(p graphql.ResolveParams) (interface{}, error) { + return nil, nil + } +} + +func (m *mockResolverProvider) ListItems(gvk schema.GroupVersionKind, scope v1.ResourceScope) graphql.FieldResolveFn { + return func(p graphql.ResolveParams) (interface{}, error) { + return []interface{}{}, nil + } +} + +func (m *mockResolverProvider) GetItem(gvk schema.GroupVersionKind, scope v1.ResourceScope) graphql.FieldResolveFn { + return func(p graphql.ResolveParams) (interface{}, error) { + return map[string]interface{}{}, nil + } +} + +func (m *mockResolverProvider) GetItemAsYAML(gvk schema.GroupVersionKind, scope v1.ResourceScope) graphql.FieldResolveFn { + return func(p graphql.ResolveParams) (interface{}, error) { + return "", nil + } +} + +func (m *mockResolverProvider) CreateItem(gvk schema.GroupVersionKind, scope v1.ResourceScope) graphql.FieldResolveFn { + return func(p graphql.ResolveParams) (interface{}, error) { + return map[string]interface{}{}, nil + } +} + +func (m *mockResolverProvider) UpdateItem(gvk schema.GroupVersionKind, scope v1.ResourceScope) graphql.FieldResolveFn { + return func(p graphql.ResolveParams) (interface{}, error) { + return map[string]interface{}{}, nil + } +} + +func (m *mockResolverProvider) DeleteItem(gvk schema.GroupVersionKind, scope v1.ResourceScope) graphql.FieldResolveFn { + return func(p graphql.ResolveParams) (interface{}, error) { + return true, nil + } +} + +func (m *mockResolverProvider) SubscribeItem(gvk schema.GroupVersionKind, scope v1.ResourceScope) graphql.FieldResolveFn { + return func(p graphql.ResolveParams) (interface{}, error) { + return nil, nil + } +} + +func (m *mockResolverProvider) SubscribeItems(gvk schema.GroupVersionKind, scope v1.ResourceScope) graphql.FieldResolveFn { + return func(p graphql.ResolveParams) (interface{}, error) { + return nil, nil + } +} + +func (m *mockResolverProvider) RelationResolver(baseName string, gvk schema.GroupVersionKind) graphql.FieldResolveFn { + return func(p graphql.ResolveParams) (interface{}, error) { + return map[string]interface{}{}, nil + } +} + +func (m *mockResolverProvider) TypeByCategory(typeByCategory map[string][]resolver.TypeByCategory) graphql.FieldResolveFn 
{ + return func(p graphql.ResolveParams) (interface{}, error) { + return []interface{}{}, nil + } +} + +func (m *mockResolverProvider) SanitizeGroupName(group string) string { + return group +} + +var _ resolver.Provider = (*mockResolverProvider)(nil) diff --git a/gateway/schema/relations.go b/gateway/schema/relations.go index e9dfa79..e39993c 100644 --- a/gateway/schema/relations.go +++ b/gateway/schema/relations.go @@ -96,7 +96,7 @@ func (g *Gateway) findRelationTarget(baseName string) (graphql.Output, *schema.G if g.matchesTargetKind(defSchema, targetKind) { // Resolve or build the GraphQL type var fieldType graphql.Output - if existingType, exists := g.typesCache[defKey]; exists { + if existingType, exists := g.typesCache[defKey]; exists && existingType != nil { fieldType = existingType } else { ft, _, err := g.convertSwaggerTypeToGraphQL(defSchema, defKey, []string{}, make(map[string]bool)) @@ -106,6 +106,10 @@ func (g *Gateway) findRelationTarget(baseName string) (graphql.Output, *schema.G fieldType = ft } + if fieldType == nil { + continue + } + // Extract GVK from the schema definition gvk, err := g.getGroupVersionKind(defKey) if err != nil || gvk == nil { diff --git a/gateway/schema/schema.go b/gateway/schema/schema.go index b8dbc0f..d68cf8f 100644 --- a/gateway/schema/schema.go +++ b/gateway/schema/schema.go @@ -76,7 +76,7 @@ func (g *Gateway) generateGraphqlSchema() error { newSchema, err := graphql.NewSchema(graphql.SchemaConfig{ Query: graphql.NewObject(graphql.ObjectConfig{ - Name: "PrivateNameForQuery", // we must keep those name unique to avoid collision with objects having the same names + Name: "PrivateNameForQuery", Fields: rootQueryFields, }), Mutation: graphql.NewObject(graphql.ObjectConfig{ @@ -352,7 +352,7 @@ func (g *Gateway) convertSwaggerTypeToGraphQL(schema spec.Schema, typePrefix str // Check if type is already being processed if processingTypes[refKey] { // Return existing type to prevent infinite recursion - if existingType, exists := g.typesCache[refKey]; exists { + if existingType, exists := g.typesCache[refKey]; exists && existingType != nil { existingInputType := g.inputTypesCache[refKey] return existingType, existingInputType, nil } @@ -418,10 +418,15 @@ func (g *Gateway) handleObjectFieldSpecType(fieldSpec spec.Schema, typePrefix st typeName := g.generateTypeName(typePrefix, fieldPath) // Check if type already generated - if existingType, exists := g.typesCache[typeName]; exists { + if existingType, exists := g.typesCache[typeName]; exists && existingType != nil { return existingType, g.inputTypesCache[typeName], nil } + // If type is being processed (nil in cache), return placeholder to prevent recursion + if _, exists := g.typesCache[typeName]; exists { + return graphql.String, graphql.String, nil + } + // Store placeholder to prevent recursion g.typesCache[typeName] = nil g.inputTypesCache[typeName] = nil diff --git a/tests/gateway_test/testdata/crd/core.platform-mesh.io_accounts.yaml b/tests/gateway_test/testdata/crd/core.platform-mesh.io_accounts.yaml index 8842274..b10939a 100644 --- a/tests/gateway_test/testdata/crd/core.platform-mesh.io_accounts.yaml +++ b/tests/gateway_test/testdata/crd/core.platform-mesh.io_accounts.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: accounts.core.platform-mesh.io spec: group: core.platform-mesh.io @@ -110,16 +110,8 @@ spec: properties: conditions: 
items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -160,12 +152,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string From dfa9b29163839624222eaac667e1d1d3fae5dad8 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 9 Oct 2025 17:48:00 +0000 Subject: [PATCH 08/13] fix(deps): update golang.org/x/exp digest to d2f985d (#71) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index b71e93b..0f18d59 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/stretchr/testify v1.11.1 go.opentelemetry.io/otel v1.38.0 go.opentelemetry.io/otel/trace v1.38.0 - golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9 + golang.org/x/exp v0.0.0-20251009144603-d2f985daa21b golang.org/x/text v0.30.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.33.3 diff --git a/go.sum b/go.sum index f049476..6b4d18b 100644 --- a/go.sum +++ b/go.sum @@ -261,8 +261,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= -golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9 h1:TQwNpfvNkxAVlItJf6Cr5JTsVZoC/Sj7K3OZv2Pc14A= -golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk= +golang.org/x/exp v0.0.0-20251009144603-d2f985daa21b h1:18qgiDvlvH7kk8Ioa8Ov+K6xCi0GMvmGfGW0sgd/SYA= +golang.org/x/exp v0.0.0-20251009144603-d2f985daa21b/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -298,8 +298,8 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod 
h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE= -golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= +golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From e332a37731bd666e725e236cedcfa844bd5992e8 Mon Sep 17 00:00:00 2001 From: Artem Shcherbatiuk Date: Fri, 10 Oct 2025 11:51:28 +0200 Subject: [PATCH 09/13] improved docs --- README.md | 35 +---- docs/authorization.md | 16 +- docs/clusteraccess-setup.md | 184 +++++++++++++++++++++++ docs/clusteraccess.md | 212 ++++++++++++--------------- docs/gateway.md | 28 ++-- docs/listener.md | 40 ++++- docs/local_test.md | 21 ++- docs/multicluster-kubeconfig-flow.md | 160 -------------------- docs/quickstart.md | 46 +++--- docs/subscriptions.md | 30 ++-- docs/virtual-workspaces.md | 2 +- hack/create-clusteraccess.sh | 73 ++++++++- 12 files changed, 464 insertions(+), 383 deletions(-) create mode 100644 docs/clusteraccess-setup.md delete mode 100644 docs/multicluster-kubeconfig-flow.md diff --git a/README.md b/README.md index 38ff767..4e9f24f 100644 --- a/README.md +++ b/README.md @@ -16,37 +16,17 @@ This repository contains two main components: - [Listener](./docs/listener.md): watches a cluster and stores its openAPI spec in a directory. - [Gateway](./docs/gateway.md): exposes the openAPI spec as a GraphQL endpoints. -## MultiCluster Support +## Operation Modes -The system supports three modes of operation: +The system supports two modes of operation: -1. **Single Cluster** (`ENABLE_KCP=false`, `MULTICLUSTER=false`): Gateway connects to the same cluster as the listener -2. **KCP Mode** (`ENABLE_KCP=true`): Designed for KCP-based multi-cluster scenarios -3. **MultiCluster Mode** (`ENABLE_KCP=false`, `MULTICLUSTER=true`): Gateway connects to multiple external clusters via ClusterAccess resources +1. **KCP Mode** (`ENABLE_KCP=true`): Designed for KCP-based multi-cluster scenarios + - See [Virtual Workspaces](./docs/virtual-workspaces.md) for advanced KCP configuration +2. **ClusterAccess Mode** (`ENABLE_KCP=false`): Designed for support of multiple standard clusters. -### MultiCluster with ClusterAccess +## ClusterAccess -In MultiCluster mode, the system uses ClusterAccess resources to store kubeconfig data and connection information. The listener processes these resources and embeds connection metadata into schema files, which the gateway then uses to establish cluster-specific connections. 
- -For complete setup instructions, see: -- [ClusterAccess documentation](./docs/clusteraccess.md) - Manual setup -- [MultiCluster Kubeconfig Flow](./docs/multicluster-kubeconfig-flow.md) - Detailed flow explanation - -### Quick Setup Scripts - -```bash -# Create ClusterAccess with secure token authentication -./scripts/create-clusteraccess.sh --target-kubeconfig ~/.kube/prod-config - -# Test end-to-end integration -./scripts/test-clusteraccess-integration.sh -``` - -### Gateway Requirements - -- **Single Cluster Mode**: Requires KUBECONFIG to connect to the local cluster -- **KCP Mode**: Requires KUBECONFIG to connect to KCP management cluster -- **MultiCluster Mode**: Does NOT require KUBECONFIG - gets all connection info from schema files +For detailed information, see [Clusteraccess](./docs/clusteraccess.md) section. ## Authorization @@ -71,3 +51,4 @@ If you find any bug that may be a security problem, please follow our instructio ## Licensing Copyright 2025 SAP SE or an SAP affiliate company and platform-mesh contributors. Please see our [LICENSE](LICENSE) for copyright and license information. Detailed information including third-party components and their licensing/copyright information is available [via the REUSE tool](https://api.reuse.software/info/github.com/platform-mesh/kubernetes-graphql-gateway). + diff --git a/docs/authorization.md b/docs/authorization.md index bf59bd8..d069072 100644 --- a/docs/authorization.md +++ b/docs/authorization.md @@ -3,7 +3,7 @@ All requests must contain an `Authorization` header with a valid Bearer token by default: ```shell { - "Authorization": $YOUR_TOKEN + "Authorization": "Bearer $YOUR_TOKEN" } ``` You can disable authorization by setting the following environment variable: @@ -14,21 +14,21 @@ This is useful for local development and testing purposes. ## Introspection authentication -By default, introspection requests (i.e. the requests that are made to fetch the GraphQL schema) are **not** protected by authorization. +By default, introspection requests (i.e., the requests that are made to fetch the GraphQL schema) are **not** protected by authorization. You can protect those requests by setting the following environment variable: ```shell -export INTROSPECTION_AUTHENTICATION=true +export GATEWAY_INTROSPECTION_AUTHENTICATION=true ``` -### Error fetching schema +### Error fetching schema in documentation explorer -When GraphiQL page is loaded, it makes a request to fetch the GraphQL schema and there is no way to add the `Authorization` header to that request. +When the GraphiQL page is loaded, it makes a request to fetch the GraphQL schema, and there is no way to add the `Authorization` header to that request. We have this [issue](https://github.com/openmfp/kubernetes-graphql-gateway/issues/217) open to fix this. But for now, you can use the following workaround: 1. Open the GraphiQL page in your browser. -2. Add the `Authorization` header in the `Headers` section of the GraphiQL user interface like so: -3. Press `Re-fetch GraphQL schema` button in the left sidebar(third button from the top). -4. Now the GraphQL schema should be fetched, and you can use the GraphiQL interface as usual. +2. Add the `Authorization` header in the `Headers` section of the GraphiQL user interface. +3. Press the `Re-fetch GraphQL schema` button in the left sidebar (third button from the top). +4. The GraphQL schema should now be fetched, and you can use the GraphiQL interface as usual. 
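
For reference, the same introspection check works outside the browser; a minimal sketch, assuming the gateway serves a workspace at `http://localhost:8080/root/graphql` and `$YOUR_TOKEN` holds a valid token:

```shell
# Hypothetical endpoint and token; adjust to your setup.
# Sends an introspection query with the Bearer token required when
# GATEWAY_INTROSPECTION_AUTHENTICATION=true.
curl -s \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer $YOUR_TOKEN" \
  -d '{"query": "{ __schema { queryType { name } } }"}' \
  http://localhost:8080/root/graphql
```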
diff --git a/docs/clusteraccess-setup.md b/docs/clusteraccess-setup.md new file mode 100644 index 0000000..e39cfed --- /dev/null +++ b/docs/clusteraccess-setup.md @@ -0,0 +1,184 @@ +# ClusterAccess Resource Setup + +To enable the gateway to access external Kubernetes clusters, you need to create ClusterAccess resources. This section provides both an automated script and manual step-by-step instructions. + +## Quick Setup (Recommended) + +For development purposes, use the provided script to automatically create ClusterAccess resources: + +**Example:** +```bash +./hack/create-clusteraccess.sh --target-kubeconfig ~/.kube/platform-mesh-config --management-kubeconfig ~/.kube/platform-mesh-config +``` + +The script will: +- Extract cluster name, server URL, and CA certificate from the target kubeconfig +- Create a ServiceAccount with cluster-admin access in the target cluster +- Generate a long-lived token for the ServiceAccount +- Create the admin kubeconfig and CA secrets in the management cluster +- Create the ClusterAccess resource with kubeconfig-based authentication +- Output a copy-paste ready bearer token for direct API access + +## Manual Setup + +## Prerequisites + +- Access to the target cluster (the cluster you want to expose via GraphQL) +- Access to the management cluster (the cluster where the gateway runs) +- ClusterAccess CRDs installed in the management cluster +- Target cluster kubeconfig file + +## Step 1: Create ServiceAccount with Admin Access in Target Cluster + +```bash +# Switch to target cluster +export KUBECONFIG=/path/to/target-cluster-kubeconfig + +# Create ServiceAccount with cluster-admin access +cat </graphql` which will be used to query the resources of that workspace. -It watches for changes in the directory and update the schema accordingly. +Each file in that directory corresponds to a KCP workspace or a target cluster (via ClusterAccess). +It creates a separate URL for each file, like `//graphql` which will be used to query the resources of that workspace or cluster. +It watches for changes in the directory and updates the schema accordingly. So, if there are two files in the directory - `root` and `root:alpha`, then we will have two URLs: - `http://localhost:3000/root/graphql` @@ -15,13 +15,23 @@ See example queries in the [Queries Examples](./quickstart.md#first-steps-and-ba ## Packages Overview -### Workspace Manager -Holds the logic for watching a directory, triggering schema generation, and binding it to an HTTP handler. +### Manager (`gateway/manager/`) -### Schema +Manages the gateway lifecycle and cluster connections: +- **Watcher**: Watches the definitions directory for schema file changes +- **Target Cluster**: Manages cluster registry and GraphQL endpoint routing +- **Round Tripper**: Handles HTTP request routing and authentication -Is responsible for the conversion from OpenAPI spec into the GraphQL schema. +### Schema (`gateway/schema/`) -### Resolver +Converts OpenAPI specifications into GraphQL schemas: +- Generates GraphQL types from OpenAPI definitions +- Handles custom queries and relations between resources +- Manages scalar type mappings -Holds the logic of interaction with the cluster. 
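
To make the conversion concrete, here is the kind of query the generated schema ends up serving; a sketch, assuming a `core_configmaps` query field analogous to the subscription field shown in the subscriptions guide, and a local endpoint:

```shell
# Assumed endpoint and field name; the generated schema exposes one such
# field per discovered resource type.
curl -s \
  -H "Content-Type: application/json" \
  -d '{"query": "{ core_configmaps { metadata { name namespace } data } }"}' \
  http://localhost:8080/root/graphql
```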
+### Resolver (`gateway/resolver/`) + +Executes GraphQL queries against Kubernetes clusters: +- Resolves GraphQL queries to Kubernetes API calls +- Handles subscriptions for real-time updates +- Processes query arguments and filters diff --git a/docs/listener.md b/docs/listener.md index c0c8e29..761bd0f 100644 --- a/docs/listener.md +++ b/docs/listener.md @@ -1,6 +1,40 @@ # Listener -The Listener component is responsible for watching a Kubernetes cluster and generating OpenAPI specifications for discovered resources. +The Listener component is responsible for watching Kubernetes clusters and generating OpenAPI specifications for discovered resources. It stores these specifications in a directory, which can then be used by the [Gateway](./gateway.md) component to expose them as GraphQL endpoints. -The Listener creates a separate file for each KCP workspace in the specified directory. -The Gateway will then watch this directory for changes and update the GraphQL schema accordingly. + +In **KCP mode**, it creates a separate file for each KCP workspace. In **ClusterAccess mode**, it creates a file for each ClusterAccess resource representing a target cluster. + +The Gateway watches this directory for changes and updates the GraphQL schema accordingly. + +## Packages Overview + +### Reconciler (`listener/reconciler/`) + +Contains reconciliation logic for different operational modes: + +#### ClusterAccess Reconciler (`reconciler/clusteraccess/`) +- Watches ClusterAccess resources in the management cluster +- Connects to target clusters using kubeconfig-based authentication +- Generates schema files with embedded cluster connection metadata +- Injects `x-cluster-metadata` into schema files for gateway consumption + +#### KCP Reconciler (`reconciler/kcp/`) +- Watches APIBinding resources in KCP workspaces +- Discovers virtual workspaces and their API resources +- Handles cluster path resolution for KCP workspace hierarchies +- Generates schema files for each workspace + +### Packages (`listener/pkg/`) + +Supporting packages for schema generation: + +#### API Schema (`pkg/apischema/`) +- Builds OpenAPI specifications from Kubernetes API resources +- Resolves Custom Resource Definitions (CRDs) +- Converts Kubernetes API schemas to OpenAPI format +- Handles resource relationships and dependencies + +#### Workspace File (`pkg/workspacefile/`) +- Manages reading and writing schema files to the definitions directory +- Handles file I/O operations for schema persistence diff --git a/docs/local_test.md b/docs/local_test.md index 7d75180..957374a 100644 --- a/docs/local_test.md +++ b/docs/local_test.md @@ -1,21 +1,20 @@ # Test Locally -**Warning!** This test is for those who have access to `helm-charts-priv`. - ## Run and check cluster 1. Create and run a cluster if it is not running yet. ```shell -git clone https://github.com/platform-mesh/helm-charts-priv.git -cd helm-charts-priv +git clone https://github.com/platform-mesh/helm-charts.git +cd helm-charts task local-setup ``` +If this task fails, you can try to run `task local-setup:iterate` to complete it. 2. Verify that the cluster is running. -Run k9s, go to `:pods`. All pods must have a status of "Running". -It may take some time before they are all ready. The possible issues may be insufficient RAM and/or CPU cores. In this case, increase the limits in Docker settings. +Run k9s and go to `:pods`. All pods should have a status of "Running". +It may take some time before they are all ready. Possible issues include insufficient RAM and/or CPU cores. 
In this case, increase the limits in Docker settings. 3. In k9s, go to `:pods`, then open pod `kubernetes-graphql-gateway-...`. @@ -23,7 +22,7 @@ Open container `kubernetes-graphql-gateway-gateway` to see the logs. The logs must contain more than a single line (with "Starting server..."). If you see only this single line, the problem might be in the container called "kubernetes-graphql-gateway-listener". -Note the `IMAGE` column, corresponding to the two `kubernetes-...` container. It contains the name and the currently used version of the build, i.e. +Note the image name from one of the `kubernetes-...` containers. It contains the name and the currently used version of the build, e.g.: ``` ghcr.io/platform-mesh/kubernetes-graphql-gateway:v0.75.1 ``` @@ -33,11 +32,11 @@ ghcr.io/platform-mesh/kubernetes-graphql-gateway:v0.75.1 task docker ``` -5. Tag the newly built image with the version used in local-setup -- that image is going to be replaced with the one built on step 4. +5. Tag the newly built image with the version used in local-setup: ```shell docker tag ghcr.io/platform-mesh/kubernetes-graphql-gateway:latest ghcr.io/platform-mesh/kubernetes-graphql-gateway:v0.75.1 ``` -Use the name you and version got from the `IMAGE` column on step 3. Leave the version number unchanged. +Use the name and version you got from the `IMAGE` column in step 3. 6. Check your cluster name: ```shell @@ -50,7 +49,7 @@ In this example, the cluster name is `platform-mesh`. ```shell kind load docker-image ghcr.io/platform-mesh/kubernetes-graphql-gateway:v0.75.1 -n platform-mesh ``` -The argument `-n platform-mesh` is to change the default value of the cluster name, which is `kind`. +The argument `-n platform-mesh` targets the platform-mesh kind cluster. ***Podman-based kind:*** - Pull (or build) the image locally with Podman: @@ -70,7 +69,7 @@ kind load image-archive kubernetes-graphql-gateway_v0.75.1.tar -n platform-mesh 8. In k9s, go to `:pods` and delete the pod (not the container) called `kubernetes-graphql-gateway-...`. -Kubernetes will immediately recreate the pod -- but this time it will use the new version of the build. +Kubernetes will immediately recreate the pod - but this time it will use the new version of the build. 9. Once the pod is recreated, go to [https://portal.dev.local:8443](https://portal.dev.local:8443) and check if everything works fine. diff --git a/docs/multicluster-kubeconfig-flow.md b/docs/multicluster-kubeconfig-flow.md deleted file mode 100644 index 53682fe..0000000 --- a/docs/multicluster-kubeconfig-flow.md +++ /dev/null @@ -1,160 +0,0 @@ -# MultiCluster Kubeconfig Flow - -This document explains how the kubeconfig storage and usage flow works when `ENABLE_KCP=false` and `MULTICLUSTER=true`. - -## Overview - -The system is designed to work in the following way: - -1. **ClusterAccess Resources**: Store connection information for target clusters, including kubeconfig data -2. **Listener**: Processes ClusterAccess resources and generates schema files with embedded connection metadata -3. **Gateway**: Reads schema files and uses embedded metadata to connect to specific clusters - -## Flow Details - -### 1. 
ClusterAccess Resource Creation - -```yaml -apiVersion: gateway.platform-mesh.io/v1alpha1 -kind: ClusterAccess -metadata: - name: my-target-cluster -spec: - path: my-target-cluster # Used as schema filename - host: https://my-cluster-api-server:6443 - auth: - kubeconfigSecretRef: - name: my-cluster-kubeconfig - namespace: default - ca: - secretRef: - name: my-cluster-ca - namespace: default - key: ca.crt -``` - -### 2. Listener Processing - -When running with `ENABLE_KCP=false` and `MULTICLUSTER=true`: - -```bash -export ENABLE_KCP=false -export MULTICLUSTER=true -export KUBECONFIG=/path/to/management-cluster-config -./listener -``` - -The listener: -- Uses the `ClusterAccessReconciler` -- Watches for ClusterAccess resources -- For each ClusterAccess: - - Extracts cluster connection info (host, auth, CA) - - Connects to the target cluster to discover API schema - - Generates schema JSON with Kubernetes API definitions - - Injects `x-cluster-metadata` with connection information - - Saves schema file to `definitions/{cluster-name}.json` - -### 3. Schema File Structure - -Generated schema files contain: - -```json -{ - "definitions": { - // ... Kubernetes API definitions - }, - "x-cluster-metadata": { - "host": "https://my-cluster-api-server:6443", - "path": "my-target-cluster", - "auth": { - "type": "kubeconfig", - "kubeconfig": "base64-encoded-kubeconfig" - }, - "ca": { - "data": "base64-encoded-ca-cert" - } - } -} -``` - -### 4. Gateway Usage - -When running the gateway with `ENABLE_KCP=false` and `MULTICLUSTER=true`: - -```bash -export ENABLE_KCP=false -export MULTICLUSTER=true -# NOTE: KUBECONFIG is NOT needed for gateway in multicluster mode -./gateway -``` - -The gateway: -- Watches the definitions directory for schema files -- For each schema file: - - Reads the `x-cluster-metadata` section - - Creates a `rest.Config` using the embedded connection info - - Establishes a Kubernetes client connection to the target cluster - - Serves GraphQL API at `/{cluster-name}/graphql` -- **Does NOT require KUBECONFIG** - all connection info comes from schema files - -## Authentication Methods Supported - -### 1. Bearer Token -```yaml -auth: - secretRef: - name: my-cluster-token - namespace: default - key: token -``` - -### 2. Kubeconfig -```yaml -auth: - kubeconfigSecretRef: - name: my-cluster-kubeconfig - namespace: default -``` - -### 3. Client Certificates -```yaml -auth: - clientCertificateRef: - name: my-cluster-certs - namespace: default -``` - -## Key Benefits - -1. **Centralized Management**: All cluster access is managed through ClusterAccess resources -2. **Secure Storage**: Credentials stored in Kubernetes secrets -3. **Automatic Discovery**: API schemas automatically discovered from target clusters -4. **Standard Patterns**: Uses `ctrl.GetConfigOrDie()` pattern for configuration loading -5. **Simple Gateway Logic**: Gateway doesn't need complex certificate/token handling - -## Testing - -Use the provided integration test: - -```bash -./scripts/test-clusteraccess-integration.sh -``` - -This test verifies the end-to-end flow with kubeconfig-based authentication. 
- -## Troubleshooting - -### Schema files not generated -- Check that ClusterAccess CRD is installed: `kubectl apply -f config/crd/` -- Verify ClusterAccess resources exist: `kubectl get clusteraccess` -- Check listener logs for connection errors to target clusters - -### Gateway not connecting to clusters -- Verify schema files contain `x-cluster-metadata` -- Check gateway logs for authentication errors -- Ensure credentials in secrets are valid - -### Connection errors -- Verify target cluster URLs are accessible -- Check CA certificates are correct -- Validate authentication credentials have required permissions \ No newline at end of file diff --git a/docs/quickstart.md b/docs/quickstart.md index f4e37a1..2987e67 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -1,34 +1,26 @@ # Quick Start -This page shows you how to get started to use the GraphQL Gateway for Kubernetes. +This page shows you how to get started using the GraphQL Gateway for Kubernetes. ## Prerequisites - Installed [Golang](https://go.dev/doc/install) - Installed [Taskfile](https://taskfile.dev/installation) - A Kubernetes cluster to connect to (some options below) - - Option A: Prexisting standard Kuberentes cluster - - Option B: Preexisting Kuberentes cluster that is available through [Kuberentes Control Plane (KCP)](https://docs.kcp.io/kcp/main/setup/quickstart/) - - Option C: Create your own locally running Kuberentes cluster using [kind](https://kind.sigs.k8s.io/) + - Option A: Preexisting standard Kubernetes cluster + - Option B: Preexisting Kubernetes cluster that is available through [Kubernetes Control Plane (KCP)](https://docs.kcp.io/kcp/main/setup/quickstart/) + - Option C: Create your own locally running Kubernetes cluster using [kind](https://kind.sigs.k8s.io/) - Clone the `kubernetes-graphql-gateway` repository and change to the root directory ```shell -git clone git@github.com:openmfp/kubernetes-graphql-gateway.git && cd kubernetes-graphql-gateway +git clone https://github.com/platform-mesh/kubernetes-graphql-gateway.git && cd kubernetes-graphql-gateway ``` +## Operation Modes -## Setup the environment: -```shell -# this will disable authorization -export LOCAL_DEVELOPMENT=true -# kcp is enabled by default, in case you want to run it against a standard Kubernetes cluster -export ENABLE_KCP=false -# you must point to the config of the cluster you want to run against -export KUBECONFIG=YOUR_KUBECONFIG_PATH -``` - +Don't skip this step. Please go to the [operation modes](../README.md#operation-modes) page and complete the required setup. ## Running the Listener -Make sure you have done steps from the [setup section](#setup-the-environment). +Make sure you have completed the steps from the [Prerequisites](#prerequisites) and [Operation Modes](#operation-modes) sections. ```shell task listener @@ -39,9 +31,9 @@ The file will contain the API definitions for the resources in that workspace. ## Running the Gateway -Make sure you have done steps from the [setup section](#setup-the-environment). +Make sure you have completed the steps from the [Prerequisites](#prerequisites) section. -In the root directory of the `kubernetes-graphql-gateway` repository, open a new shell and run the Graphql gateway as follows: +In the root directory of the `kubernetes-graphql-gateway` repository, open a new shell and run the GraphQL gateway as follows: ```shell task gateway ``` @@ -52,21 +44,21 @@ Check the console output to get the localhost URL of the GraphQL playground. 
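
If you prefer the command line, the endpoint can be smoke-tested directly; a sketch, assuming the default definitions path used elsewhere in this repository (`./bin/definitions`) and a gateway on port 8080:

```shell
# Each schema file the listener wrote maps to one gateway URL.
ls ./bin/definitions

# Probe a workspace endpoint with a trivial query; replace <workspace>
# with one of the file names listed above (minus any extension).
curl -s \
  -H "Content-Type: application/json" \
  -d '{"query": "{ __typename }"}' \
  http://localhost:8080/<workspace>/graphql
```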
## First Steps and Basic Examples -As said above, the GraphQL Gateway allows you do CRUD operations on any of the Kubernetes resources in the cluster. -You may checkout the following copy & paste examples to get started: -- Examples on [CRUD operations on ConfigMaps](./configmap_queries.md). -- Examples on [CRUD operations on Pods](./pod_queries.md). -- Subscribe to events using [Subscriptions](./subscriptions.md). -- There are also [Custom Queries](./custom_queries.md) that go beyond what. +As mentioned above, the GraphQL Gateway allows you to do CRUD operations on any of the Kubernetes resources in the cluster. +You may check out the following copy & paste examples to get started: +- Examples on [CRUD operations on ConfigMaps](./configmap_queries.md) +- Examples on [CRUD operations on Pods](./pod_queries.md) +- Subscribe to events using [Subscriptions](./subscriptions.md) +- There are also [Custom Queries](./custom_queries.md) that go beyond standard CRUD operations -## Authorization with Remote Kuberenetes Clusters +## Authorization with Remote Kubernetes Clusters -If you run the GraphQL gateway with an shell environment that sets `LOCAL_DEVELOPMENT=false`, you need to add the `Authorization` header to any of your GraphQL queries you are executing. +If you run the GraphQL gateway with a shell environment that sets `LOCAL_DEVELOPMENT=false`, you need to add the `Authorization` header to any of your GraphQL queries you are executing. When using the GraphQL playground, you can add the header in the `Headers` section of the playground user interface like so: ```shell { - "Authorization": "YOUR_TOKEN" + "Authorization": "Bearer YOUR_TOKEN" } ``` diff --git a/docs/subscriptions.md b/docs/subscriptions.md index 11f1261..12c6cbf 100644 --- a/docs/subscriptions.md +++ b/docs/subscriptions.md @@ -1,33 +1,33 @@ # Subscriptions To subscribe to events, you should use the SSE (Server-Sent Events) protocol. -Since GraphQL playground doesn't support (see [Quick Start Guide](./quickstart.md)) we won't use the GraphQL playground to execute the queries. -Instead we use the `curl` command line tool to execute the queries. +Since GraphQL playground doesn't support SSE (see [Quick Start Guide](./quickstart.md)), we won't use the GraphQL playground to execute the queries. +Instead, we use the `curl` command line tool to execute the queries. ## Prerequisites ```shell -export GRAPHQL_URL=http://localhost:8080/root/graphql # update with your actual GraphQL endpoint -export AUTHORIZATION_TOKEN= # update this with your token, if LOCAL_DEVELOPMENT=false +export GRAPHQL_URL=http://localhost:8080/root/graphql # Update with your actual GraphQL endpoint +export AUTHORIZATION_TOKEN="Bearer " # Update with your token if LOCAL_DEVELOPMENT=false ``` ## Parameters -- `subscribeToAll`: if true, any field change will be sent to the client. +- `subscribeToAll`: If true, any field change will be sent to the client. Otherwise, only fields defined within the `{}` brackets will be listened to. -Please note that only fields specified in `{}` brackets will be returned, even if `subscribeToAll: true` +**Note:** Only fields specified in `{}` brackets will be returned, even if `subscribeToAll: true`. -### Return parameters +### Return Parameters -- `data` field contains the data returned by the subscription. -- `errors` field contains the errors if any occurred during the subscription. +- `data`: Contains the data returned by the subscription. +- `errors`: Contains the errors if any occurred during the subscription. 
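
One practical note before the examples below: some curl builds buffer output, which can make a healthy stream look stuck; the `--no-buffer` (`-N`) flag prints events as they arrive:

```shell
# -N disables output buffering so SSE events appear immediately.
curl -N \
  -H "Accept: text/event-stream" \
  -H "Content-Type: application/json" \
  -H "Authorization: $AUTHORIZATION_TOKEN" \
  -d '{"query": "subscription { core_configmaps { metadata { name } } }"}' \
  $GRAPHQL_URL
```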
## Subscribe to the ConfigMap Resource ConfigMap is present in both KCP and standard Kubernetes clusters, so we can use it right away without any additional setup. -After subscription, you can run mutations from [Configmap Queries](./configmap_queries.md) to see the changes in the subscription. +After subscription, you can run mutations from [ConfigMap Queries](./configmap_queries.md) to see the changes in the subscription. -### Subscribe to a Change of a Data Field in All ConfigMaps: +### Subscribe to a Change of a Data Field in All ConfigMaps ```shell curl \ -H "Accept: text/event-stream" \ @@ -36,7 +36,7 @@ curl \ -d '{"query": "subscription { core_configmaps { metadata { name } data }}"}' \ $GRAPHQL_URL ``` -### Subscribe to a Change of a Data Field in a Specific ConfigMap: +### Subscribe to a Change of a Data Field in a Specific ConfigMap ```shell curl \ @@ -47,9 +47,7 @@ curl \ $GRAPHQL_URL ``` -### Subscribe to a Change of All Fields in a Specific ConfigMap: - -Please note that only fields specified in `{}` brackets will be returned, even if `subscribeToAll: true` +### Subscribe to a Change of All Fields in a Specific ConfigMap ```shell curl \ @@ -86,7 +84,7 @@ curl \ $GRAPHQL_URL ``` -### Subscribe to a Change of a DisplayName Field in All Accounts +### Subscribe to a Change of All Fields in a Specific Account ```shell curl \ -H "Accept: text/event-stream" \ diff --git a/docs/virtual-workspaces.md b/docs/virtual-workspaces.md index 07b530a..3b691ad 100644 --- a/docs/virtual-workspaces.md +++ b/docs/virtual-workspaces.md @@ -21,7 +21,7 @@ virtualWorkspaces: - `virtualWorkspaces`: Array of virtual workspace definitions - `name`: Unique identifier for the virtual workspace (used in URL paths) - `url`: Full URL to the virtual workspace or API export - - `kubeconfig`: path to kcp kubeconfig + - `kubeconfig`: Path to KCP kubeconfig ## Environment Variables diff --git a/hack/create-clusteraccess.sh b/hack/create-clusteraccess.sh index 8686613..a370f2b 100755 --- a/hack/create-clusteraccess.sh +++ b/hack/create-clusteraccess.sh @@ -128,6 +128,9 @@ ensure_crd_installed() { cleanup_existing_resources() { log_info "Checking for existing ClusterAccess resource '$CLUSTER_NAME'..." + SA_NAME="kubernetes-graphql-gateway-admin" + SA_NAMESPACE="default" + # Check if ClusterAccess exists in management cluster if KUBECONFIG="$MANAGEMENT_KUBECONFIG" kubectl get clusteraccess "$CLUSTER_NAME" &>/dev/null; then log_warn "ClusterAccess '$CLUSTER_NAME' already exists. Cleaning up existing resources..." @@ -146,6 +149,14 @@ cleanup_existing_resources() { else log_info "No existing ClusterAccess found. Creating new resources..." fi + + # Clean up ServiceAccount and related resources in target cluster + if KUBECONFIG="$TARGET_KUBECONFIG" kubectl get serviceaccount "$SA_NAME" -n "$SA_NAMESPACE" &>/dev/null; then + log_info "Cleaning up existing ServiceAccount and related resources in target cluster..." + KUBECONFIG="$TARGET_KUBECONFIG" kubectl delete secret "${SA_NAME}-token" -n "$SA_NAMESPACE" --ignore-not-found=true + KUBECONFIG="$TARGET_KUBECONFIG" kubectl delete clusterrolebinding "${SA_NAME}-cluster-admin" --ignore-not-found=true + KUBECONFIG="$TARGET_KUBECONFIG" kubectl delete serviceaccount "$SA_NAME" -n "$SA_NAMESPACE" --ignore-not-found=true + fi } log_info "Creating ClusterAccess resource '$CLUSTER_NAME'" @@ -200,6 +211,51 @@ if ! 
 fi
 log_info "Management cluster is accessible"
 
+# Create ServiceAccount with admin access in target cluster
+log_info "Creating ServiceAccount with admin access in target cluster..."
+SA_NAME="kubernetes-graphql-gateway-admin"
+SA_NAMESPACE="default"
+
+cat <

Date: Tue, 14 Oct 2025 13:34:56 +0200
Subject: [PATCH 10/13] removed testing script to simplify support

---
 docs/clusteraccess.md                  |  10 --
 hack/test-clusteraccess-integration.sh | 197 ------------------------
 2 files changed, 207 deletions(-)
 delete mode 100755 hack/test-clusteraccess-integration.sh

diff --git a/docs/clusteraccess.md b/docs/clusteraccess.md
index a1afcb2..c68d5a6 100644
--- a/docs/clusteraccess.md
+++ b/docs/clusteraccess.md
@@ -105,16 +105,6 @@ The gateway:
 - Serves GraphQL API at `/{cluster-name}/graphql`
 - **Does NOT require KUBECONFIG** - all connection info comes from schema files
 
-## Testing
-
-Use the provided integration test:
-
-```bash
-./scripts/test-clusteraccess-integration.sh
-```
-
-This test verifies the end-to-end flow with kubeconfig-based authentication.
-
 ## Troubleshooting
 
 ### Schema files not generated
diff --git a/hack/test-clusteraccess-integration.sh b/hack/test-clusteraccess-integration.sh
deleted file mode 100755
index e1b9411..0000000
--- a/hack/test-clusteraccess-integration.sh
+++ /dev/null
@@ -1,197 +0,0 @@
-#!/bin/bash
-
-set -e
-
-# Colors for output
-RED='\033[0;31m'
-GREEN='\033[0;32m'
-YELLOW='\033[1;33m'
-BLUE='\033[0;34m'
-NC='\033[0m' # No Color
-
-log_info() {
-    echo -e "${GREEN}[INFO]${NC} $1"
-}
-
-log_warn() {
-    echo -e "${YELLOW}[WARN]${NC} $1"
-}
-
-log_error() {
-    echo -e "${RED}[ERROR]${NC} $1"
-}
-
-log_step() {
-    echo -e "${BLUE}[STEP]${NC} $1"
-}
-
-# Test configuration
-TEST_CLUSTER_NAME="test-cluster"
-MANAGEMENT_KUBECONFIG="${KUBECONFIG:-$HOME/.kube/config}"
-DEFINITIONS_DIR="./bin/definitions"
-
-log_info "Testing ClusterAccess integration with kubeconfig storage"
-log_info "Management kubeconfig: $MANAGEMENT_KUBECONFIG"
-log_info "Definitions directory: $DEFINITIONS_DIR"
-
-# Verify prerequisites
-log_step "1. Verifying prerequisites"
-
-if ! kubectl --kubeconfig="$MANAGEMENT_KUBECONFIG" cluster-info &>/dev/null; then
-    log_error "Cannot connect to management cluster"
-    exit 1
-fi
-
-if ! kubectl --kubeconfig="$MANAGEMENT_KUBECONFIG" get clusteraccess &>/dev/null; then
-    log_error "ClusterAccess CRD not installed. Please run: kubectl apply -f config/crd/"
-    exit 1
-fi
-
-log_info "Prerequisites verified"
-
-# Create test kubeconfig secret
-log_step "2. Creating test kubeconfig secret"
-
-# Use the same kubeconfig for testing (in real scenarios this would be different)
-KUBECONFIG_B64=$(base64 -w 0 < "$MANAGEMENT_KUBECONFIG")
-
-cat </dev/null; then
-    log_info "ClusterAccess resource exists"
-else
-    log_error "ClusterAccess resource not found"
-    exit 1
-fi
-
-# Start listener to process ClusterAccess
-log_step "6. Starting listener to process ClusterAccess"
-
-export ENABLE_KCP=false
-export LOCAL_DEVELOPMENT=false
-export MULTICLUSTER=true
-export KUBECONFIG="$MANAGEMENT_KUBECONFIG"
-export OPENAPI_DEFINITIONS_PATH="$DEFINITIONS_DIR"
-
-log_info "Starting listener with ENABLE_KCP=false, MULTICLUSTER=true"
-log_info "This should use the ClusterAccess reconciler..."
-
-# Run listener in background for a short time to generate schema
-timeout 30s go run . listener || true
-
-# Check if schema file was generated
-log_step "7. Checking if schema file was generated"
-
-SCHEMA_FILE="$DEFINITIONS_DIR/${TEST_CLUSTER_NAME}.json"
-if [ -f "$SCHEMA_FILE" ]; then
-    log_info "Schema file generated: $SCHEMA_FILE"
-
-    # Check if it contains x-cluster-metadata
-    if grep -q "x-cluster-metadata" "$SCHEMA_FILE"; then
-        log_info "Schema file contains x-cluster-metadata ✓"
-
-        # Show the metadata
-        log_info "Cluster metadata:"
-        jq '.["x-cluster-metadata"]' "$SCHEMA_FILE" 2>/dev/null || echo "  (Could not parse metadata)"
-    else
-        log_warn "Schema file does not contain x-cluster-metadata"
-    fi
-else
-    log_error "Schema file not generated: $SCHEMA_FILE"
-    exit 1
-fi
-
-# Test gateway reading the schema
-log_step "8. Testing gateway configuration"
-
-export ENABLE_KCP=false
-export LOCAL_DEVELOPMENT=false
-export MULTICLUSTER=true
-# NOTE: KUBECONFIG not needed for gateway in multicluster mode
-unset KUBECONFIG
-export OPENAPI_DEFINITIONS_PATH="$DEFINITIONS_DIR"
-export GATEWAY_PORT=17080
-
-log_info "Starting gateway with the generated schema..."
-log_info "Gateway should read x-cluster-metadata and connect to the specified cluster"
-log_info "KUBECONFIG is NOT needed for gateway in multicluster mode"
-
-# Start gateway in background for a short test
-timeout 10s go run . gateway &
-GATEWAY_PID=$!
-
-# Wait a bit for gateway to start
-sleep 3
-
-# Test gateway endpoint
-log_step "9. Testing gateway endpoint"
-if curl -s "http://localhost:$GATEWAY_PORT/${TEST_CLUSTER_NAME}/graphql" -H "Content-Type: application/json" -d '{"query": "{ __schema { types { name } } }"}' | grep -q "data"; then
-    log_info "Gateway endpoint responds correctly ✓"
-else
-    log_warn "Gateway endpoint test failed or timed out"
-fi
-
-# Cleanup
-log_step "10. Cleanup"
-
-# Kill gateway if still running
-if kill -0 $GATEWAY_PID 2>/dev/null; then
-    kill $GATEWAY_PID 2>/dev/null || true
-fi
-
-# Remove test resources
-kubectl --kubeconfig="$MANAGEMENT_KUBECONFIG" delete clusteraccess "$TEST_CLUSTER_NAME" --ignore-not-found=true
-kubectl --kubeconfig="$MANAGEMENT_KUBECONFIG" delete secret "${TEST_CLUSTER_NAME}-kubeconfig" --ignore-not-found=true
-
-# Remove generated schema
-rm -f "$SCHEMA_FILE"
-
-log_info "Cleanup completed"
-log_info "Integration test completed successfully!"
-
-echo ""
-log_info "Summary:"
-echo "  ✓ ClusterAccess reconciler processes kubeconfig-based authentication"
-echo "  ✓ Schema files are generated with x-cluster-metadata"
-echo "  ✓ Gateway reads x-cluster-metadata for cluster-specific connections"
-echo "  ✓ End-to-end integration works with ENABLE_KCP=false and MULTICLUSTER=true"
\ No newline at end of file
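With the integration script gone, there is no longer a committed way to smoke-test a running gateway. The check from the script's step 9 is easy to reproduce by hand; below is a minimal, standalone Go sketch of it, not part of the repository. The port (17080), the cluster name (test-cluster), and the introspection query are the values the deleted script used, so treat them as assumptions to adjust for your own environment.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Assumed values, taken from the deleted script: the gateway listens on
	// port 17080 and serves a cluster registered as "test-cluster".
	url := "http://localhost:17080/test-cluster/graphql"
	query := []byte(`{"query": "{ __schema { types { name } } }"}`)

	resp, err := http.Post(url, "application/json", bytes.NewReader(query))
	if err != nil {
		fmt.Println("gateway not reachable:", err)
		return
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	// A healthy gateway answers introspection with a JSON body containing "data".
	if bytes.Contains(body, []byte(`"data"`)) {
		fmt.Println("gateway endpoint responds correctly")
	} else {
		fmt.Println("unexpected response:", resp.Status, string(body))
	}
}
```

Run it while the gateway is up; any response containing a `data` field indicates the schema file for that cluster was picked up.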
From cde201ff70d4c9a7ec9693bfcad0cfe0e941c4ef Mon Sep 17 00:00:00 2001
From: makdeniss
Date: Tue, 14 Oct 2025 16:21:25 +0300
Subject: [PATCH 11/13] feat: cluster access definitions deletion (#69)

---
 .../reconciler/clusteraccess/subroutines.go   | 57 ++++++++++++++++++-
 1 file changed, 56 insertions(+), 1 deletion(-)

diff --git a/listener/reconciler/clusteraccess/subroutines.go b/listener/reconciler/clusteraccess/subroutines.go
index 3310709..4387999 100644
--- a/listener/reconciler/clusteraccess/subroutines.go
+++ b/listener/reconciler/clusteraccess/subroutines.go
@@ -17,6 +17,8 @@ import (
 	"github.com/platform-mesh/kubernetes-graphql-gateway/listener/reconciler"
 )
 
+const lastSchemaFilenameAnnotation = "platform-mesh.io/last-schema-filename"
+
 // generateSchemaSubroutine processes ClusterAccess resources and generates schemas
 type generateSchemaSubroutine struct {
 	reconciler *ClusterAccessReconciler
@@ -72,13 +74,36 @@ func (s *generateSchemaSubroutine) Process(ctx context.Context, instance runtime
 		return ctrl.Result{}, commonserrors.NewOperatorError(err, false, false)
 	}
 
+	// Before writing, check if the schema filename changed compared to previous reconcile.
+	ann := clusterAccess.GetAnnotations()
+	if ann == nil {
+		ann = map[string]string{}
+	}
+	prevFile := ann[lastSchemaFilenameAnnotation]
+	if prevFile != "" && prevFile != clusterName {
+		if err := s.reconciler.ioHandler.Delete(prevFile); err != nil {
+			s.reconciler.log.Warn().Err(err).Str("prevFile", prevFile).Str("clusterAccess", clusterAccessName).Msg("failed to delete previous schema file; continuing")
+		} else {
+			s.reconciler.log.Info().Str("deletedFile", prevFile).Str("clusterAccess", clusterAccessName).Msg("deleted previous schema file after path change")
+		}
+	}
+
 	// Write schema to file using cluster name from path or resource name
 	if err := s.reconciler.ioHandler.Write(schemaWithMetadata, clusterName); err != nil {
 		s.reconciler.log.Error().Err(err).Str("clusterAccess", clusterAccessName).Msg("failed to write schema")
 		return ctrl.Result{}, commonserrors.NewOperatorError(err, false, false)
 	}
 
-	s.reconciler.log.Info().Str("clusterAccess", clusterAccessName).Msg("successfully processed ClusterAccess resource")
+	// Update the annotation to reflect the current schema filename
+	if prevFile != clusterName {
+		ann[lastSchemaFilenameAnnotation] = clusterName
+		clusterAccess.SetAnnotations(ann)
+		if err := s.reconciler.opts.Client.Update(ctx, clusterAccess); err != nil {
+			s.reconciler.log.Warn().Err(err).Str("clusterAccess", clusterAccessName).Msg("failed to update annotation with last schema filename")
+		}
+	}
+
+	s.reconciler.log.Info().Str("clusterAccess", clusterAccessName).Str("schemaFile", clusterName).Msg("successfully processed ClusterAccess resource")
 	return ctrl.Result{}, nil
 }
 
@@ -97,6 +122,36 @@ func (s *generateSchemaSubroutine) restMapperFromConfig(cfg *rest.Config) (meta.
 }
 
 func (s *generateSchemaSubroutine) Finalize(ctx context.Context, instance runtimeobject.RuntimeObject) (ctrl.Result, commonserrors.OperatorError) {
+	clusterAccess, ok := instance.(*gatewayv1alpha1.ClusterAccess)
+	if !ok {
+		s.reconciler.log.Error().Msg("instance is not a ClusterAccess resource in Finalize")
+		return ctrl.Result{}, commonserrors.NewOperatorError(errors.New("invalid resource type"), false, false)
+	}
+
+	// Determine which file to delete: prefer the recorded annotation, fallback to computed name
+	ann := clusterAccess.GetAnnotations()
+	var filename string
+	if ann != nil && ann[lastSchemaFilenameAnnotation] != "" {
+		filename = ann[lastSchemaFilenameAnnotation]
+	} else {
+		// compute from spec.path or name
+		filename = clusterAccess.Spec.Path
+		if filename == "" {
+			filename = clusterAccess.GetName()
+		}
+	}
+
+	if filename == "" {
+		return ctrl.Result{}, nil
+	}
+
+	if err := s.reconciler.ioHandler.Delete(filename); err != nil {
+		s.reconciler.log.Warn().Err(err).Str("clusterAccess", clusterAccess.GetName()).Str("file", filename).Msg("failed to delete schema file on finalize")
+		// Do not block finalization
+		return ctrl.Result{}, nil
+	}
+
+	s.reconciler.log.Info().Str("clusterAccess", clusterAccess.GetName()).Str("file", filename).Msg("deleted schema file on finalize")
 	return ctrl.Result{}, nil
 }
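The essence of this patch is a small bookkeeping loop: remember the filename written on the previous reconcile in the `platform-mesh.io/last-schema-filename` annotation, delete the stale file when the computed filename changes, then record the new one. Here is a self-contained sketch of that idea, in which a plain map stands in for the resource's annotations and `os` file calls stand in for the reconciler's `ioHandler`; both stand-ins are illustrative, not the real types.

```go
package main

import (
	"fmt"
	"os"
)

const lastSchemaFilenameAnnotation = "platform-mesh.io/last-schema-filename"

// writeSchema mirrors the reconcile-time bookkeeping: clean up the file from
// the previous reconcile if the target filename changed, write the new file,
// then record the filename for the next reconcile.
func writeSchema(annotations map[string]string, filename string, schema []byte) error {
	if prev := annotations[lastSchemaFilenameAnnotation]; prev != "" && prev != filename {
		if err := os.Remove(prev); err != nil && !os.IsNotExist(err) {
			// As in the patch: log and continue, never fail the reconcile on cleanup.
			fmt.Println("warn: could not delete previous schema file:", err)
		}
	}
	if err := os.WriteFile(filename, schema, 0o644); err != nil {
		return err
	}
	annotations[lastSchemaFilenameAnnotation] = filename
	return nil
}

func main() {
	ann := map[string]string{}
	_ = writeSchema(ann, "cluster-a.json", []byte(`{}`)) // first write
	_ = writeSchema(ann, "cluster-b.json", []byte(`{}`)) // rename: removes cluster-a.json
	fmt.Println(ann)
}
```

The ordering matters in the reconciler for the same reason it does here: the stale file is removed before the new write, and a failed cleanup is only logged, so reconciliation never fails over garbage collection.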
From 9d9185bac70376bec6222c00a96a1ac9a59614b0 Mon Sep 17 00:00:00 2001
From: makdeniss
Date: Wed, 15 Oct 2025 17:41:03 +0300
Subject: [PATCH 12/13] feat: refine cluster access definitions deletion (#69)

---
 .../reconciler/clusteraccess/subroutines.go   | 86 +++++++++++--------
 1 file changed, 48 insertions(+), 38 deletions(-)

diff --git a/listener/reconciler/clusteraccess/subroutines.go b/listener/reconciler/clusteraccess/subroutines.go
index 4387999..6c8ba9e 100644
--- a/listener/reconciler/clusteraccess/subroutines.go
+++ b/listener/reconciler/clusteraccess/subroutines.go
@@ -17,7 +17,11 @@ import (
 	"github.com/platform-mesh/kubernetes-graphql-gateway/listener/reconciler"
 )
 
-const lastSchemaFilenameAnnotation = "platform-mesh.io/last-schema-filename"
+// Finalizer and annotation keys used to track the schema file written for a ClusterAccess
+const (
+	finalizerName            = "gateway.openmfp.org/clusteraccess-finalizer"
+	lastSchemaPathAnnotation = "gateway.openmfp.org/last-schema-path"
+)
 
 // generateSchemaSubroutine processes ClusterAccess resources and generates schemas
 type generateSchemaSubroutine struct {
@@ -74,17 +78,15 @@ func (s *generateSchemaSubroutine) Process(ctx context.Context, instance runtime
 		return ctrl.Result{}, commonserrors.NewOperatorError(err, false, false)
 	}
 
-	// Before writing, check if the schema filename changed compared to previous reconcile.
-	ann := clusterAccess.GetAnnotations()
-	if ann == nil {
-		ann = map[string]string{}
+	// If path changed, delete the old schema file referenced in the annotation
+	prevPath := ""
+	if ann := clusterAccess.GetAnnotations(); ann != nil {
+		prevPath = ann[lastSchemaPathAnnotation]
 	}
-	prevFile := ann[lastSchemaFilenameAnnotation]
-	if prevFile != "" && prevFile != clusterName {
-		if err := s.reconciler.ioHandler.Delete(prevFile); err != nil {
-			s.reconciler.log.Warn().Err(err).Str("prevFile", prevFile).Str("clusterAccess", clusterAccessName).Msg("failed to delete previous schema file; continuing")
-		} else {
-			s.reconciler.log.Info().Str("deletedFile", prevFile).Str("clusterAccess", clusterAccessName).Msg("deleted previous schema file after path change")
+	if prevPath != "" && prevPath != clusterName {
+		if err := s.reconciler.ioHandler.Delete(prevPath); err != nil {
+			// Log and continue; do not fail reconciliation on cleanup issues
+			s.reconciler.log.Warn().Err(err).Str("previousPath", prevPath).Str("clusterAccess", clusterAccessName).Msg("failed to delete previous schema file")
 		}
 	}
 
@@ -94,16 +96,24 @@ func (s *generateSchemaSubroutine) Process(ctx context.Context, instance runtime
 		return ctrl.Result{}, commonserrors.NewOperatorError(err, false, false)
 	}
 
-	// Update the annotation to reflect the current schema filename
-	if prevFile != clusterName {
-		ann[lastSchemaFilenameAnnotation] = clusterName
-		clusterAccess.SetAnnotations(ann)
-		if err := s.reconciler.opts.Client.Update(ctx, clusterAccess); err != nil {
-			s.reconciler.log.Warn().Err(err).Str("clusterAccess", clusterAccessName).Msg("failed to update annotation with last schema filename")
+	// Ensure annotation reflects the current path for future cleanups
+	needUpdate := prevPath != clusterName
+	if needUpdate {
+		obj := clusterAccess.DeepCopy()
+		if obj.Annotations == nil {
+			obj.Annotations = map[string]string{}
+		}
+		obj.Annotations[lastSchemaPathAnnotation] = clusterName
+		if err := s.reconciler.opts.Client.Update(ctx, obj); err != nil {
+			// Log but do not fail reconciliation; file has been written already
+			s.reconciler.log.Warn().Err(err).Str("clusterAccess", clusterAccessName).Msg("failed to update last schema path annotation")
+		} else {
+			// Reflect update locally too to avoid future confusion in this reconcile loop
+			clusterAccess.Annotations = obj.Annotations
 		}
 	}
 
-	s.reconciler.log.Info().Str("clusterAccess", clusterAccessName).Str("schemaFile", clusterName).Msg("successfully processed ClusterAccess resource")
+	s.reconciler.log.Info().Str("clusterAccess", clusterAccessName).Msg("successfully processed ClusterAccess resource")
 	return ctrl.Result{}, nil
 }
 
@@ -128,30 +138,30 @@ func (s *generateSchemaSubroutine) Finalize(ctx context.Context, instance runtim
 		return ctrl.Result{}, commonserrors.NewOperatorError(errors.New("invalid resource type"), false, false)
 	}
 
-	// Determine which file to delete: prefer the recorded annotation, fallback to computed name
-	ann := clusterAccess.GetAnnotations()
-	var filename string
-	if ann != nil && ann[lastSchemaFilenameAnnotation] != "" {
-		filename = ann[lastSchemaFilenameAnnotation]
-	} else {
-		// compute from spec.path or name
-		filename = clusterAccess.Spec.Path
-		if filename == "" {
-			filename = clusterAccess.GetName()
-		}
+	// Determine current and previously used paths
+	currentPath := clusterAccess.Spec.Path
+	if currentPath == "" {
+		currentPath = clusterAccess.GetName()
 	}
-
-	if filename == "" {
-		return ctrl.Result{}, nil
+	prevPath := ""
+	if ann := clusterAccess.GetAnnotations(); ann != nil {
+		prevPath = ann[lastSchemaPathAnnotation]
+	}
 
-	if err := s.reconciler.ioHandler.Delete(filename); err != nil {
-		s.reconciler.log.Warn().Err(err).Str("clusterAccess", clusterAccess.GetName()).Str("file", filename).Msg("failed to delete schema file on finalize")
-		// Do not block finalization
-		return ctrl.Result{}, nil
+	// Try deleting current path file
+	if currentPath != "" {
+		if err := s.reconciler.ioHandler.Delete(currentPath); err != nil {
+			// Log and continue; do not block finalization just because file was missing or deletion failed
+			s.reconciler.log.Warn().Err(err).Str("path", currentPath).Str("clusterAccess", clusterAccess.GetName()).Msg("failed to delete schema file during finalization")
+		}
+	}
+	// If previous differs, try deleting it as well
+	if prevPath != "" && prevPath != currentPath {
+		if err := s.reconciler.ioHandler.Delete(prevPath); err != nil {
+			s.reconciler.log.Warn().Err(err).Str("path", prevPath).Str("clusterAccess", clusterAccess.GetName()).Msg("failed to delete previous schema file during finalization")
+		}
 	}
 
-	s.reconciler.log.Info().Str("clusterAccess", clusterAccess.GetName()).Str("file", filename).Msg("deleted schema file on finalize")
 	return ctrl.Result{}, nil
 }
 
@@ -160,5 +170,5 @@ func (s *generateSchemaSubroutine) GetName() string {
 }
 
 func (s *generateSchemaSubroutine) Finalizers() []string {
-	return nil
+	return []string{finalizerName}
 }
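The refined Finalize derives the current filename from `spec.path` with the resource name as fallback, and additionally removes the previously recorded path from the `gateway.openmfp.org/last-schema-path` annotation when it differs. That resolution rule is small enough to isolate as a pure function; the sketch below is an illustrative stand-in (simplified types, hypothetical `pathsToDelete` helper), not the package's actual API.

```go
package main

import "fmt"

const lastSchemaPathAnnotation = "gateway.openmfp.org/last-schema-path"

// pathsToDelete resolves which schema files finalization should remove:
// the current path (spec.path, falling back to the resource name) and,
// if different, the previously recorded path from the annotation.
func pathsToDelete(specPath, name string, annotations map[string]string) []string {
	current := specPath
	if current == "" {
		current = name
	}
	paths := []string{}
	if current != "" {
		paths = append(paths, current)
	}
	if prev := annotations[lastSchemaPathAnnotation]; prev != "" && prev != current {
		paths = append(paths, prev)
	}
	return paths
}

func main() {
	ann := map[string]string{lastSchemaPathAnnotation: "previous-path"}
	fmt.Println(pathsToDelete("current-path", "my-resource", ann))
	// Prints: [current-path previous-path]
}
```

The printed pair, `current-path` and `previous-path`, is exactly what the Finalize test in the next patch asserts through its mocked `Delete` calls.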
From 6ab9646d76866e86364f4eac71bfa223107c1360 Mon Sep 17 00:00:00 2001
From: makdeniss
Date: Thu, 16 Oct 2025 15:08:13 +0300
Subject: [PATCH 13/13] feat: add tests for cluster access definitions deletion
 and edit (#69)

---
 .../clusteraccess/subroutines_test.go         | 122 ++++++++++++++++++
 1 file changed, 122 insertions(+)
 create mode 100644 listener/reconciler/clusteraccess/subroutines_test.go

diff --git a/listener/reconciler/clusteraccess/subroutines_test.go b/listener/reconciler/clusteraccess/subroutines_test.go
new file mode 100644
index 0000000..7b3cf78
--- /dev/null
+++ b/listener/reconciler/clusteraccess/subroutines_test.go
@@ -0,0 +1,122 @@
+package clusteraccess
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/rest"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+	"github.com/platform-mesh/golang-commons/logger"
+	gatewayv1alpha1 "github.com/platform-mesh/kubernetes-graphql-gateway/common/apis/v1alpha1"
+	workspacefile_mocks "github.com/platform-mesh/kubernetes-graphql-gateway/listener/pkg/workspacefile/mocks"
+	"github.com/platform-mesh/kubernetes-graphql-gateway/listener/reconciler"
+)
+
+func TestGenerateSchemaSubroutine_Process_InvalidResourceType(t *testing.T) {
+	mockIO := workspacefile_mocks.NewMockIOHandler(t)
+	log, _ := logger.New(logger.DefaultConfig())
+
+	r := &ClusterAccessReconciler{
+		ioHandler: mockIO,
+		log:       log,
+	}
+	s := &generateSchemaSubroutine{reconciler: r}
+
+	_, opErr := s.Process(context.Background(), &metav1.PartialObjectMetadata{})
+
+	assert.NotNil(t, opErr)
+}
+
+func TestGenerateSchemaSubroutine_Process_MissingHostReturnsError(t *testing.T) {
+	scheme := runtime.NewScheme()
+	_ = gatewayv1alpha1.AddToScheme(scheme)
+
+	ca := &gatewayv1alpha1.ClusterAccess{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:        "test-cluster",
+			Annotations: map[string]string{},
+		},
+		Spec: gatewayv1alpha1.ClusterAccessSpec{
+			// Host is intentionally empty to trigger validation error
+		},
+	}
+
+	fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(ca).Build()
+
+	mockIO := workspacefile_mocks.NewMockIOHandler(t)
+	mockIO.EXPECT().Write(mock.Anything, mock.Anything).Maybe().Return(nil)
+	mockIO.EXPECT().Delete(mock.Anything).Maybe().Return(nil)
+
+	log, _ := logger.New(logger.DefaultConfig())
+
+	r := &ClusterAccessReconciler{
+		ioHandler: mockIO,
+		log:       log,
+		opts: reconciler.ReconcilerOpts{
+			Client:      fakeClient,
+			Config:      &rest.Config{Host: "https://unit-test.invalid"},
+			ManagerOpts: ctrl.Options{Scheme: scheme},
+			Scheme:      scheme,
+		},
+	}
+	s := &generateSchemaSubroutine{reconciler: r}
+
+	res, opErr := s.Process(context.Background(), ca)
+
+	assert.NotNil(t, opErr)
+	assert.Equal(t, ctrl.Result{}, res)
+}
+
+func TestGenerateSchemaSubroutine_Finalize_DeletesCurrentAndPreviousPaths(t *testing.T) {
+	mockIO := workspacefile_mocks.NewMockIOHandler(t)
+	log, _ := logger.New(logger.DefaultConfig())
+
+	// Expect deletion of both current and previous paths
+	mockIO.EXPECT().Delete("current-path").Return(nil).Once()
+	mockIO.EXPECT().Delete("previous-path").Return(nil).Once()
+
+	r := &ClusterAccessReconciler{
+		ioHandler: mockIO,
+		log:       log,
+	}
+	s := &generateSchemaSubroutine{reconciler: r}
+
+	ca := &gatewayv1alpha1.ClusterAccess{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "my-resource",
+			Annotations: map[string]string{
+				lastSchemaPathAnnotation: "previous-path",
+			},
+		},
+		Spec: gatewayv1alpha1.ClusterAccessSpec{
+			Path: "current-path",
+		},
+	}
+
+	res, opErr := s.Finalize(context.Background(), ca)
+
+	assert.Nil(t, opErr)
+	assert.Equal(t, ctrl.Result{}, res)
+}
+
+func TestGenerateSchemaSubroutine_restMapperFromConfig_SucceedsWithMinimalConfig(t *testing.T) {
+	mockIO := workspacefile_mocks.NewMockIOHandler(t)
+	log, _ := logger.New(logger.DefaultConfig())
+
+	r := &ClusterAccessReconciler{
+		ioHandler: mockIO,
+		log:       log,
+	}
+	s := &generateSchemaSubroutine{reconciler: r}
+
+	rm, err := s.restMapperFromConfig(&rest.Config{})
+
+	assert.NotNil(t, rm)
+	assert.NoError(t, err)
+}
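The committed tests cover the invalid-type guard, the missing-host validation error, both-path deletion on finalize, and REST mapper construction. To pin the resolution rule itself down further, a table-driven test over the `pathsToDelete` sketch shown earlier (same hypothetical `package main`, not the repository's test suite) could look like this:

```go
package main

import (
	"reflect"
	"testing"
)

func TestPathsToDelete(t *testing.T) {
	cases := []struct {
		name     string
		specPath string
		resName  string
		ann      map[string]string
		want     []string
	}{
		// Current path and recorded previous path differ: delete both.
		{"path changed", "current-path", "my-resource",
			map[string]string{lastSchemaPathAnnotation: "previous-path"},
			[]string{"current-path", "previous-path"}},
		// No spec.path: fall back to the resource name.
		{"fallback to name", "", "my-resource", nil,
			[]string{"my-resource"}},
		// Previous equals current: the file is only deleted once.
		{"unchanged path", "p", "n",
			map[string]string{lastSchemaPathAnnotation: "p"},
			[]string{"p"}},
	}
	for _, c := range cases {
		if got := pathsToDelete(c.specPath, c.resName, c.ann); !reflect.DeepEqual(got, c.want) {
			t.Errorf("%s: got %v, want %v", c.name, got, c.want)
		}
	}
}
```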