Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

📝 Refactor the structure of the global hub agent #1156

Merged
merged 13 commits into from
Oct 15, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
41 changes: 20 additions & 21 deletions agent/Containerfile.operator
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ COPY go.mod go.sum ./
COPY ./agent/ ./agent/
COPY ./pkg/ ./pkg/

RUN CGO_ENABLED=1 GOFLAGS="-p=4" go build -tags strictfipsruntime -a -v -o bin/agent ./agent/cmd/agent/main.go
RUN CGO_ENABLED=1 GOFLAGS="-p=4" go build -tags strictfipsruntime -a -v -o bin/agent ./agent/cmd/main.go

# Stage 2: Copy the binaries from the image builder to the base image
FROM registry.access.redhat.com/ubi9/ubi-minimal:latest
Expand All @@ -29,22 +29,22 @@ ARG IMAGE_SUMMARY
ARG IMAGE_OPENSHIFT_TAGS

LABEL org.label-schema.vendor="Red Hat" \
org.label-schema.name="$IMAGE_NAME_ARCH" \
org.label-schema.description="$IMAGE_DESCRIPTION" \
org.label-schema.vcs-ref=$VCS_REF \
org.label-schema.vcs-url=$VCS_URL \
org.label-schema.license="Red Hat Advanced Cluster Management for Kubernetes EULA" \
org.label-schema.schema-version="1.0" \
name="$IMAGE_NAME" \
maintainer="$IMAGE_MAINTAINER" \
vendor="$IMAGE_VENDOR" \
version="$IMAGE_VERSION" \
release="$IMAGE_RELEASE" \
description="$IMAGE_DESCRIPTION" \
summary="$IMAGE_SUMMARY" \
io.k8s.display-name="$IMAGE_DISPLAY_NAME" \
io.k8s.description="$IMAGE_DESCRIPTION" \
io.openshift.tags="$IMAGE_OPENSHIFT_TAGS"
org.label-schema.name="$IMAGE_NAME_ARCH" \
org.label-schema.description="$IMAGE_DESCRIPTION" \
org.label-schema.vcs-ref=$VCS_REF \
org.label-schema.vcs-url=$VCS_URL \
org.label-schema.license="Red Hat Advanced Cluster Management for Kubernetes EULA" \
org.label-schema.schema-version="1.0" \
name="$IMAGE_NAME" \
maintainer="$IMAGE_MAINTAINER" \
vendor="$IMAGE_VENDOR" \
version="$IMAGE_VERSION" \
release="$IMAGE_RELEASE" \
description="$IMAGE_DESCRIPTION" \
summary="$IMAGE_SUMMARY" \
io.k8s.display-name="$IMAGE_DISPLAY_NAME" \
io.k8s.description="$IMAGE_DESCRIPTION" \
io.openshift.tags="$IMAGE_OPENSHIFT_TAGS"

ENV USER_UID=1001
ENV USER_NAME=agent
Expand All @@ -53,10 +53,9 @@ ENV USER_NAME=agent
COPY --from=builder /workspace/bin/agent /usr/local/bin/agent

COPY ./agent/scripts/user_setup /usr/local/scripts/user_setup
RUN /usr/local/scripts/user_setup
RUN /usr/local/scripts/user_setup

RUN microdnf update -y && \
microdnf clean all
RUN microdnf update -y && microdnf clean all

USER ${USER_UID}
ENTRYPOINT ["/usr/local/bin/agent"]
ENTRYPOINT ["/usr/local/bin/agent"]
7 changes: 3 additions & 4 deletions agent/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ COPY go.mod go.sum ./
COPY ./agent/ ./agent/
COPY ./pkg/ ./pkg/

RUN go build -o bin/agent ./agent/cmd/agent/main.go
RUN go build -o bin/agent ./agent/cmd/main.go
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Need to update the downstream for this change


# Stage 2: Copy the binaries from the image builder to the base image
FROM registry.access.redhat.com/ubi9/ubi-minimal:latest
Expand All @@ -20,10 +20,9 @@ ENV USER_NAME=agent
COPY --from=builder /workspace/bin/agent /usr/local/bin/agent

COPY ./agent/scripts/user_setup /usr/local/scripts/user_setup
RUN /usr/local/scripts/user_setup
RUN /usr/local/scripts/user_setup

RUN microdnf update -y && \
microdnf clean all
RUN microdnf update -y && microdnf clean all

USER ${USER_UID}
ENTRYPOINT ["/usr/local/bin/agent"]
2 changes: 1 addition & 1 deletion agent/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ manifests: controller-gen

.PHONY: build ##builds the binary
build:
@go build -o bin/${COMPONENT} cmd/agent/main.go
@go build -o bin/${COMPONENT} cmd/main.go

.PHONY: clean ##cleans the build directories
clean:
Expand Down
26 changes: 26 additions & 0 deletions agent/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
# Multicluster Global Hub Agent

The **Global Hub Agent** component is responsible for applying resources to the hub cluster (spec path) and reporting the resource status to the Global Hub Manager via Kafka. It also synchronizes events with the Inventory API (status path). Additionally, the agent can be run in [standalone mode](./../doc/event-exporter/README.md), which only enables the status path feature.

## Structure

- **agent**
- **cmd**: Command-line utilities for the agent.
- **pkg**: Contains the core logic and functionalities.
- **configs**: Holds configurations, schemas, and related assets.
  - **controllers**: Common controllers, including the initialization, cluster-claim, and lease controllers.
- **inventory**: The controllers that report resources via the inventory API.
- **spec**:
- **rbac**: Manages role-based access control.
- **syncers**: Syncs resources and signals from the Global Hub Manager.
- **workers**: Backend goroutines that execute tasks received from the spec syncers.
- **status**:
- **filter**: Deduplicates events when reporting resource statuses.
- **generic**: Common implementations for the status syncer.
- **controller**: Specifies the types of resources to be synced.
- **handler**: Updates the bundle synced to the manager by the watched resources.
- **emitter**: Sends the bundle created/updated by the handler to the transport layer (e.g., via CloudEvents).
- **multi-event syncer**: A template for sending multiple events related to a single object, such as the policy syncer.
      - **multi-object syncer**: A template for sending one event related to multiple objects, such as the managed hub info syncer.
- **interfaces**: Defines the behaviors for the Controller, Handler, and Emitter.
- **syncers**: Specifies the resources to be synced, following templates provided by the generic syncers.
34 changes: 13 additions & 21 deletions agent/cmd/agent/main.go → agent/cmd/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,9 +30,8 @@ import (
"sigs.k8s.io/controller-runtime/pkg/log/zap"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"

"github.com/stolostron/multicluster-global-hub/agent/pkg/config"
"github.com/stolostron/multicluster-global-hub/agent/pkg/configs"
"github.com/stolostron/multicluster-global-hub/agent/pkg/controllers"
statusconfig "github.com/stolostron/multicluster-global-hub/agent/pkg/status/controller/config"
"github.com/stolostron/multicluster-global-hub/pkg/constants"
"github.com/stolostron/multicluster-global-hub/pkg/jobs"
commonobjects "github.com/stolostron/multicluster-global-hub/pkg/objects"
Expand Down Expand Up @@ -60,7 +59,7 @@ func main() {
restConfig.QPS = agentConfig.QPS
restConfig.Burst = agentConfig.Burst

c, err := client.New(restConfig, client.Options{Scheme: config.GetRuntimeScheme()})
c, err := client.New(restConfig, client.Options{Scheme: configs.GetRuntimeScheme()})
if err != nil {
setupLog.Error(err, "failed to int controller runtime client")
os.Exit(1)
Expand All @@ -82,7 +81,7 @@ func doTermination(ctx context.Context, c client.Client) int {
}

// function to handle defers with exit, see https://stackoverflow.com/a/27629493/553720.
func doMain(ctx context.Context, restConfig *rest.Config, agentConfig *config.AgentConfig, c client.Client) int {
func doMain(ctx context.Context, restConfig *rest.Config, agentConfig *configs.AgentConfig, c client.Client) int {
if err := completeConfig(ctx, c, agentConfig); err != nil {
setupLog.Error(err, "failed to get managed hub configuration from command line flags")
return 1
Expand All @@ -106,8 +105,8 @@ func doMain(ctx context.Context, restConfig *rest.Config, agentConfig *config.Ag
return 0
}

func parseFlags() *config.AgentConfig {
agentConfig := &config.AgentConfig{
func parseFlags() *configs.AgentConfig {
agentConfig := &configs.AgentConfig{
ElectionConfig: &commonobjects.LeaderElectionConfig{},
TransportConfig: &transport.TransportInternalConfig{
// IsManager specifies the send/receive topics from specTopic and statusTopic
Expand Down Expand Up @@ -162,7 +161,7 @@ func parseFlags() *config.AgentConfig {
return agentConfig
}

func completeConfig(ctx context.Context, c client.Client, agentConfig *config.AgentConfig) error {
func completeConfig(ctx context.Context, c client.Client, agentConfig *configs.AgentConfig) error {
if !agentConfig.Standalone && agentConfig.LeafHubName == "" {
return fmt.Errorf("the leaf-hub-name must not be empty")
}
Expand All @@ -181,7 +180,6 @@ func completeConfig(ctx context.Context, c client.Client, agentConfig *config.Ag
}
agentConfig.LeafHubName = clusterID
}
statusconfig.SetLeafHubName(agentConfig.LeafHubName)

if agentConfig.MetricsAddress == "" {
agentConfig.MetricsAddress = fmt.Sprintf("%s:%d", metricsHost, metricsPort)
Expand All @@ -196,11 +194,11 @@ func completeConfig(ctx context.Context, c client.Client, agentConfig *config.Ag
return fmt.Errorf("flag consumer-worker-pool-size should be in the scope [1, 100]")
}
}
config.SetAgentConfig(agentConfig)
configs.SetAgentConfig(agentConfig)
return nil
}

func createManager(restConfig *rest.Config, agentConfig *config.AgentConfig) (
func createManager(restConfig *rest.Config, agentConfig *configs.AgentConfig) (
ctrl.Manager, error,
) {
leaseDuration := time.Duration(agentConfig.ElectionConfig.LeaseDuration) * time.Second
Expand All @@ -223,7 +221,7 @@ func createManager(restConfig *rest.Config, agentConfig *config.AgentConfig) (
BindAddress: agentConfig.MetricsAddress,
},
LeaderElection: true,
Scheme: config.GetRuntimeScheme(),
Scheme: configs.GetRuntimeScheme(),
LeaderElectionConfig: leaderElectionConfig,
LeaderElectionID: leaderElectionLockID,
LeaderElectionNamespace: agentConfig.PodNamespace,
Expand Down Expand Up @@ -253,16 +251,10 @@ func createManager(restConfig *rest.Config, agentConfig *config.AgentConfig) (
}

// if the transport consumer and producer is ready then the func will be invoked by the transport controller
func transportCallback(mgr ctrl.Manager, agentConfig *config.AgentConfig,
func transportCallback(mgr ctrl.Manager, agentConfig *configs.AgentConfig,
) controller.TransportCallback {
return func(transportClient transport.TransportClient) error {
// Need this controller to update the value of clusterclaim hub.open-cluster-management.io
// we use the value to decide whether install the ACM or not
if err := controllers.AddHubClusterClaimController(mgr); err != nil {
return fmt.Errorf("failed to add hub.open-cluster-management.io clusterclaim controller: %w", err)
}

if err := controllers.AddCRDController(mgr, mgr.GetConfig(), agentConfig, transportClient); err != nil {
if err := controllers.AddInitController(mgr, mgr.GetConfig(), agentConfig, transportClient); err != nil {
return fmt.Errorf("failed to add crd controller: %w", err)
}

Expand All @@ -284,9 +276,9 @@ func initCache(restConfig *rest.Config, cacheOpts cache.Options) (cache.Cache, e
&clusterv1beta1.PlacementDecision{}: {},
&appsv1alpha1.SubscriptionReport{}: {},
&coordinationv1.Lease{}: {
Field: fields.OneTermEqualSelector("metadata.namespace", config.GetAgentConfig().PodNamespace),
Field: fields.OneTermEqualSelector("metadata.namespace", configs.GetAgentConfig().PodNamespace),
},
&corev1.Event{}: {}, // TODO: need a filter for the target events
&corev1.Event{}: {},
}
return cache.New(restConfig, cacheOpts)
}
27 changes: 14 additions & 13 deletions agent/cmd/agent/main_test.go → agent/cmd/main_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,8 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"

"github.com/stolostron/multicluster-global-hub/agent/pkg/config"
"github.com/stolostron/multicluster-global-hub/agent/pkg/configs"
config "github.com/stolostron/multicluster-global-hub/agent/pkg/configs"
"github.com/stolostron/multicluster-global-hub/pkg/transport"
)

Expand All @@ -38,14 +39,14 @@ func TestParseFlags(t *testing.T) {
func TestCompleteConfig(t *testing.T) {
testCases := []struct {
name string
agentConfig *config.AgentConfig
agentConfig *configs.AgentConfig
fakeClient client.Client
expectConfig *config.AgentConfig
expectConfig *configs.AgentConfig
expectErrorMsg string
}{
{
name: "Invalid leaf-hub-name without standalone mode",
agentConfig: &config.AgentConfig{
agentConfig: &configs.AgentConfig{
LeafHubName: "",
Standalone: false,
},
Expand All @@ -57,7 +58,7 @@ func TestCompleteConfig(t *testing.T) {
},
{
name: "Empty leaf-hub-name(clusterId) with standalone mode",
agentConfig: &config.AgentConfig{
agentConfig: &configs.AgentConfig{
LeafHubName: "",
Standalone: true,
},
Expand All @@ -69,7 +70,7 @@ func TestCompleteConfig(t *testing.T) {
},
{
name: "Invalid leaf-hub-name(clusterId) under standalone mode",
agentConfig: &config.AgentConfig{
agentConfig: &configs.AgentConfig{
LeafHubName: "",
Standalone: true,
},
Expand All @@ -78,7 +79,7 @@ func TestCompleteConfig(t *testing.T) {
},
{
name: "Valid configuration under standalone mode",
agentConfig: &config.AgentConfig{
agentConfig: &configs.AgentConfig{
LeafHubName: "",
Standalone: true,
TransportConfig: &transport.TransportInternalConfig{
Expand All @@ -90,7 +91,7 @@ func TestCompleteConfig(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{Name: "version"},
Spec: configv1.ClusterVersionSpec{ClusterID: configv1.ClusterID("123")},
}).Build(),
expectConfig: &config.AgentConfig{
expectConfig: &configs.AgentConfig{
LeafHubName: "123",
Standalone: true,
SpecWorkPoolSize: 0,
Expand All @@ -103,19 +104,19 @@ func TestCompleteConfig(t *testing.T) {
},
{
name: "Invalid work pool size",
agentConfig: &config.AgentConfig{
agentConfig: &configs.AgentConfig{
LeafHubName: "hub1",
Standalone: false,
TransportConfig: &transport.TransportInternalConfig{
TransportType: string(transport.Kafka),
},
},
fakeClient: fake.NewClientBuilder().WithScheme(config.GetRuntimeScheme()).WithObjects().Build(),
fakeClient: fake.NewClientBuilder().WithScheme(configs.GetRuntimeScheme()).WithObjects().Build(),
expectErrorMsg: "flag consumer-worker-pool-size should be in the scope [1, 100]",
},
{
name: "Valid configuration without standalone mode",
agentConfig: &config.AgentConfig{
agentConfig: &configs.AgentConfig{
LeafHubName: "hub1",
Standalone: false,
SpecWorkPoolSize: 5,
Expand All @@ -124,7 +125,7 @@ func TestCompleteConfig(t *testing.T) {
},
},
fakeClient: fake.NewClientBuilder().WithScheme(config.GetRuntimeScheme()).WithObjects().Build(),
expectConfig: &config.AgentConfig{
expectConfig: &configs.AgentConfig{
LeafHubName: "hub1",
Standalone: false,
SpecWorkPoolSize: 5,
Expand Down Expand Up @@ -152,7 +153,7 @@ func TestCompleteConfig(t *testing.T) {
}

func TestDoMain(t *testing.T) {
agentConfig := &config.AgentConfig{
agentConfig := &configs.AgentConfig{
LeafHubName: "hub1",
Standalone: false,
SpecWorkPoolSize: 0,
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
package config
package configs

import (
"time"
Expand Down Expand Up @@ -36,6 +36,10 @@ func GetAgentConfig() *AgentConfig {
return agentConfigData
}

func GetLeafHubName() string {
return agentConfigData.LeafHubName
}

var mchVersion string

func GetMCHVersion() string {
Expand Down
2 changes: 1 addition & 1 deletion agent/pkg/config/scheme.go → agent/pkg/configs/scheme.go
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
// Copyright (c) 2023 Red Hat, Inc.
// Copyright Contributors to the Open Cluster Management project

package config
package configs

import (
configv1 "github.com/openshift/api/config/v1"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"

"github.com/stolostron/multicluster-global-hub/agent/pkg/config"
"github.com/stolostron/multicluster-global-hub/agent/pkg/configs"
"github.com/stolostron/multicluster-global-hub/pkg/constants"
)

Expand All @@ -28,7 +28,7 @@ type versionClusterClaimController struct {
log logr.Logger
}

// TODO: consider to unify the hub and version claim in one controller
// consider to unify the hub and version claim in one controller
func (c *versionClusterClaimController) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {
reqLogger := c.log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
reqLogger.V(2).Info("cluster claim controller", "NamespacedName:", request.NamespacedName)
Expand All @@ -40,7 +40,7 @@ func (c *versionClusterClaimController) Reconcile(ctx context.Context, request c
}

if mch != nil && mch.Status.CurrentVersion != "" {
config.SetMCHVersion(mch.Status.CurrentVersion)
configs.SetMCHVersion(mch.Status.CurrentVersion)
return ctrl.Result{}, updateClusterClaim(ctx, c.client,
constants.VersionClusterClaimName, mch.Status.CurrentVersion)
}
Expand All @@ -58,6 +58,7 @@ func AddVersionClusterClaimController(mgr ctrl.Manager) error {
constants.GlobalHubOwnerLabelKey: constants.GHAgentOwnerLabelValue,
},
})

err := ctrl.NewControllerManagedBy(mgr).Named("clusterclaim-controller").
For(&clustersv1alpha1.ClusterClaim{}, builder.WithPredicates(clusterClaimPredicate)).
Watches(&mchv1.MultiClusterHub{}, &handler.EnqueueRequestForObject{}).
Expand Down
Loading
Loading