Commit 2063132

spec

Signed-off-by: myan <[email protected]>
add
Signed-off-by: myan <[email protected]>
format agent
Signed-off-by: myan <[email protected]>
add the document
Signed-off-by: myan <[email protected]>
document
Signed-off-by: myan <[email protected]>

1 parent 131e005 · commit 2063132

File tree

89 files changed: +1694 -1786 lines


agent/Containerfile.operator (+21 -21)

Aside from the go build source path, every changed line in this file reappears with identical content; the remaining -/+ pairs are whitespace-only changes that the extracted page does not preserve.

@@ -10,7 +10,7 @@ COPY go.mod go.sum ./
 COPY ./agent/ ./agent/
 COPY ./pkg/ ./pkg/
 
-RUN CGO_ENABLED=1 GOFLAGS="-p=4" go build -tags strictfipsruntime -a -v -o bin/agent ./agent/cmd/agent/main.go
+RUN CGO_ENABLED=1 GOFLAGS="-p=4" go build -tags strictfipsruntime -a -v -o bin/agent ./agent/cmd/main.go
 
 # Stage 2: Copy the binaries from the image builder to the base image
 FROM registry.access.redhat.com/ubi9/ubi-minimal:latest
@@ -29,22 +29,22 @@ ARG IMAGE_SUMMARY
 ARG IMAGE_OPENSHIFT_TAGS
 
 LABEL org.label-schema.vendor="Red Hat" \
-    org.label-schema.name="$IMAGE_NAME_ARCH" \
-    org.label-schema.description="$IMAGE_DESCRIPTION" \
-    org.label-schema.vcs-ref=$VCS_REF \
-    org.label-schema.vcs-url=$VCS_URL \
-    org.label-schema.license="Red Hat Advanced Cluster Management for Kubernetes EULA" \
-    org.label-schema.schema-version="1.0" \
-    name="$IMAGE_NAME" \
-    maintainer="$IMAGE_MAINTAINER" \
-    vendor="$IMAGE_VENDOR" \
-    version="$IMAGE_VERSION" \
-    release="$IMAGE_RELEASE" \
-    description="$IMAGE_DESCRIPTION" \
-    summary="$IMAGE_SUMMARY" \
-    io.k8s.display-name="$IMAGE_DISPLAY_NAME" \
-    io.k8s.description="$IMAGE_DESCRIPTION" \
-    io.openshift.tags="$IMAGE_OPENSHIFT_TAGS"
+    org.label-schema.name="$IMAGE_NAME_ARCH" \
+    org.label-schema.description="$IMAGE_DESCRIPTION" \
+    org.label-schema.vcs-ref=$VCS_REF \
+    org.label-schema.vcs-url=$VCS_URL \
+    org.label-schema.license="Red Hat Advanced Cluster Management for Kubernetes EULA" \
+    org.label-schema.schema-version="1.0" \
+    name="$IMAGE_NAME" \
+    maintainer="$IMAGE_MAINTAINER" \
+    vendor="$IMAGE_VENDOR" \
+    version="$IMAGE_VERSION" \
+    release="$IMAGE_RELEASE" \
+    description="$IMAGE_DESCRIPTION" \
+    summary="$IMAGE_SUMMARY" \
+    io.k8s.display-name="$IMAGE_DISPLAY_NAME" \
+    io.k8s.description="$IMAGE_DESCRIPTION" \
+    io.openshift.tags="$IMAGE_OPENSHIFT_TAGS"
 
 ENV USER_UID=1001
 ENV USER_NAME=agent
@@ -53,10 +53,10 @@ ENV USER_NAME=agent
 COPY --from=builder /workspace/bin/agent /usr/local/bin/agent
 
 COPY ./agent/scripts/user_setup /usr/local/scripts/user_setup
-RUN /usr/local/scripts/user_setup
+RUN /usr/local/scripts/user_setup
 
-RUN microdnf update -y && \
-    microdnf clean all
+RUN microdnf update -y && \
+    microdnf clean all
 
 USER ${USER_UID}
-ENTRYPOINT ["/usr/local/bin/agent"]
+ENTRYPOINT ["/usr/local/bin/agent"]

agent/Dockerfile (+3 -4)

@@ -9,7 +9,7 @@ COPY go.mod go.sum ./
 COPY ./agent/ ./agent/
 COPY ./pkg/ ./pkg/
 
-RUN go build -o bin/agent ./agent/cmd/agent/main.go
+RUN go build -o bin/agent ./agent/cmd/main.go
 
 # Stage 2: Copy the binaries from the image builder to the base image
 FROM registry.access.redhat.com/ubi9/ubi-minimal:latest
@@ -20,10 +20,9 @@ ENV USER_NAME=agent
 COPY --from=builder /workspace/bin/agent /usr/local/bin/agent
 
 COPY ./agent/scripts/user_setup /usr/local/scripts/user_setup
-RUN /usr/local/scripts/user_setup
+RUN /usr/local/scripts/user_setup
 
-RUN microdnf update -y && \
-    microdnf clean all
+RUN microdnf update -y && microdnf clean all
 
 USER ${USER_UID}
 ENTRYPOINT ["/usr/local/bin/agent"]

agent/Makefile (+1 -1)

@@ -36,7 +36,7 @@ manifests: controller-gen
 
 .PHONY: build ##builds the binary
 build:
-	@go build -o bin/${COMPONENT} cmd/agent/main.go
+	@go build -o bin/${COMPONENT} cmd/main.go
 
 .PHONY: clean ##cleans the build directories
 clean:

agent/README.md (new file, +25)

@@ -0,0 +1,25 @@
+# Multicluster Global Hub Agent
+
+The **Global Hub Agent** component applies resources to the hub cluster (spec path) and reports resource status to the Global Hub Manager via Kafka. It also synchronizes events with the Inventory API (status path). Additionally, the agent can run in [standalone mode](./../doc/event-exporter/README.md), which enables only the status path.
+
+## Structure
+
+- **agent**
+  - **cmd**: Command-line utilities for the agent.
+  - **pkg**: Contains the core logic and functionality.
+    - **configs**: Holds configurations, schemas, and related assets.
+    - **controllers**: Common controllers, including the initialization, cluster claim, and lease controllers.
+    - **spec**:
+      - **rbac**: Manages role-based access control.
+      - **syncers**: Syncs resources and signals from the Global Hub Manager.
+      - **workers**: Backend goroutines that execute tasks received from the spec syncers.
+    - **status**:
+      - **filter**: Deduplicates events when reporting resource statuses.
+      - **generic**: Common implementations for the status syncer.
+        - **controller**: Specifies the types of resources to be synced.
+        - **handler**: Updates the bundle synced to the manager from the watched resources.
+        - **emitter**: Sends the bundle created/updated by the handler to the transport layer (e.g., via CloudEvents).
+        - **multi-event syncer**: A template for sending multiple events related to a single object, such as the policy syncer.
+        - **multi-object syncer**: A template for sending one event related to multiple objects, such as the managedhub info syncer.
+      - **interfaces**: Defines the behaviors of the Controller, Handler, and Emitter (sketched below).
+    - **syncers**: Specifies the resources to be synced, following the templates provided by the generic syncers.

agent/cmd/agent/main.go → agent/cmd/main.go (+12 -20)

@@ -30,9 +30,8 @@ import (
     "sigs.k8s.io/controller-runtime/pkg/log/zap"
     metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
 
-    "github.com/stolostron/multicluster-global-hub/agent/pkg/config"
+    "github.com/stolostron/multicluster-global-hub/agent/pkg/configs"
     "github.com/stolostron/multicluster-global-hub/agent/pkg/controllers"
-    statusconfig "github.com/stolostron/multicluster-global-hub/agent/pkg/status/controller/config"
     "github.com/stolostron/multicluster-global-hub/pkg/constants"
     "github.com/stolostron/multicluster-global-hub/pkg/jobs"
     commonobjects "github.com/stolostron/multicluster-global-hub/pkg/objects"
@@ -60,7 +59,7 @@ func main() {
     restConfig.QPS = agentConfig.QPS
     restConfig.Burst = agentConfig.Burst
 
-    c, err := client.New(restConfig, client.Options{Scheme: config.GetRuntimeScheme()})
+    c, err := client.New(restConfig, client.Options{Scheme: configs.GetRuntimeScheme()})
     if err != nil {
         setupLog.Error(err, "failed to int controller runtime client")
         os.Exit(1)
@@ -82,7 +81,7 @@ func doTermination(ctx context.Context, c client.Client) int {
 }
 
 // function to handle defers with exit, see https://stackoverflow.com/a/27629493/553720.
-func doMain(ctx context.Context, restConfig *rest.Config, agentConfig *config.AgentConfig, c client.Client) int {
+func doMain(ctx context.Context, restConfig *rest.Config, agentConfig *configs.AgentConfig, c client.Client) int {
     if err := completeConfig(ctx, c, agentConfig); err != nil {
         setupLog.Error(err, "failed to get managed hub configuration from command line flags")
         return 1
@@ -106,8 +105,8 @@ func doMain(ctx context.Context, restConfig *rest.Config, agentConfig *config.Ag
     return 0
 }
 
-func parseFlags() *config.AgentConfig {
-    agentConfig := &config.AgentConfig{
+func parseFlags() *configs.AgentConfig {
+    agentConfig := &configs.AgentConfig{
         ElectionConfig: &commonobjects.LeaderElectionConfig{},
         TransportConfig: &transport.TransportInternalConfig{
             // IsManager specifies the send/receive topics from specTopic and statusTopic
@@ -162,7 +161,7 @@ func parseFlags() *config.AgentConfig {
     return agentConfig
 }
 
-func completeConfig(ctx context.Context, c client.Client, agentConfig *config.AgentConfig) error {
+func completeConfig(ctx context.Context, c client.Client, agentConfig *configs.AgentConfig) error {
     if !agentConfig.Standalone && agentConfig.LeafHubName == "" {
         return fmt.Errorf("the leaf-hub-name must not be empty")
     }
@@ -181,7 +180,6 @@ func completeConfig(ctx context.Context, c client.Client, agentConfig *config.Ag
     }
         agentConfig.LeafHubName = clusterID
     }
-    statusconfig.SetLeafHubName(agentConfig.LeafHubName)
 
     if agentConfig.MetricsAddress == "" {
         agentConfig.MetricsAddress = fmt.Sprintf("%s:%d", metricsHost, metricsPort)
@@ -196,11 +194,11 @@ func completeConfig(ctx context.Context, c client.Client, agentConfig *config.Ag
             return fmt.Errorf("flag consumer-worker-pool-size should be in the scope [1, 100]")
         }
     }
-    config.SetAgentConfig(agentConfig)
+    configs.SetAgentConfig(agentConfig)
     return nil
 }
 
-func createManager(restConfig *rest.Config, agentConfig *config.AgentConfig) (
+func createManager(restConfig *rest.Config, agentConfig *configs.AgentConfig) (
     ctrl.Manager, error,
 ) {
     leaseDuration := time.Duration(agentConfig.ElectionConfig.LeaseDuration) * time.Second
@@ -223,7 +221,7 @@ func createManager(restConfig *rest.Config, agentConfig *config.AgentConfig) (
         BindAddress: agentConfig.MetricsAddress,
         },
         LeaderElection:          true,
-        Scheme:                  config.GetRuntimeScheme(),
+        Scheme:                  configs.GetRuntimeScheme(),
         LeaderElectionConfig:    leaderElectionConfig,
         LeaderElectionID:        leaderElectionLockID,
         LeaderElectionNamespace: agentConfig.PodNamespace,
@@ -253,16 +251,10 @@ func createManager(restConfig *rest.Config, agentConfig *config.AgentConfig) (
 }
 
 // if the transport consumer and producer is ready then the func will be invoked by the transport controller
-func transportCallback(mgr ctrl.Manager, agentConfig *config.AgentConfig,
+func transportCallback(mgr ctrl.Manager, agentConfig *configs.AgentConfig,
 ) controller.TransportCallback {
     return func(transportClient transport.TransportClient) error {
-        // Need this controller to update the value of clusterclaim hub.open-cluster-management.io
-        // we use the value to decide whether install the ACM or not
-        if err := controllers.AddHubClusterClaimController(mgr); err != nil {
-            return fmt.Errorf("failed to add hub.open-cluster-management.io clusterclaim controller: %w", err)
-        }
-
-        if err := controllers.AddCRDController(mgr, mgr.GetConfig(), agentConfig, transportClient); err != nil {
+        if err := controllers.AddInitController(mgr, mgr.GetConfig(), agentConfig, transportClient); err != nil {
             return fmt.Errorf("failed to add crd controller: %w", err)
         }
 
@@ -284,7 +276,7 @@ func initCache(restConfig *rest.Config, cacheOpts cache.Options) (cache.Cache, e
         &clusterv1beta1.PlacementDecision{}: {},
         &appsv1alpha1.SubscriptionReport{}:  {},
         &coordinationv1.Lease{}: {
-            Field: fields.OneTermEqualSelector("metadata.namespace", config.GetAgentConfig().PodNamespace),
+            Field: fields.OneTermEqualSelector("metadata.namespace", configs.GetAgentConfig().PodNamespace),
         },
         &corev1.Event{}: {}, // TODO: need a filter for the target events
     }
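The transportCallback hunk consolidates startup: the hub clusterclaim and CRD controllers that used to be registered one by one are now behind a single controllers.AddInitController call. Below is a minimal sketch of that consolidation, assuming AddInitController simply performs the previously separate registrations in order; the lowercase helper names are hypothetical stand-ins, not the repository's actual internals.

package controllers

import (
    "fmt"

    "k8s.io/client-go/rest"
    ctrl "sigs.k8s.io/controller-runtime"

    "github.com/stolostron/multicluster-global-hub/agent/pkg/configs"
    "github.com/stolostron/multicluster-global-hub/pkg/transport"
)

// AddInitController registers the one-shot setup controllers behind a
// single entry point; the body here is an assumption based on the
// removed code in the diff above.
func AddInitController(mgr ctrl.Manager, restConfig *rest.Config,
    agentConfig *configs.AgentConfig, transportClient transport.TransportClient,
) error {
    // The clusterclaim hub.open-cluster-management.io is used to decide
    // whether to install ACM (per the comment removed in the diff).
    if err := addHubClusterClaimController(mgr); err != nil {
        return fmt.Errorf("failed to add hub clusterclaim controller: %w", err)
    }
    return addCRDController(mgr, restConfig, agentConfig, transportClient)
}

// Hypothetical stubs standing in for the real registration helpers.
func addHubClusterClaimController(mgr ctrl.Manager) error { return nil }

func addCRDController(mgr ctrl.Manager, restConfig *rest.Config,
    agentConfig *configs.AgentConfig, transportClient transport.TransportClient,
) error {
    return nil
}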

agent/cmd/agent/main_test.go → agent/cmd/main_test.go (+14 -13)

@@ -11,7 +11,8 @@ import (
     "sigs.k8s.io/controller-runtime/pkg/client"
     "sigs.k8s.io/controller-runtime/pkg/client/fake"
 
-    "github.com/stolostron/multicluster-global-hub/agent/pkg/config"
+    "github.com/stolostron/multicluster-global-hub/agent/pkg/configs"
+    config "github.com/stolostron/multicluster-global-hub/agent/pkg/configs"
     "github.com/stolostron/multicluster-global-hub/pkg/transport"
 )
 
@@ -38,14 +39,14 @@ func TestParseFlags(t *testing.T) {
 func TestCompleteConfig(t *testing.T) {
     testCases := []struct {
         name           string
-        agentConfig    *config.AgentConfig
+        agentConfig    *configs.AgentConfig
         fakeClient     client.Client
-        expectConfig   *config.AgentConfig
+        expectConfig   *configs.AgentConfig
         expectErrorMsg string
     }{
         {
             name: "Invalid leaf-hub-name without standalone mode",
-            agentConfig: &config.AgentConfig{
+            agentConfig: &configs.AgentConfig{
                 LeafHubName: "",
                 Standalone:  false,
             },
@@ -57,7 +58,7 @@
         },
         {
             name: "Empty leaf-hub-name(clusterId) with standalone mode",
-            agentConfig: &config.AgentConfig{
+            agentConfig: &configs.AgentConfig{
                 LeafHubName: "",
                 Standalone:  true,
             },
@@ -69,7 +70,7 @@
         },
         {
             name: "Invalid leaf-hub-name(clusterId) under standalone mode",
-            agentConfig: &config.AgentConfig{
+            agentConfig: &configs.AgentConfig{
                 LeafHubName: "",
                 Standalone:  true,
             },
@@ -78,7 +79,7 @@
         },
         {
             name: "Valid configuration under standalone mode",
-            agentConfig: &config.AgentConfig{
+            agentConfig: &configs.AgentConfig{
                 LeafHubName: "",
                 Standalone:  true,
                 TransportConfig: &transport.TransportInternalConfig{
@@ -90,7 +91,7 @@
                 ObjectMeta: metav1.ObjectMeta{Name: "version"},
                 Spec:       configv1.ClusterVersionSpec{ClusterID: configv1.ClusterID("123")},
             }).Build(),
-            expectConfig: &config.AgentConfig{
+            expectConfig: &configs.AgentConfig{
                 LeafHubName:      "123",
                 Standalone:       true,
                 SpecWorkPoolSize: 0,
@@ -103,19 +104,19 @@
         },
         {
             name: "Invalid work pool size",
-            agentConfig: &config.AgentConfig{
+            agentConfig: &configs.AgentConfig{
                 LeafHubName: "hub1",
                 Standalone:  false,
                 TransportConfig: &transport.TransportInternalConfig{
                     TransportType: string(transport.Kafka),
                 },
             },
-            fakeClient:     fake.NewClientBuilder().WithScheme(config.GetRuntimeScheme()).WithObjects().Build(),
+            fakeClient:     fake.NewClientBuilder().WithScheme(configs.GetRuntimeScheme()).WithObjects().Build(),
             expectErrorMsg: "flag consumer-worker-pool-size should be in the scope [1, 100]",
         },
         {
             name: "Valid configuration without standalone mode",
-            agentConfig: &config.AgentConfig{
+            agentConfig: &configs.AgentConfig{
                 LeafHubName:      "hub1",
                 Standalone:       false,
                 SpecWorkPoolSize: 5,
@@ -124,7 +125,7 @@
                 },
             },
             fakeClient: fake.NewClientBuilder().WithScheme(config.GetRuntimeScheme()).WithObjects().Build(),
-            expectConfig: &config.AgentConfig{
+            expectConfig: &configs.AgentConfig{
                 LeafHubName:      "hub1",
                 Standalone:       false,
                 SpecWorkPoolSize: 5,
@@ -152,7 +153,7 @@
     }
 }
 
 func TestDoMain(t *testing.T) {
-    agentConfig := &config.AgentConfig{
+    agentConfig := &configs.AgentConfig{
         LeafHubName:      "hub1",
         Standalone:       false,
         SpecWorkPoolSize: 0,

agent/pkg/config/agent_config.go → agent/pkg/configs/agent_config.go (+5 -1)

@@ -1,4 +1,4 @@
-package config
+package configs
 
 import (
     "time"
@@ -36,6 +36,10 @@ func GetAgentConfig() *AgentConfig {
     return agentConfigData
 }
 
+func GetLeafHubName() string {
+    return agentConfigData.LeafHubName
+}
+
 var mchVersion string
 
 func GetMCHVersion() string {
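The new GetLeafHubName accessor reads from the same package-level agentConfigData that SetAgentConfig populates, which is what lets the main.go diff drop the separate statusconfig.SetLeafHubName call. A minimal usage sketch, assuming SetAgentConfig has already run during startup as completeConfig does:

package main

import (
    "fmt"

    "github.com/stolostron/multicluster-global-hub/agent/pkg/configs"
)

func main() {
    // Startup wiring: completeConfig calls configs.SetAgentConfig once,
    // after which any package reads shared settings through accessors.
    configs.SetAgentConfig(&configs.AgentConfig{LeafHubName: "hub1"})

    // Status-path code no longer maintains its own copy of the hub name.
    fmt.Println(configs.GetLeafHubName()) // prints: hub1
}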

agent/pkg/config/scheme.go → agent/pkg/configs/scheme.go (+1 -1)

@@ -1,7 +1,7 @@
 // Copyright (c) 2023 Red Hat, Inc.
 // Copyright Contributors to the Open Cluster Management project
 
-package config
+package configs
 
 import (
     configv1 "github.com/openshift/api/config/v1"

agent/pkg/controllers/version_clusterclaim_controller.go → agent/pkg/controllers/clusterclaim_version_controller.go (+3 -2)

@@ -17,7 +17,7 @@ import (
     "sigs.k8s.io/controller-runtime/pkg/handler"
     "sigs.k8s.io/controller-runtime/pkg/predicate"
 
-    "github.com/stolostron/multicluster-global-hub/agent/pkg/config"
+    "github.com/stolostron/multicluster-global-hub/agent/pkg/configs"
     "github.com/stolostron/multicluster-global-hub/pkg/constants"
 )
 
@@ -40,7 +40,7 @@ func (c *versionClusterClaimController) Reconcile(ctx context.Context, request c
     }
 
     if mch != nil && mch.Status.CurrentVersion != "" {
-        config.SetMCHVersion(mch.Status.CurrentVersion)
+        configs.SetMCHVersion(mch.Status.CurrentVersion)
         return ctrl.Result{}, updateClusterClaim(ctx, c.client,
             constants.VersionClusterClaimName, mch.Status.CurrentVersion)
     }
@@ -58,6 +58,7 @@ func AddVersionClusterClaimController(mgr ctrl.Manager) error {
             constants.GlobalHubOwnerLabelKey: constants.GHAgentOwnerLabelValue,
         },
     })
+
     err := ctrl.NewControllerManagedBy(mgr).Named("clusterclaim-controller").
         For(&clustersv1alpha1.ClusterClaim{}, builder.WithPredicates(clusterClaimPredicate)).
         Watches(&mchv1.MultiClusterHub{}, &handler.EnqueueRequestForObject{}).
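The controller above gates its ClusterClaim watch with a label-based predicate (the GlobalHubOwnerLabelKey/GHAgentOwnerLabelValue pair shown in the last hunk). A minimal, self-contained sketch of that filtering pattern with controller-runtime; the constructor below is illustrative, not the file's actual clusterClaimPredicate:

package controllers

import (
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/predicate"
)

// newLabelPredicate reconciles only objects carrying the given label,
// mirroring how the watch is restricted to claims owned by the agent.
func newLabelPredicate(key, value string) predicate.Funcs {
    return predicate.NewPredicateFuncs(func(obj client.Object) bool {
        return obj.GetLabels()[key] == value
    })
}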
