Skip to content

Commit e8dff26

Browse files
committed
debug e2e
Signed-off-by: root <[email protected]>
1 parent 48cf882 commit e8dff26

File tree

19 files changed

+667
-29
lines changed

19 files changed

+667
-29
lines changed

agent/Makefile

-2
Original file line numberDiff line numberDiff line change
@@ -48,8 +48,6 @@ clean-all: clean-vendor clean
4848
.PHONY: lint ##runs code analysis tools
4949
lint:
5050
go vet ./cmd/... ./pkg/...
51-
golint ./cmd/... ./pkg/...
52-
golangci-lint run ./cmd/... ./pkg/...
5351

5452
.PHONY: help ##show this help message
5553
help:

agent/pkg/spec/dispatcher.go

+16
Original file line numberDiff line numberDiff line change
@@ -59,42 +59,58 @@ func (d *genericDispatcher) Start(ctx context.Context) error {
5959
}
6060

6161
func (d *genericDispatcher) dispatch(ctx context.Context) {
62+
d.log.Debugf("in dispatch")
6263
for {
6364
select {
6465
case <-ctx.Done():
6566
return
6667
case evt := <-d.consumer.EventChan():
68+
d.log.Debugf("in event chan")
6769
// if destination is explicitly specified and does not match, drop bundle
6870
clusterNameVal, err := evt.Context.GetExtension(constants.CloudEventExtensionKeyClusterName)
6971
if err != nil {
7072
d.log.Infow("event dropped due to cluster name retrieval error", "error", err)
7173
continue
7274
}
75+
d.log.Debugf("in event chan")
76+
7377
clusterName, ok := clusterNameVal.(string)
7478
if !ok {
7579
d.log.Infow("event dropped due to invalid cluster name", "clusterName", clusterNameVal)
7680
continue
7781
}
82+
d.log.Debugf("in event chan:%v", clusterName)
83+
7884
if clusterName != transport.Broadcast && clusterName != d.agentConfig.LeafHubName {
7985
// d.log.Infow("event dropped due to cluster name mismatch", "clusterName", clusterName)
8086
continue
8187
}
88+
d.log.Debugf("in event chan")
89+
8290
syncer, found := d.syncers[evt.Type()]
8391
if !found {
8492
d.log.Debugw("dispatching to the default generic syncer", "eventType", evt.Type())
8593
syncer = d.syncers[constants.GenericSpecMsgKey]
8694
}
95+
d.log.Debugf("in event chan")
96+
8797
if syncer == nil || evt == nil {
8898
d.log.Warnw("nil syncer or event: incompatible event will be resolved after upgrade.",
8999
"syncer", syncer, "event", evt)
90100
continue
91101
}
102+
d.log.Debugf("in event chan")
103+
92104
if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
105+
d.log.Debugf("in event chan")
106+
93107
if err := syncer.Sync(ctx, evt.Data()); err != nil {
108+
d.log.Debugf("in event chan:%v", err)
94109
return err
95110
}
96111
return nil
97112
}); err != nil {
113+
d.log.Debugf("in event chan:%v", err)
98114
d.log.Errorw("sync failed", "type", evt.Type(), "error", err)
99115
}
100116
}

agent/pkg/spec/spec.go

+5-2
Original file line numberDiff line numberDiff line change
@@ -18,13 +18,14 @@ func AddToManager(context context.Context, mgr ctrl.Manager, transportClient tra
1818
agentConfig *configs.AgentConfig,
1919
) error {
2020
log := logger.DefaultZapLogger()
21+
2122
if transportClient.GetConsumer() == nil {
2223
log.Info("the consumer is not initialized for the spec controllers")
23-
return nil
24+
return fmt.Errorf("the consumer is not initialized")
2425
}
2526
if transportClient.GetProducer() == nil {
2627
log.Info("the producer is not initialized for the spec controllers")
27-
return nil
28+
return fmt.Errorf("the producer is not initialized")
2829
}
2930

3031
// add worker pool to manager
@@ -39,10 +40,12 @@ func AddToManager(context context.Context, mgr ctrl.Manager, transportClient tra
3940
return fmt.Errorf("failed to add bundle dispatcher to runtime manager: %w", err)
4041
}
4142

43+
log.Infof("agentConfig.EnableGlobalResource:%v", agentConfig.EnableGlobalResource)
4244
// register syncer to the dispatcher
4345
if agentConfig.EnableGlobalResource {
4446
dispatcher.RegisterSyncer(constants.GenericSpecMsgKey,
4547
syncers.NewGenericSyncer(workers, agentConfig))
48+
log.Debugf("regist ManagedClustersLabelsMsgKey")
4649
dispatcher.RegisterSyncer(constants.ManagedClustersLabelsMsgKey,
4750
syncers.NewManagedClusterLabelSyncer(workers))
4851
}

agent/pkg/spec/syncers/clusterlabel_syncer.go

+10-1
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,8 @@ import (
2020
"github.com/stolostron/multicluster-global-hub/pkg/utils"
2121
)
2222

23+
var log = logger.DefaultZapLogger()
24+
2325
const (
2426
// periodicApplyInterval = 5 * time.Second
2527
hohFieldManager = "mgh-agent"
@@ -51,14 +53,17 @@ func (syncer *managedClusterLabelsBundleSyncer) Sync(ctx context.Context, payloa
5153
if err := json.Unmarshal(payload, bundle); err != nil {
5254
return err
5355
}
56+
syncer.log.Debugf("start sync bundle: %v", bundle)
5457
syncer.setLatestBundle(bundle) // uses latestBundle
58+
syncer.log.Debugf("handle bundle: %v", bundle)
5559
syncer.handleBundle()
5660

5761
return nil
5862
}
5963

6064
func (syncer *managedClusterLabelsBundleSyncer) setLatestBundle(newBundle *specbundle.ManagedClusterLabelsSpecBundle) {
6165
syncer.latestBundleLock.Lock()
66+
syncer.log.Debugf("lock sync bundle")
6267
defer syncer.latestBundleLock.Unlock()
6368

6469
syncer.latestBundle = newBundle
@@ -67,8 +72,10 @@ func (syncer *managedClusterLabelsBundleSyncer) setLatestBundle(newBundle *specb
6772
func (syncer *managedClusterLabelsBundleSyncer) handleBundle() {
6873
syncer.latestBundleLock.Lock()
6974
defer syncer.latestBundleLock.Unlock()
70-
75+
log.Debugf("handle managedClusterLabelsBundleSyncer bundle")
7176
for _, managedClusterLabelsSpec := range syncer.latestBundle.Objects {
77+
log.Debugf("managedClusterLabelsSpec:%v", *managedClusterLabelsSpec)
78+
7279
lastProcessedTimestampPtr := syncer.getManagedClusterLastProcessedTimestamp(managedClusterLabelsSpec.ClusterName)
7380
if managedClusterLabelsSpec.UpdateTimestamp.After(*lastProcessedTimestampPtr) { // handle (success) once
7481
syncer.bundleProcessingWaitingGroup.Add(1)
@@ -115,11 +122,13 @@ func (s *managedClusterLabelsBundleSyncer) updateManagedClusterAsync(
115122
for key, value := range labelsSpec.Labels {
116123
managedCluster.Labels[key] = value
117124
}
125+
log.Debugf("managedCluster.Labels: %v", managedCluster.Labels)
118126

119127
// delete labels by key
120128
for _, labelKey := range labelsSpec.DeletedLabelKeys {
121129
delete(managedCluster.Labels, labelKey)
122130
}
131+
log.Debugf("managedCluster.Labels: %v", managedCluster.Labels)
123132

124133
if err := s.updateManagedFieldEntry(managedCluster, labelsSpec); err != nil {
125134
s.log.Error(err, "failed to update managed cluster", "name", labelsSpec.ClusterName)

agent/pkg/status/generic/generic_handler.go

+11-1
Original file line numberDiff line numberDiff line change
@@ -6,8 +6,11 @@ import (
66

77
"github.com/stolostron/multicluster-global-hub/agent/pkg/status/interfaces"
88
genericpayload "github.com/stolostron/multicluster-global-hub/pkg/bundle/generic"
9+
"github.com/stolostron/multicluster-global-hub/pkg/logger"
910
)
1011

12+
var log = logger.ZapLogger("generic-handler")
13+
1114
type genericHandler struct {
1215
eventData *genericpayload.GenericObjectBundle
1316
// isSpec is to let the handler only update the event when spec is changed.
@@ -32,23 +35,28 @@ func NewGenericHandler(eventData *genericpayload.GenericObjectBundle, opts ...Ha
3235
}
3336

3437
func (h *genericHandler) Get() interface{} {
38+
log.Debugf("get obj:%v", *h.eventData)
3539
return h.eventData
3640
}
3741

3842
func (h *genericHandler) Update(obj client.Object) bool {
43+
log.Debugf("update obj:%v", obj)
44+
3945
if h.shouldUpdate != nil {
4046
if updated := h.shouldUpdate(obj); !updated {
47+
log.Debugf("h.shouldUpdate false")
4148
return false
4249
}
4350
}
44-
4551
index := getObjectIndexByUID(obj.GetUID(), (*h.eventData))
4652
if index == -1 { // object not found, need to add it to the bundle
4753
(*h.eventData) = append((*h.eventData), obj)
4854
return true
4955
}
5056

5157
old := (*h.eventData)[index]
58+
log.Debugf("obj: %v", old)
59+
5260
if h.isSpec && old.GetGeneration() == obj.GetGeneration() {
5361
return false
5462
}
@@ -121,6 +129,8 @@ func WithSpec(onlySpec bool) HandlerOption {
121129
}
122130

123131
func WithShouldUpdate(shouldUpdate func(client.Object) bool) HandlerOption {
132+
log.Debugf("g.shouldUpdate:%v, shouldUpdate:%v", shouldUpdate, shouldUpdate)
133+
124134
return func(g *genericHandler) {
125135
g.shouldUpdate = shouldUpdate
126136
}

install.sh

+59
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,59 @@
#!/usr/bin/env bash

# This script installs OLM (Operator Lifecycle Manager) from a GitHub release.
#
# Usage: install.sh version [base_url]
#   version  - the GitHub release tag to install (e.g. v0.22.0)
#   base_url - optional alternative download base URL

set -e

default_base_url=https://github.com/operator-framework/operator-lifecycle-manager/releases/download

if [[ ${#@} -lt 1 || ${#@} -gt 2 ]]; then
    echo "Usage: $0 version [base_url]"
    echo "* version: the github release version"
    echo "* base_url: the github base URL (Default: $default_base_url)"
    exit 1
fi

# Abort if OLM is already installed under the OpenShift-managed namespace
# (common when running on OpenShift rather than vanilla Kubernetes).
if kubectl get deployment olm-operator -n openshift-operator-lifecycle-manager > /dev/null 2>&1; then
    echo "OLM is already installed in a different configuration. This is common if you are not running a vanilla Kubernetes cluster. Exiting..."
    exit 1
fi

release="$1"
base_url="${2:-${default_base_url}}"
url="${base_url}/${release}"
namespace=olm

# Abort if OLM is already installed in the target namespace.
if kubectl get deployment olm-operator -n "${namespace}" > /dev/null 2>&1; then
    echo "OLM is already installed in ${namespace} namespace. Exiting..."
    exit 1
fi

# Install the CRDs first and wait until the API server has established them,
# then apply the OLM manifests that depend on those CRDs.
kubectl create -f "${url}/crds.yaml"
kubectl wait --for=condition=Established -f "${url}/crds.yaml"
kubectl create -f "${url}/olm.yaml"

# Wait for the core OLM deployments to be ready.
kubectl rollout status -w deployment/olm-operator --namespace="${namespace}"
kubectl rollout status -w deployment/catalog-operator --namespace="${namespace}"

# Poll the packageserver CSV until it reaches the Succeeded phase,
# logging each phase transition. Initialize csv_phase so the first
# comparison does not read an unset variable (safe under `set -u`).
csv_phase=""
retries=30
until [[ $retries -eq 0 ]]; do
    new_csv_phase=$(kubectl get csv -n "${namespace}" packageserver -o jsonpath='{.status.phase}' 2>/dev/null || echo "Waiting for CSV to appear")
    if [[ "$new_csv_phase" != "$csv_phase" ]]; then
        csv_phase=$new_csv_phase
        echo "Package server phase: $csv_phase"
    fi
    if [[ "$new_csv_phase" == "Succeeded" ]]; then
        break
    fi
    sleep 10
    retries=$((retries - 1))
done

# Use a numeric [[ ]] comparison (the original `[ $retries == 0 ]` was an
# unquoted, non-POSIX string compare inconsistent with the loop above).
if [[ $retries -eq 0 ]]; then
    echo "CSV \"packageserver\" failed to reach phase succeeded"
    exit 1
fi

kubectl rollout status -w deployment/packageserver --namespace="${namespace}"

0 commit comments

Comments (0)