Merge pull request #1962 from cgwalters/nodecontroller-events
Bug 1852047: controller: Emit events
openshift-merge-robot authored Aug 27, 2020
2 parents 16d243c + 4e133e7 commit ab32432
Showing 7 changed files with 200 additions and 24 deletions.
9 changes: 9 additions & 0 deletions manifests/machineconfigcontroller/events-clusterrole.yaml
@@ -0,0 +1,9 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: machine-config-controller-events
  namespace: {{.TargetNamespace}}
rules:
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "patch"]
12 changes: 12 additions & 0 deletions manifests/machineconfigcontroller/events-rolebinding-default.yaml
@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: machine-config-controller-events
  namespace: default
roleRef:
  kind: ClusterRole
  name: machine-config-controller-events
subjects:
- kind: ServiceAccount
  namespace: {{.TargetNamespace}}
  name: machine-config-controller
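
This binding in the default namespace matters because MachineConfigPool is cluster-scoped: client-go's recorder writes an event into the namespace of the object it is attached to and falls back to default when that namespace is empty, so pool events land in default. The next file adds the equivalent binding in the operator's target namespace. A minimal standalone sketch of that fallback (illustrative only, not code from this PR):

package example

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// eventNamespaceFor mirrors the recorder's behavior: use the involved object's
// namespace, or "default" when the object is cluster-scoped.
func eventNamespaceFor(ref corev1.ObjectReference) string {
	if ref.Namespace == "" {
		return metav1.NamespaceDefault
	}
	return ref.Namespace
}

func main() {
	pool := corev1.ObjectReference{Kind: "MachineConfigPool", Name: "master"} // no namespace
	fmt.Println(eventNamespaceFor(pool))                                      // default
}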
12 changes: 12 additions & 0 deletions manifests/machineconfigcontroller/events-rolebinding-target.yaml
@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: machine-config-controller-events
  namespace: {{.TargetNamespace}}
roleRef:
  kind: ClusterRole
  name: machine-config-controller-events
subjects:
- kind: ServiceAccount
  namespace: {{.TargetNamespace}}
  name: machine-config-controller
48 changes: 36 additions & 12 deletions pkg/controller/node/node_controller.go
@@ -409,6 +409,16 @@ func (ctrl *Controller) addNode(obj interface{}) {
}
}

func (ctrl *Controller) logPool(pool *mcfgv1.MachineConfigPool, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
glog.Infof("Pool %s: %s", pool.Name, msg)
}

func (ctrl *Controller) logPoolNode(pool *mcfgv1.MachineConfigPool, node *corev1.Node, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
glog.Infof("Pool %s: node %s: %s", pool.Name, node.Name, msg)
}

func (ctrl *Controller) updateNode(old, cur interface{}) {
oldNode := old.(*corev1.Node)
curNode := cur.(*corev1.Node)
@@ -441,16 +451,16 @@ func (ctrl *Controller) updateNode(old, cur interface{}) {
if oldReady != newReady {
changed = true
if newReadyErr != nil {
glog.Infof("Pool %s: node %s is now reporting unready: %v", pool.Name, curNode.Name, newReadyErr)
ctrl.logPoolNode(pool, curNode, "Reporting unready: %v", newReadyErr)
} else {
glog.Infof("Pool %s: node %s is now reporting ready", pool.Name, curNode.Name)
ctrl.logPoolNode(pool, curNode, "Reporting ready")
}
}

// Specifically log when a node has completed an update so the MCC logs are a useful central aggregate of state changes
if oldNode.Annotations[daemonconsts.CurrentMachineConfigAnnotationKey] != oldNode.Annotations[daemonconsts.DesiredMachineConfigAnnotationKey] &&
isNodeDone(curNode) {
glog.Infof("Pool %s: node %s has completed update to %s", pool.Name, curNode.Name, curNode.Annotations[daemonconsts.DesiredMachineConfigAnnotationKey])
ctrl.logPoolNode(pool, curNode, "Completed update to %s", curNode.Annotations[daemonconsts.DesiredMachineConfigAnnotationKey])
changed = true
} else {
annos := []string{
@@ -459,13 +469,18 @@ func (ctrl *Controller) updateNode(old, cur interface{}) {
daemonconsts.MachineConfigDaemonStateAnnotationKey,
}
for _, anno := range annos {
if oldNode.Annotations[anno] != curNode.Annotations[anno] {
glog.Infof("Pool %s: node %s changed %s = %s", pool.Name, curNode.Name, anno, curNode.Annotations[anno])
newValue := curNode.Annotations[anno]
if oldNode.Annotations[anno] != newValue {
ctrl.logPoolNode(pool, curNode, "changed annotation %s = %s", anno, newValue)
changed = true
// For the control plane, emit events for these since they're important
if pool.Name == masterPoolName {
ctrl.eventRecorder.Eventf(pool, corev1.EventTypeNormal, "AnnotationChange", "Node %s now has %s=%s", curNode.Name, anno, newValue)
}
}
}
if !reflect.DeepEqual(oldNode.Labels, curNode.Labels) {
glog.Infof("Pool %s: node %s changed labels", pool.Name, curNode.Name)
ctrl.logPoolNode(pool, curNode, "changed labels")
changed = true
}
}
@@ -742,7 +757,7 @@ func (ctrl *Controller) syncMachineConfigPool(key string) error {

candidates, capacity := getAllCandidateMachines(pool, nodes, maxunavail)
if len(candidates) > 0 {
glog.Infof("Pool %s: %d candidate nodes for update, capacity: %d", pool.Name, len(candidates), capacity)
ctrl.logPool(pool, "%d candidate nodes for update, capacity: %d", len(candidates), capacity)
if err := ctrl.updateCandidateMachines(pool, candidates, capacity); err != nil {
if syncErr := ctrl.syncStatusOnly(pool); syncErr != nil {
return goerrs.Wrapf(err, "error setting desired machine config annotation for pool %q, sync error: %v", pool.Name, syncErr)
@@ -783,7 +798,6 @@ func (ctrl *Controller) getNodesForPool(pool *mcfgv1.MachineConfigPool) ([]*core
}

func (ctrl *Controller) setDesiredMachineConfigAnnotation(nodeName, currentConfig string) error {
glog.Infof("Setting node %s to desired config %s", nodeName, currentConfig)
return clientretry.RetryOnConflict(nodeUpdateBackoff, func() error {
oldNode, err := ctrl.kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
if err != nil {
@@ -869,7 +883,7 @@ func (ctrl *Controller) getCurrentEtcdLeader(candidates []*corev1.Node) (*corev1
// filterControlPlaneCandidateNodes adjusts the candidates and capacity specifically
// for the control plane, e.g. based on which node is the etcd leader at the time.
// nolint:unparam
func (ctrl *Controller) filterControlPlaneCandidateNodes(candidates []*corev1.Node, capacity uint) ([]*corev1.Node, uint, error) {
func (ctrl *Controller) filterControlPlaneCandidateNodes(pool *mcfgv1.MachineConfigPool, candidates []*corev1.Node, capacity uint) ([]*corev1.Node, uint, error) {
if len(candidates) <= 1 {
return candidates, capacity, nil
}
@@ -880,6 +894,8 @@ func (ctrl *Controller) filterControlPlaneCandidateNodes(candidates []*corev1.No
var newCandidates []*corev1.Node
for _, node := range candidates {
if node == etcdLeader {
// For now make this an event so we know it's working, even though it's more of a non-event
ctrl.eventRecorder.Eventf(pool, corev1.EventTypeNormal, "DeferringEtcdLeaderUpdate", "Deferring update of etcd leader %s", node.Name)
glog.Infof("Deferring update of etcd leader: %s", node.Name)
continue
}
@@ -892,23 +908,31 @@ func (ctrl *Controller) filterControlPlaneCandidateNodes(candidates []*corev1.No
func (ctrl *Controller) updateCandidateMachines(pool *mcfgv1.MachineConfigPool, candidates []*corev1.Node, capacity uint) error {
if pool.Name == masterPoolName {
var err error
candidates, capacity, err = ctrl.filterControlPlaneCandidateNodes(candidates, capacity)
candidates, capacity, err = ctrl.filterControlPlaneCandidateNodes(pool, candidates, capacity)
if err != nil {
return err
}
// In practice right now these counts will be 1 but let's stay general to support 5 etcd nodes in the future
glog.Infof("Pool %s: filtered to %d candidate nodes for update, capacity: %d", pool.Name, len(candidates), capacity)
ctrl.logPool(pool, "filtered to %d candidate nodes for update, capacity: %d", len(candidates), capacity)
}
if capacity < uint(len(candidates)) {
// Arbitrarily pick the first N candidates; no attempt at sorting.
// Perhaps later we allow admins to weight somehow, or do something more intelligent.
candidates = candidates[:capacity]
}
targetConfig := pool.Spec.Configuration.Name
for _, node := range candidates {
if err := ctrl.setDesiredMachineConfigAnnotation(node.Name, pool.Spec.Configuration.Name); err != nil {
ctrl.logPool(pool, "Setting node %s target to %s", node.Name, targetConfig)
if err := ctrl.setDesiredMachineConfigAnnotation(node.Name, targetConfig); err != nil {
return goerrs.Wrapf(err, "setting desired config for node %s", node.Name)
}
}
if len(candidates) == 1 {
candidate := candidates[0]
ctrl.eventRecorder.Eventf(pool, corev1.EventTypeNormal, "SetDesiredConfig", "Targeted node %s to config %s", candidate.Name, targetConfig)
} else {
ctrl.eventRecorder.Eventf(pool, corev1.EventTypeNormal, "SetDesiredConfig", "Set target for %d nodes to config %s", targetConfig)
}
return nil
}

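The new Eventf calls above can be exercised without a cluster via client-go's FakeRecorder, which renders events into a channel of strings. A small sketch (the node name and annotation key/value are placeholders, not taken from this PR):

package example

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
)

func main() {
	rec := record.NewFakeRecorder(10) // buffered channel of rendered events

	// Stand-in for ctrl.eventRecorder.Eventf(pool, ...) in updateNode above.
	rec.Eventf(&corev1.Node{}, corev1.EventTypeNormal, "AnnotationChange",
		"Node %s now has %s=%s", "master-0", "machineconfiguration.openshift.io/state", "Done")

	fmt.Println(<-rec.Events) // "Normal AnnotationChange Node master-0 now has ...=Done"
}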
6 changes: 6 additions & 0 deletions pkg/controller/node/status.go
@@ -27,6 +27,12 @@ func (ctrl *Controller) syncStatusOnly(pool *mcfgv1.MachineConfigPool) error {
newPool := pool
newPool.Status = newStatus
_, err = ctrl.client.MachineconfigurationV1().MachineConfigPools().UpdateStatus(context.TODO(), newPool, metav1.UpdateOptions{})
if pool.Spec.Configuration.Name != newPool.Spec.Configuration.Name {
ctrl.eventRecorder.Eventf(pool, corev1.EventTypeNormal, "Updating", "Pool %s now targeting %s", pool.Name, newPool.Spec.Configuration.Name)
}
if pool.Status.Configuration.Name != newPool.Status.Configuration.Name {
ctrl.eventRecorder.Eventf(pool, corev1.EventTypeNormal, "Completed", "Pool %s has completed update to %s", pool.Name, newPool.Status.Configuration.Name)
}
return err
}

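One caveat worth noting in the hunk above: newPool := pool copies a pointer, so after newPool.Status = newStatus both sides of the two comparisons read the same object and the checks are always false; the old names would need to be snapshotted (for example via DeepCopy) before the status is overwritten for the Updating/Completed events to ever fire. A standalone sketch of the pitfall, with illustrative types rather than the repo's:

package example

import "fmt"

type ref struct{ Name string }
type pool struct{ Status ref }

func main() {
	p := &pool{Status: ref{Name: "rendered-old"}}
	newPool := p                               // copies the pointer, not the struct
	newPool.Status = ref{Name: "rendered-new"} // also mutates p.Status
	// Mirrors the "Completed" comparison above: always false.
	fmt.Println(p.Status.Name != newPool.Status.Name) // false
}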
103 changes: 98 additions & 5 deletions pkg/operator/assets/bindata.go
@@ -10,6 +10,9 @@
// manifests/machineconfigcontroller/clusterrolebinding.yaml
// manifests/machineconfigcontroller/controllerconfig.yaml
// manifests/machineconfigcontroller/deployment.yaml
// manifests/machineconfigcontroller/events-clusterrole.yaml
// manifests/machineconfigcontroller/events-rolebinding-default.yaml
// manifests/machineconfigcontroller/events-rolebinding-target.yaml
// manifests/machineconfigcontroller/sa.yaml
// manifests/machineconfigdaemon/clusterrole.yaml
// manifests/machineconfigdaemon/clusterrolebinding.yaml
@@ -1161,6 +1164,90 @@ func manifestsMachineconfigcontrollerDeploymentYaml() (*asset, error) {
return a, nil
}

var _manifestsMachineconfigcontrollerEventsClusterroleYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: machine-config-controller-events
  namespace: {{.TargetNamespace}}
rules:
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "patch"]
`)

func manifestsMachineconfigcontrollerEventsClusterroleYamlBytes() ([]byte, error) {
return _manifestsMachineconfigcontrollerEventsClusterroleYaml, nil
}

func manifestsMachineconfigcontrollerEventsClusterroleYaml() (*asset, error) {
bytes, err := manifestsMachineconfigcontrollerEventsClusterroleYamlBytes()
if err != nil {
return nil, err
}

info := bindataFileInfo{name: "manifests/machineconfigcontroller/events-clusterrole.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}

var _manifestsMachineconfigcontrollerEventsRolebindingDefaultYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: machine-config-controller-events
  namespace: default
roleRef:
  kind: ClusterRole
  name: machine-config-controller-events
subjects:
- kind: ServiceAccount
  namespace: {{.TargetNamespace}}
  name: machine-config-controller
`)

func manifestsMachineconfigcontrollerEventsRolebindingDefaultYamlBytes() ([]byte, error) {
return _manifestsMachineconfigcontrollerEventsRolebindingDefaultYaml, nil
}

func manifestsMachineconfigcontrollerEventsRolebindingDefaultYaml() (*asset, error) {
bytes, err := manifestsMachineconfigcontrollerEventsRolebindingDefaultYamlBytes()
if err != nil {
return nil, err
}

info := bindataFileInfo{name: "manifests/machineconfigcontroller/events-rolebinding-default.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}

var _manifestsMachineconfigcontrollerEventsRolebindingTargetYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: machine-config-controller-events
  namespace: {{.TargetNamespace}}
roleRef:
  kind: ClusterRole
  name: machine-config-controller-events
subjects:
- kind: ServiceAccount
  namespace: {{.TargetNamespace}}
  name: machine-config-controller
`)

func manifestsMachineconfigcontrollerEventsRolebindingTargetYamlBytes() ([]byte, error) {
return _manifestsMachineconfigcontrollerEventsRolebindingTargetYaml, nil
}

func manifestsMachineconfigcontrollerEventsRolebindingTargetYaml() (*asset, error) {
bytes, err := manifestsMachineconfigcontrollerEventsRolebindingTargetYamlBytes()
if err != nil {
return nil, err
}

info := bindataFileInfo{name: "manifests/machineconfigcontroller/events-rolebinding-target.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}

var _manifestsMachineconfigcontrollerSaYaml = []byte(`apiVersion: v1
kind: ServiceAccount
metadata:
@@ -2761,6 +2848,9 @@ var _bindata = map[string]func() (*asset, error){
"manifests/machineconfigcontroller/clusterrolebinding.yaml": manifestsMachineconfigcontrollerClusterrolebindingYaml,
"manifests/machineconfigcontroller/controllerconfig.yaml": manifestsMachineconfigcontrollerControllerconfigYaml,
"manifests/machineconfigcontroller/deployment.yaml": manifestsMachineconfigcontrollerDeploymentYaml,
"manifests/machineconfigcontroller/events-clusterrole.yaml": manifestsMachineconfigcontrollerEventsClusterroleYaml,
"manifests/machineconfigcontroller/events-rolebinding-default.yaml": manifestsMachineconfigcontrollerEventsRolebindingDefaultYaml,
"manifests/machineconfigcontroller/events-rolebinding-target.yaml": manifestsMachineconfigcontrollerEventsRolebindingTargetYaml,
"manifests/machineconfigcontroller/sa.yaml": manifestsMachineconfigcontrollerSaYaml,
"manifests/machineconfigdaemon/clusterrole.yaml": manifestsMachineconfigdaemonClusterroleYaml,
"manifests/machineconfigdaemon/clusterrolebinding.yaml": manifestsMachineconfigdaemonClusterrolebindingYaml,
@@ -2847,11 +2937,14 @@ var _bintree = &bintree{nil, map[string]*bintree{
"bootstrap-pod-v2.yaml": &bintree{manifestsBootstrapPodV2Yaml, map[string]*bintree{}},
"controllerconfig.crd.yaml": &bintree{manifestsControllerconfigCrdYaml, map[string]*bintree{}},
"machineconfigcontroller": &bintree{nil, map[string]*bintree{
"clusterrole.yaml": &bintree{manifestsMachineconfigcontrollerClusterroleYaml, map[string]*bintree{}},
"clusterrolebinding.yaml": &bintree{manifestsMachineconfigcontrollerClusterrolebindingYaml, map[string]*bintree{}},
"controllerconfig.yaml": &bintree{manifestsMachineconfigcontrollerControllerconfigYaml, map[string]*bintree{}},
"deployment.yaml": &bintree{manifestsMachineconfigcontrollerDeploymentYaml, map[string]*bintree{}},
"sa.yaml": &bintree{manifestsMachineconfigcontrollerSaYaml, map[string]*bintree{}},
"clusterrole.yaml": &bintree{manifestsMachineconfigcontrollerClusterroleYaml, map[string]*bintree{}},
"clusterrolebinding.yaml": &bintree{manifestsMachineconfigcontrollerClusterrolebindingYaml, map[string]*bintree{}},
"controllerconfig.yaml": &bintree{manifestsMachineconfigcontrollerControllerconfigYaml, map[string]*bintree{}},
"deployment.yaml": &bintree{manifestsMachineconfigcontrollerDeploymentYaml, map[string]*bintree{}},
"events-clusterrole.yaml": &bintree{manifestsMachineconfigcontrollerEventsClusterroleYaml, map[string]*bintree{}},
"events-rolebinding-default.yaml": &bintree{manifestsMachineconfigcontrollerEventsRolebindingDefaultYaml, map[string]*bintree{}},
"events-rolebinding-target.yaml": &bintree{manifestsMachineconfigcontrollerEventsRolebindingTargetYaml, map[string]*bintree{}},
"sa.yaml": &bintree{manifestsMachineconfigcontrollerSaYaml, map[string]*bintree{}},
}},
"machineconfigdaemon": &bintree{nil, map[string]*bintree{
"clusterrole.yaml": &bintree{manifestsMachineconfigdaemonClusterroleYaml, map[string]*bintree{}},
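The {{.TargetNamespace}} placeholders embedded above are filled in when the operator renders the asset before applying it (see the sync.go loop below). A minimal standalone sketch of that kind of Go templating, with a made-up render config and namespace value:

package example

import (
	"bytes"
	"fmt"
	"text/template"
)

// renderConfig is a stand-in for the operator's render config; only the field
// the new manifests reference is modeled here.
type renderConfig struct {
	TargetNamespace string
}

func main() {
	manifest := `kind: RoleBinding
metadata:
  name: machine-config-controller-events
  namespace: {{.TargetNamespace}}
`
	tmpl := template.Must(template.New("asset").Parse(manifest))
	var out bytes.Buffer
	if err := tmpl.Execute(&out, renderConfig{TargetNamespace: "openshift-machine-config-operator"}); err != nil {
		panic(err)
	}
	fmt.Print(out.String()) // the namespace line now carries the rendered target namespace
}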
34 changes: 27 additions & 7 deletions pkg/operator/sync.go
@@ -395,14 +395,34 @@ func (optr *Operator) syncMachineConfigPools(config *renderConfig) error {
}

func (optr *Operator) syncMachineConfigController(config *renderConfig) error {
crBytes, err := renderAsset(config, "manifests/machineconfigcontroller/clusterrole.yaml")
if err != nil {
return err
for _, path := range []string{
"manifests/machineconfigcontroller/clusterrole.yaml",
"manifests/machineconfigcontroller/events-clusterrole.yaml",
} {
crBytes, err := renderAsset(config, path)
if err != nil {
return err
}
cr := resourceread.ReadClusterRoleV1OrDie(crBytes)
_, _, err = resourceapply.ApplyClusterRole(optr.kubeClient.RbacV1(), cr)
if err != nil {
return err
}
}
cr := resourceread.ReadClusterRoleV1OrDie(crBytes)
_, _, err = resourceapply.ApplyClusterRole(optr.kubeClient.RbacV1(), cr)
if err != nil {
return err

for _, path := range []string{
"manifests/machineconfigcontroller/events-rolebinding-default.yaml",
"manifests/machineconfigcontroller/events-rolebinding-target.yaml",
} {
crbBytes, err := renderAsset(config, path)
if err != nil {
return err
}
crb := resourceread.ReadRoleBindingV1OrDie(crbBytes)
_, _, err = resourceapply.ApplyRoleBinding(optr.kubeClient.RbacV1(), crb)
if err != nil {
return err
}
}

crbBytes, err := renderAsset(config, "manifests/machineconfigcontroller/clusterrolebinding.yaml")
