diff --git a/cmd/icingadb/main.go b/cmd/icingadb/main.go index d2178db12..3e2815a8a 100644 --- a/cmd/icingadb/main.go +++ b/cmd/icingadb/main.go @@ -30,7 +30,7 @@ import ( const ( ExitSuccess = 0 ExitFailure = 1 - expectedRedisSchemaVersion = "5" + expectedRedisSchemaVersion = "6" ) func main() { diff --git a/pkg/contracts/contracts.go b/pkg/contracts/contracts.go index 424d059f2..2ca71e742 100644 --- a/pkg/contracts/contracts.go +++ b/pkg/contracts/contracts.go @@ -27,3 +27,10 @@ func SafeInit(v any) { initer.Init() } } + +// Equaler is implemented by any entity that can be compared with another entity of the same type. +// The Equal method should return true if the receiver is equal to the other entity. +type Equaler interface { + // Equal returns whether the receiver is equal to the other entity. + Equal(other any) bool +} diff --git a/pkg/icingadb/delta.go b/pkg/icingadb/delta.go index e370fd03a..99241a49b 100644 --- a/pkg/icingadb/delta.go +++ b/pkg/icingadb/delta.go @@ -53,7 +53,7 @@ func (delta *Delta) run(ctx context.Context, actualCh, desiredCh <-chan database desired := EntitiesById{} // only read from desiredCh (so far) var update EntitiesById - if delta.Subject.WithChecksum() { + if _, ok := delta.Subject.Entity().(contracts.Equaler); ok || delta.Subject.WithChecksum() { update = EntitiesById{} // read from actualCh and desiredCh with mismatching checksums } @@ -70,7 +70,7 @@ func (delta *Delta) run(ctx context.Context, actualCh, desiredCh <-chan database id := actualValue.ID().String() if desiredValue, ok := desired[id]; ok { delete(desired, id) - if update != nil && !checksumsMatch(actualValue, desiredValue) { + if update != nil && !entitiesEqual(actualValue, desiredValue) { update[id] = desiredValue } } else { @@ -88,7 +88,7 @@ func (delta *Delta) run(ctx context.Context, actualCh, desiredCh <-chan database id := desiredValue.ID().String() if actualValue, ok := actual[id]; ok { delete(actual, id) - if update != nil && !checksumsMatch(actualValue, 
desiredValue) { + if update != nil && !entitiesEqual(actualValue, desiredValue) { update[id] = desiredValue } } else { @@ -117,8 +117,14 @@ func (delta *Delta) run(ctx context.Context, actualCh, desiredCh <-chan database zap.Int("delete", len(delta.Delete))) } -// checksumsMatch returns whether the checksums of two entities are the same. -// Both entities must implement contracts.Checksumer. -func checksumsMatch(a, b database.Entity) bool { - return cmp.Equal(a.(contracts.Checksumer).Checksum(), b.(contracts.Checksumer).Checksum()) +// entitiesEqual returns whether the two entities are equal either based on their checksum or by comparing them. +// +// Both entities must either implement contracts.Checksumer or contracts.Equaler for this to work. If neither +// interface is implemented nor if both entities don't implement the same interface, this function will panic. +func entitiesEqual(a, b database.Entity) bool { + if _, ok := a.(contracts.Checksumer); ok { + return cmp.Equal(a.(contracts.Checksumer).Checksum(), b.(contracts.Checksumer).Checksum()) + } + + return a.(contracts.Equaler).Equal(b) } diff --git a/pkg/icingadb/schema.go b/pkg/icingadb/schema.go index 94ac8c3ce..c7ded7168 100644 --- a/pkg/icingadb/schema.go +++ b/pkg/icingadb/schema.go @@ -15,8 +15,8 @@ import ( ) const ( - expectedMysqlSchemaVersion = 6 - expectedPostgresSchemaVersion = 4 + expectedMysqlSchemaVersion = 7 + expectedPostgresSchemaVersion = 5 ) // ErrSchemaNotExists implies that no Icinga DB schema has been imported. diff --git a/pkg/icingadb/sync.go b/pkg/icingadb/sync.go index 6b39ee64f..b353c7848 100644 --- a/pkg/icingadb/sync.go +++ b/pkg/icingadb/sync.go @@ -148,9 +148,18 @@ func (s Sync) ApplyDelta(ctx context.Context, delta *Delta) error { entitiesWithoutChecksum, errs := icingaredis.CreateEntities(ctx, delta.Subject.Factory(), pairs, runtime.NumCPU()) // Let errors from CreateEntities cancel our group. 
com.ErrgroupReceive(g, errs) - entities, errs := icingaredis.SetChecksums(ctx, entitiesWithoutChecksum, delta.Update, runtime.NumCPU()) - // Let errors from SetChecksums cancel our group. - com.ErrgroupReceive(g, errs) + + var entities <-chan database.Entity + // Apply the checksums only if the sync subject supports it, i.e., it implements contracts.Checksumer. + // This is necessary because not only entities that implement contracts.Checksumer can be updated, but + // also entities that implement the contracts.Equaler interface. + if delta.Subject.WithChecksum() { + entities, errs = icingaredis.SetChecksums(ctx, entitiesWithoutChecksum, delta.Update, runtime.NumCPU()) + // Let errors from SetChecksums cancel our group. + com.ErrgroupReceive(g, errs) + } else { + entities = entitiesWithoutChecksum + } g.Go(func() error { // Using upsert here on purpose as this is the fastest way to do bulk updates. diff --git a/pkg/icingadb/v1/checkable.go b/pkg/icingadb/v1/checkable.go index 78d75f5b3..3dfa268f4 100644 --- a/pkg/icingadb/v1/checkable.go +++ b/pkg/icingadb/v1/checkable.go @@ -15,6 +15,7 @@ type Checkable struct { CheckTimeperiodName string `json:"check_timeperiod_name"` CheckTimeperiodId types.Binary `json:"check_timeperiod_id"` CheckRetryInterval float64 `json:"check_retry_interval"` + TotalChildren types.Int `json:"total_children"` CheckTimeout float64 `json:"check_timeout"` CheckcommandName string `json:"checkcommand_name"` CheckcommandId types.Binary `json:"checkcommand_id"` diff --git a/pkg/icingadb/v1/dependency.go b/pkg/icingadb/v1/dependency.go new file mode 100644 index 000000000..dbea9e4eb --- /dev/null +++ b/pkg/icingadb/v1/dependency.go @@ -0,0 +1,100 @@ +package v1 + +import ( + "bytes" + "github.com/icinga/icinga-go-library/database" + "github.com/icinga/icinga-go-library/types" +) + +type Redundancygroup struct { + EntityWithoutChecksum `json:",inline"` + EnvironmentMeta `json:",inline"` + DisplayName string `json:"display_name"` +} + +// TableName
implements [database.TableNamer]. +func (r *Redundancygroup) TableName() string { + return "redundancy_group" +} + +type RedundancygroupState struct { + EntityWithoutChecksum `json:",inline"` + EnvironmentMeta `json:",inline"` + RedundancyGroupId types.Binary `json:"redundancy_group_id"` + Failed types.Bool `json:"failed"` + IsReachable types.Bool `json:"is_reachable"` + LastStateChange types.UnixMilli `json:"last_state_change"` +} + +// TableName implements [database.TableNamer]. +func (r *RedundancygroupState) TableName() string { + return "redundancy_group_state" +} + +// Equal implements the [contracts.Equaler] interface. +func (r *RedundancygroupState) Equal(other any) bool { + if o, ok := other.(*RedundancygroupState); ok { + return bytes.Equal(r.Id, o.Id) && + bytes.Equal(r.EnvironmentId, o.EnvironmentId) && + bytes.Equal(r.RedundancyGroupId, o.RedundancyGroupId) && + r.Failed == o.Failed && + r.IsReachable == o.IsReachable && + r.LastStateChange.Time().Equal(o.LastStateChange.Time()) + } + + return false +} + +type DependencyNode struct { + EntityWithoutChecksum `json:",inline"` + EnvironmentMeta `json:",inline"` + HostId types.Binary `json:"host_id"` + ServiceId types.Binary `json:"service_id"` + RedundancyGroupId types.Binary `json:"redundancy_group_id"` +} + +type DependencyEdgeState struct { + EntityWithoutChecksum `json:",inline"` + EnvironmentMeta `json:",inline"` + Failed types.Bool `json:"failed"` +} + +// Equal implements the [contracts.Equaler] interface. 
+func (es *DependencyEdgeState) Equal(other any) bool { + if other, ok := other.(*DependencyEdgeState); ok { + return bytes.Equal(es.Id, other.Id) && + bytes.Equal(es.EnvironmentId, other.EnvironmentId) && + es.Failed == other.Failed + } + + return false +} + +type DependencyEdge struct { + EntityWithoutChecksum `json:",inline"` + EnvironmentMeta `json:",inline"` + FromNodeId types.Binary `json:"from_node_id"` + ToNodeId types.Binary `json:"to_node_id"` + DependencyEdgeStateId types.Binary `json:"dependency_edge_state_id"` + DisplayName string `json:"display_name"` +} + +func NewRedundancygroup() database.Entity { + return &Redundancygroup{} +} + +func NewRedundancygroupState() database.Entity { + return &RedundancygroupState{} +} + +func NewDependencyNode() database.Entity { + return &DependencyNode{} +} + +func NewDependencyEdgeState() database.Entity { + return &DependencyEdgeState{} +} + +func NewDependencyEdge() database.Entity { + return &DependencyEdge{} +} diff --git a/pkg/icingadb/v1/state.go b/pkg/icingadb/v1/state.go index d2e48e814..6ca8a4d00 100644 --- a/pkg/icingadb/v1/state.go +++ b/pkg/icingadb/v1/state.go @@ -17,6 +17,7 @@ type State struct { ExecutionTime float64 `json:"execution_time"` HardState uint8 `json:"hard_state"` InDowntime types.Bool `json:"in_downtime"` + AffectsChildren types.Bool `json:"affects_children"` IsAcknowledged icingadbTypes.AcknowledgementState `json:"is_acknowledged"` IsFlapping types.Bool `json:"is_flapping"` IsHandled types.Bool `json:"is_handled"` diff --git a/pkg/icingadb/v1/v1.go b/pkg/icingadb/v1/v1.go index f7677c775..8c536ec79 100644 --- a/pkg/icingadb/v1/v1.go +++ b/pkg/icingadb/v1/v1.go @@ -4,7 +4,12 @@ import ( "github.com/icinga/icinga-go-library/database" ) -var StateFactories = []database.EntityFactoryFunc{NewHostState, NewServiceState} +var StateFactories = []database.EntityFactoryFunc{ + NewHostState, + NewServiceState, + NewDependencyEdgeState, + NewRedundancygroupState, +} var ConfigFactories = 
[]database.EntityFactoryFunc{ NewActionUrl, @@ -51,6 +56,9 @@ var ConfigFactories = []database.EntityFactoryFunc{ NewUsergroupCustomvar, NewUsergroupMember, NewZone, + NewRedundancygroup, + NewDependencyNode, + NewDependencyEdge, } // contextKey is an unexported type for context keys defined in this package. diff --git a/schema/mysql/schema.sql b/schema/mysql/schema.sql index a1db99084..ad5b32d60 100644 --- a/schema/mysql/schema.sql +++ b/schema/mysql/schema.sql @@ -188,6 +188,8 @@ CREATE TABLE host ( check_interval int unsigned NOT NULL, check_retry_interval int unsigned NOT NULL, + total_children int unsigned DEFAULT NULL, + active_checks_enabled enum('n', 'y') NOT NULL, passive_checks_enabled enum('n', 'y') NOT NULL, event_handler_enabled enum('n', 'y') NOT NULL, @@ -313,6 +315,8 @@ CREATE TABLE host_state ( in_downtime enum('n', 'y') NOT NULL, + affects_children enum('n', 'y') NOT NULL, + execution_time int unsigned DEFAULT NULL, latency int unsigned DEFAULT NULL, check_timeout int unsigned DEFAULT NULL, @@ -356,6 +360,8 @@ CREATE TABLE service ( check_interval int unsigned NOT NULL, check_retry_interval int unsigned NOT NULL, + total_children int unsigned DEFAULT NULL, + active_checks_enabled enum('n', 'y') NOT NULL, passive_checks_enabled enum('n', 'y') NOT NULL, event_handler_enabled enum('n', 'y') NOT NULL, @@ -482,6 +488,8 @@ CREATE TABLE service_state ( in_downtime enum('n', 'y') NOT NULL, + affects_children enum('n', 'y') NOT NULL, + execution_time int unsigned DEFAULT NULL, latency int unsigned DEFAULT NULL, check_timeout int unsigned DEFAULT NULL, @@ -1334,6 +1342,63 @@ CREATE TABLE sla_history_downtime ( INDEX idx_sla_history_downtime_env_downtime_end (environment_id, downtime_end) COMMENT 'Filter for sla history retention' ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin ROW_FORMAT=DYNAMIC; +CREATE TABLE redundancy_group ( + id binary(20) NOT NULL COMMENT 'sha1(name + all(member parent_name + timeperiod.name + states + 
ignore_soft_states))', + environment_id binary(20) NOT NULL COMMENT 'environment.id', + display_name text NOT NULL, + + CONSTRAINT pk_redundancy_group PRIMARY KEY (id) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin ROW_FORMAT=DYNAMIC; + +CREATE TABLE redundancy_group_state ( + id binary(20) NOT NULL COMMENT 'redundancy_group.id', + environment_id binary(20) NOT NULL COMMENT 'environment.id', + redundancy_group_id binary(20) NOT NULL COMMENT 'redundancy_group.id', + failed enum('n', 'y') NOT NULL, + is_reachable enum('n', 'y') NOT NULL, + last_state_change BIGINT UNSIGNED NOT NULL, + + CONSTRAINT pk_redundancy_group_state PRIMARY KEY (id), + + UNIQUE INDEX idx_redundancy_group_state_redundancy_group_id (redundancy_group_id) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin ROW_FORMAT=DYNAMIC; + +CREATE TABLE dependency_node ( + id binary(20) NOT NULL COMMENT 'host.id|service.id|redundancy_group.id', + environment_id binary(20) NOT NULL COMMENT 'environment.id', + host_id binary(20) DEFAULT NULL COMMENT 'host.id', + service_id binary(20) DEFAULT NULL COMMENT 'service.id', + redundancy_group_id binary(20) DEFAULT NULL COMMENT 'redundancy_group.id', + + CONSTRAINT pk_dependency_node PRIMARY KEY (id), + + UNIQUE INDEX idx_dependency_node_host_service_redundancygroup_id (host_id, service_id, redundancy_group_id), + CONSTRAINT ck_dependency_node_either_checkable_or_redundancy_group_id CHECK ( + IF(redundancy_group_id IS NULL, host_id IS NOT NULL, host_id IS NULL AND service_id IS NULL) = 1 + ) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin ROW_FORMAT=DYNAMIC; + +CREATE TABLE dependency_edge_state ( + id binary(20) NOT NULL COMMENT 'sha1([dependency_edge.from_node_id|parent_name + timeperiod.name + states + ignore_soft_states] + dependency_edge.to_node_id)', + environment_id binary(20) NOT NULL COMMENT 'environment.id', + failed enum('n', 'y') NOT NULL, + + CONSTRAINT pk_dependency_edge_state PRIMARY KEY (id) +) ENGINE=InnoDB DEFAULT 
CHARSET=utf8mb4 COLLATE=utf8mb4_bin ROW_FORMAT=DYNAMIC; + +CREATE TABLE dependency_edge ( + id binary(20) NOT NULL COMMENT 'sha1(from_node_id + to_node_id)', + environment_id binary(20) NOT NULL COMMENT 'environment.id', + from_node_id binary(20) NOT NULL COMMENT 'dependency_node.id', + to_node_id binary(20) NOT NULL COMMENT 'dependency_node.id', + dependency_edge_state_id binary(20) NOT NULL COMMENT 'dependency_edge_state.id', + display_name text NOT NULL, + + CONSTRAINT pk_dependency_edge PRIMARY KEY (id), + + UNIQUE INDEX idx_dependency_edge_from_node_to_node_id (from_node_id, to_node_id) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin ROW_FORMAT=DYNAMIC; + CREATE TABLE icingadb_schema ( id int unsigned NOT NULL AUTO_INCREMENT, version smallint unsigned NOT NULL, @@ -1343,4 +1408,4 @@ CREATE TABLE icingadb_schema ( ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin ROW_FORMAT=DYNAMIC; INSERT INTO icingadb_schema (version, timestamp) - VALUES (6, UNIX_TIMESTAMP() * 1000); + VALUES (7, UNIX_TIMESTAMP() * 1000); diff --git a/schema/mysql/upgrades/1.4.0.sql b/schema/mysql/upgrades/1.4.0.sql new file mode 100644 index 000000000..4b375e476 --- /dev/null +++ b/schema/mysql/upgrades/1.4.0.sql @@ -0,0 +1,67 @@ +ALTER TABLE host ADD COLUMN total_children int unsigned DEFAULT NULL AFTER check_retry_interval; +ALTER TABLE host_state ADD COLUMN affects_children enum('n', 'y') NOT NULL DEFAULT 'n' AFTER in_downtime; +ALTER TABLE host_state MODIFY COLUMN affects_children enum('n', 'y') NOT NULL; + +ALTER TABLE service ADD COLUMN total_children int unsigned DEFAULT NULL AFTER check_retry_interval; +ALTER TABLE service_state ADD COLUMN affects_children enum('n', 'y') NOT NULL DEFAULT 'n' AFTER in_downtime; +ALTER TABLE service_state MODIFY COLUMN affects_children enum('n', 'y') NOT NULL; + +CREATE TABLE redundancy_group ( + id binary(20) NOT NULL COMMENT 'sha1(name + all(member parent_name + timeperiod.name + states + ignore_soft_states))', + environment_id 
binary(20) NOT NULL COMMENT 'environment.id', + display_name text NOT NULL, + + CONSTRAINT pk_redundancy_group PRIMARY KEY (id) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin ROW_FORMAT=DYNAMIC; + +CREATE TABLE redundancy_group_state ( + id binary(20) NOT NULL COMMENT 'redundancy_group.id', + environment_id binary(20) NOT NULL COMMENT 'environment.id', + redundancy_group_id binary(20) NOT NULL COMMENT 'redundancy_group.id', + failed enum('n', 'y') NOT NULL, + is_reachable enum('n', 'y') NOT NULL, + last_state_change BIGINT UNSIGNED NOT NULL, + + CONSTRAINT pk_redundancy_group_state PRIMARY KEY (id), + + UNIQUE INDEX idx_redundancy_group_state_redundancy_group_id (redundancy_group_id) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin ROW_FORMAT=DYNAMIC; + +CREATE TABLE dependency_node ( + id binary(20) NOT NULL COMMENT 'host.id|service.id|redundancy_group.id', + environment_id binary(20) NOT NULL COMMENT 'environment.id', + host_id binary(20) DEFAULT NULL COMMENT 'host.id', + service_id binary(20) DEFAULT NULL COMMENT 'service.id', + redundancy_group_id binary(20) DEFAULT NULL COMMENT 'redundancy_group.id', + + CONSTRAINT pk_dependency_node PRIMARY KEY (id), + + UNIQUE INDEX idx_dependency_node_host_service_redundancygroup_id (host_id, service_id, redundancy_group_id), + CONSTRAINT ck_dependency_node_either_checkable_or_redundancy_group_id CHECK ( + IF(redundancy_group_id IS NULL, host_id IS NOT NULL, host_id IS NULL AND service_id IS NULL) = 1 + ) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin ROW_FORMAT=DYNAMIC; + +CREATE TABLE dependency_edge_state ( + id binary(20) NOT NULL COMMENT 'sha1([dependency_edge.from_node_id|parent_name + timeperiod.name + states + ignore_soft_states] + dependency_edge.to_node_id)', + environment_id binary(20) NOT NULL COMMENT 'environment.id', + failed enum('n', 'y') NOT NULL, + + CONSTRAINT pk_dependency_edge_state PRIMARY KEY (id) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin 
ROW_FORMAT=DYNAMIC; + +CREATE TABLE dependency_edge ( + id binary(20) NOT NULL COMMENT 'sha1(from_node_id + to_node_id)', + environment_id binary(20) NOT NULL COMMENT 'environment.id', + from_node_id binary(20) NOT NULL COMMENT 'dependency_node.id', + to_node_id binary(20) NOT NULL COMMENT 'dependency_node.id', + dependency_edge_state_id binary(20) NOT NULL COMMENT 'dependency_edge_state.id', + display_name text NOT NULL, + + CONSTRAINT pk_dependency_edge PRIMARY KEY (id), + + UNIQUE INDEX idx_dependency_edge_from_node_to_node_id (from_node_id, to_node_id) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin ROW_FORMAT=DYNAMIC; + +INSERT INTO icingadb_schema (version, timestamp) + VALUES (7, UNIX_TIMESTAMP() * 1000); diff --git a/schema/pgsql/schema.sql b/schema/pgsql/schema.sql index 0ba8e49db..258e80f44 100644 --- a/schema/pgsql/schema.sql +++ b/schema/pgsql/schema.sql @@ -215,6 +215,8 @@ CREATE TABLE host ( check_interval uint NOT NULL, check_retry_interval uint NOT NULL, + total_children uint DEFAULT NULL, + active_checks_enabled boolenum NOT NULL DEFAULT 'n', passive_checks_enabled boolenum NOT NULL DEFAULT 'n', event_handler_enabled boolenum NOT NULL DEFAULT 'n', @@ -427,6 +429,8 @@ CREATE TABLE host_state ( in_downtime boolenum NOT NULL DEFAULT 'n', + affects_children boolenum NOT NULL, + execution_time uint DEFAULT NULL, latency uint DEFAULT NULL, check_timeout uint DEFAULT NULL, @@ -489,6 +493,8 @@ CREATE TABLE service ( check_interval uint NOT NULL, check_retry_interval uint NOT NULL, + total_children uint DEFAULT NULL, + active_checks_enabled boolenum NOT NULL DEFAULT 'n', passive_checks_enabled boolenum NOT NULL DEFAULT 'n', event_handler_enabled boolenum NOT NULL DEFAULT 'n', @@ -697,6 +703,8 @@ CREATE TABLE service_state ( in_downtime boolenum NOT NULL DEFAULT 'n', + affects_children boolenum NOT NULL, + execution_time uint DEFAULT NULL, latency uint DEFAULT NULL, check_timeout uint DEFAULT NULL, @@ -2170,6 +2178,108 @@ COMMENT ON COLUMN 
sla_history_downtime.downtime_id IS 'downtime.id (may referenc COMMENT ON COLUMN sla_history_downtime.downtime_start IS 'start time of the downtime'; COMMENT ON COLUMN sla_history_downtime.downtime_end IS 'end time of the downtime'; +CREATE TABLE redundancy_group ( + id bytea20 NOT NULL, + environment_id bytea20 NOT NULL, + display_name text NOT NULL, + + CONSTRAINT pk_redundancy_group PRIMARY KEY (id) +); + +ALTER TABLE redundancy_group ALTER COLUMN id SET STORAGE PLAIN; +ALTER TABLE redundancy_group ALTER COLUMN environment_id SET STORAGE PLAIN; + +COMMENT ON COLUMN redundancy_group.id IS 'sha1(name + all(member parent_name + timeperiod.name + states + ignore_soft_states))'; +COMMENT ON COLUMN redundancy_group.environment_id IS 'environment.id'; + +CREATE TABLE redundancy_group_state ( + id bytea20 NOT NULL, + environment_id bytea20 NOT NULL, + redundancy_group_id bytea20 NOT NULL, + failed boolenum NOT NULL, + is_reachable boolenum NOT NULL, + last_state_change biguint NOT NULL, + + CONSTRAINT pk_redundancy_group_state PRIMARY KEY (id) +); + +ALTER TABLE redundancy_group_state ALTER COLUMN id SET STORAGE PLAIN; +ALTER TABLE redundancy_group_state ALTER COLUMN environment_id SET STORAGE PLAIN; +ALTER TABLE redundancy_group_state ALTER COLUMN redundancy_group_id SET STORAGE PLAIN; + +CREATE UNIQUE INDEX idx_redundancy_group_state_redundancy_group_id ON redundancy_group_state(redundancy_group_id); + +COMMENT ON COLUMN redundancy_group_state.id IS 'redundancy_group.id'; +COMMENT ON COLUMN redundancy_group_state.environment_id IS 'environment.id'; +COMMENT ON COLUMN redundancy_group_state.redundancy_group_id IS 'redundancy_group.id'; + +CREATE TABLE dependency_node ( + id bytea20 NOT NULL, + environment_id bytea20 NOT NULL, + host_id bytea20 DEFAULT NULL, + service_id bytea20 DEFAULT NULL, + redundancy_group_id bytea20 DEFAULT NULL, + + CONSTRAINT pk_dependency_node PRIMARY KEY (id), + + CONSTRAINT ck_dependency_node_either_checkable_or_redundancy_group_id CHECK ( + 
CASE WHEN redundancy_group_id IS NULL THEN host_id IS NOT NULL ELSE host_id IS NULL AND service_id IS NULL END + ) +); + +ALTER TABLE dependency_node ALTER COLUMN id SET STORAGE PLAIN; +ALTER TABLE dependency_node ALTER COLUMN environment_id SET STORAGE PLAIN; +ALTER TABLE dependency_node ALTER COLUMN host_id SET STORAGE PLAIN; +ALTER TABLE dependency_node ALTER COLUMN service_id SET STORAGE PLAIN; +ALTER TABLE dependency_node ALTER COLUMN redundancy_group_id SET STORAGE PLAIN; + +CREATE UNIQUE INDEX idx_dependency_node_host_service_redundancygroup_id ON dependency_node(host_id, service_id, redundancy_group_id); + +COMMENT ON COLUMN dependency_node.id IS 'host.id|service.id|redundancy_group.id'; +COMMENT ON COLUMN dependency_node.environment_id IS 'environment.id'; +COMMENT ON COLUMN dependency_node.host_id IS 'host.id'; +COMMENT ON COLUMN dependency_node.service_id IS 'service.id'; +COMMENT ON COLUMN dependency_node.redundancy_group_id IS 'redundancy_group.id'; + +CREATE TABLE dependency_edge_state ( + id bytea20 NOT NULL, + environment_id bytea20 NOT NULL, + failed boolenum NOT NULL, + + CONSTRAINT pk_dependency_edge_state PRIMARY KEY (id) +); + +ALTER TABLE dependency_edge_state ALTER COLUMN id SET STORAGE PLAIN; +ALTER TABLE dependency_edge_state ALTER COLUMN environment_id SET STORAGE PLAIN; + +COMMENT ON COLUMN dependency_edge_state.id IS 'sha1([dependency_edge.from_node_id|parent_name + timeperiod.name + states + ignore_soft_states] + dependency_edge.to_node_id)'; +COMMENT ON COLUMN dependency_edge_state.environment_id IS 'environment.id'; + +CREATE TABLE dependency_edge ( + id bytea20 NOT NULL, + environment_id bytea20 NOT NULL, + from_node_id bytea20 NOT NULL, + to_node_id bytea20 NOT NULL, + dependency_edge_state_id bytea20 NOT NULL, + display_name text NOT NULL, + + CONSTRAINT pk_dependency_edge PRIMARY KEY (id) +); + +ALTER TABLE dependency_edge ALTER COLUMN id SET STORAGE PLAIN; +ALTER TABLE dependency_edge ALTER COLUMN environment_id SET STORAGE 
PLAIN; +ALTER TABLE dependency_edge ALTER COLUMN from_node_id SET STORAGE PLAIN; +ALTER TABLE dependency_edge ALTER COLUMN to_node_id SET STORAGE PLAIN; +ALTER TABLE dependency_edge ALTER COLUMN dependency_edge_state_id SET STORAGE PLAIN; + +CREATE UNIQUE INDEX idx_dependency_edge_from_node_to_node_id ON dependency_edge(from_node_id, to_node_id); + +COMMENT ON COLUMN dependency_edge.id IS 'sha1(from_node_id + to_node_id)'; +COMMENT ON COLUMN dependency_edge.environment_id IS 'environment.id'; +COMMENT ON COLUMN dependency_edge.from_node_id IS 'dependency_node.id'; +COMMENT ON COLUMN dependency_edge.to_node_id IS 'dependency_node.id'; +COMMENT ON COLUMN dependency_edge.dependency_edge_state_id IS 'dependency_edge_state.id'; + CREATE SEQUENCE icingadb_schema_id_seq; CREATE TABLE icingadb_schema ( @@ -2183,4 +2293,4 @@ CREATE TABLE icingadb_schema ( ALTER SEQUENCE icingadb_schema_id_seq OWNED BY icingadb_schema.id; INSERT INTO icingadb_schema (version, timestamp) - VALUES (4, extract(epoch from now()) * 1000); + VALUES (5, extract(epoch from now()) * 1000); diff --git a/schema/pgsql/upgrades/1.4.0.sql b/schema/pgsql/upgrades/1.4.0.sql new file mode 100644 index 000000000..8ed178004 --- /dev/null +++ b/schema/pgsql/upgrades/1.4.0.sql @@ -0,0 +1,112 @@ +ALTER TABLE host ADD COLUMN total_children uint DEFAULT NULL; +ALTER TABLE host_state ADD COLUMN affects_children boolenum NOT NULL DEFAULT 'n'; +ALTER TABLE host_state ALTER COLUMN affects_children DROP DEFAULT; + +ALTER TABLE service ADD COLUMN total_children uint DEFAULT NULL; +ALTER TABLE service_state ADD COLUMN affects_children boolenum NOT NULL DEFAULT 'n'; +ALTER TABLE service_state ALTER COLUMN affects_children DROP DEFAULT; + +CREATE TABLE redundancy_group ( + id bytea20 NOT NULL, + environment_id bytea20 NOT NULL, + display_name text NOT NULL, + + CONSTRAINT pk_redundancy_group PRIMARY KEY (id) +); + +ALTER TABLE redundancy_group ALTER COLUMN id SET STORAGE PLAIN; +ALTER TABLE redundancy_group ALTER
COLUMN environment_id SET STORAGE PLAIN; + +COMMENT ON COLUMN redundancy_group.id IS 'sha1(name + all(member parent_name + timeperiod.name + states + ignore_soft_states))'; +COMMENT ON COLUMN redundancy_group.environment_id IS 'environment.id'; + +CREATE TABLE redundancy_group_state ( + id bytea20 NOT NULL, + environment_id bytea20 NOT NULL, + redundancy_group_id bytea20 NOT NULL, + failed boolenum NOT NULL, + is_reachable boolenum NOT NULL, + last_state_change biguint NOT NULL, + + CONSTRAINT pk_redundancy_group_state PRIMARY KEY (id) +); + +ALTER TABLE redundancy_group_state ALTER COLUMN id SET STORAGE PLAIN; +ALTER TABLE redundancy_group_state ALTER COLUMN environment_id SET STORAGE PLAIN; +ALTER TABLE redundancy_group_state ALTER COLUMN redundancy_group_id SET STORAGE PLAIN; + +CREATE UNIQUE INDEX idx_redundancy_group_state_redundancy_group_id ON redundancy_group_state(redundancy_group_id); + +COMMENT ON COLUMN redundancy_group_state.id IS 'redundancy_group.id'; +COMMENT ON COLUMN redundancy_group_state.environment_id IS 'environment.id'; +COMMENT ON COLUMN redundancy_group_state.redundancy_group_id IS 'redundancy_group.id'; + +CREATE TABLE dependency_node ( + id bytea20 NOT NULL, + environment_id bytea20 NOT NULL, + host_id bytea20 DEFAULT NULL, + service_id bytea20 DEFAULT NULL, + redundancy_group_id bytea20 DEFAULT NULL, + + CONSTRAINT pk_dependency_node PRIMARY KEY (id), + + CONSTRAINT ck_dependency_node_either_checkable_or_redundancy_group_id CHECK ( + CASE WHEN redundancy_group_id IS NULL THEN host_id IS NOT NULL ELSE host_id IS NULL AND service_id IS NULL END + ) +); + +ALTER TABLE dependency_node ALTER COLUMN id SET STORAGE PLAIN; +ALTER TABLE dependency_node ALTER COLUMN environment_id SET STORAGE PLAIN; +ALTER TABLE dependency_node ALTER COLUMN host_id SET STORAGE PLAIN; +ALTER TABLE dependency_node ALTER COLUMN service_id SET STORAGE PLAIN; +ALTER TABLE dependency_node ALTER COLUMN redundancy_group_id SET STORAGE PLAIN; + +CREATE UNIQUE INDEX 
idx_dependency_node_host_service_redundancygroup_id ON dependency_node(host_id, service_id, redundancy_group_id); + +COMMENT ON COLUMN dependency_node.id IS 'host.id|service.id|redundancy_group.id'; +COMMENT ON COLUMN dependency_node.environment_id IS 'environment.id'; +COMMENT ON COLUMN dependency_node.host_id IS 'host.id'; +COMMENT ON COLUMN dependency_node.service_id IS 'service.id'; +COMMENT ON COLUMN dependency_node.redundancy_group_id IS 'redundancy_group.id'; + +CREATE TABLE dependency_edge_state ( + id bytea20 NOT NULL, + environment_id bytea20 NOT NULL, + failed boolenum NOT NULL, + + CONSTRAINT pk_dependency_edge_state PRIMARY KEY (id) +); + +ALTER TABLE dependency_edge_state ALTER COLUMN id SET STORAGE PLAIN; +ALTER TABLE dependency_edge_state ALTER COLUMN environment_id SET STORAGE PLAIN; + +COMMENT ON COLUMN dependency_edge_state.id IS 'sha1([dependency_edge.from_node_id|parent_name + timeperiod.name + states + ignore_soft_states] + dependency_edge.to_node_id)'; +COMMENT ON COLUMN dependency_edge_state.environment_id IS 'environment.id'; + +CREATE TABLE dependency_edge ( + id bytea20 NOT NULL, + environment_id bytea20 NOT NULL, + from_node_id bytea20 NOT NULL, + to_node_id bytea20 NOT NULL, + dependency_edge_state_id bytea20 NOT NULL, + display_name text NOT NULL, + + CONSTRAINT pk_dependency_edge PRIMARY KEY (id) +); + +ALTER TABLE dependency_edge ALTER COLUMN id SET STORAGE PLAIN; +ALTER TABLE dependency_edge ALTER COLUMN environment_id SET STORAGE PLAIN; +ALTER TABLE dependency_edge ALTER COLUMN from_node_id SET STORAGE PLAIN; +ALTER TABLE dependency_edge ALTER COLUMN to_node_id SET STORAGE PLAIN; +ALTER TABLE dependency_edge ALTER COLUMN dependency_edge_state_id SET STORAGE PLAIN; + +CREATE UNIQUE INDEX idx_dependency_edge_from_node_to_node_id ON dependency_edge(from_node_id, to_node_id); + +COMMENT ON COLUMN dependency_edge.id IS 'sha1(from_node_id + to_node_id)'; +COMMENT ON COLUMN dependency_edge.environment_id IS 'environment.id'; +COMMENT ON 
COLUMN dependency_edge.from_node_id IS 'dependency_node.id'; +COMMENT ON COLUMN dependency_edge.to_node_id IS 'dependency_node.id'; +COMMENT ON COLUMN dependency_edge.dependency_edge_state_id IS 'dependency_edge_state.id'; + +INSERT INTO icingadb_schema (version, timestamp) + VALUES (5, extract(epoch from now()) * 1000); diff --git a/tests/go.mod b/tests/go.mod index 94b7e0559..e1c439f66 100644 --- a/tests/go.mod +++ b/tests/go.mod @@ -4,15 +4,16 @@ go 1.23.0 require ( github.com/go-sql-driver/mysql v1.9.1 - github.com/goccy/go-yaml v1.12.0 + github.com/goccy/go-yaml v1.13.0 github.com/google/uuid v1.6.0 + github.com/icinga/icinga-go-library v0.6.1 github.com/icinga/icinga-testing v0.0.0-20240322142451-494ccd6d03e8 github.com/jmoiron/sqlx v1.4.0 github.com/lib/pq v1.10.9 github.com/redis/go-redis/v9 v9.7.3 github.com/stretchr/testify v1.10.0 go.uber.org/zap v1.27.0 - golang.org/x/exp v0.0.0-20221012112151-59b0eab1532e + golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 golang.org/x/sync v0.12.0 ) @@ -28,13 +29,13 @@ require ( github.com/docker/docker v25.0.6+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/fatih/color v1.10.0 // indirect + github.com/fatih/color v1.18.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/mattn/go-colorable v0.1.8 // indirect - github.com/mattn/go-isatty v0.0.12 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0-rc5 // indirect github.com/pkg/errors v0.9.1 // indirect @@ -46,10 +47,9 @@ require ( go.opentelemetry.io/otel/sdk v1.28.0 // indirect go.opentelemetry.io/otel/trace v1.28.0 // indirect go.uber.org/multierr v1.11.0 //
indirect - golang.org/x/mod v0.14.0 // indirect + golang.org/x/mod v0.17.0 // indirect golang.org/x/net v0.36.0 // indirect golang.org/x/sys v0.30.0 // indirect - golang.org/x/tools v0.16.1 // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + golang.org/x/tools v0.21.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/tests/go.sum b/tests/go.sum index 4824452cc..aff30ba26 100644 --- a/tests/go.sum +++ b/tests/go.sum @@ -35,27 +35,29 @@ github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKoh github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg= -github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= +github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-playground/locales v0.13.0 
h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE= -github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.22.1 h1:40JcKH+bBNGFczGuoBYgX4I6m/i27HYW8P9FDk5PbgA= +github.com/go-playground/validator/v10 v10.22.1/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-sql-driver/mysql v1.9.1 h1:FrjNGn/BsJQjVRuSa8CBrM5BWA9BWoXXat3KrtSb/iI= github.com/go-sql-driver/mysql v1.9.1/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= -github.com/goccy/go-yaml v1.12.0 h1:/1WHjnMsI1dlIBQutrvSMGZRQufVO3asrHfTwfACoPM= -github.com/goccy/go-yaml v1.12.0/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU= +github.com/goccy/go-yaml v1.13.0 h1:0Wtp0FZLd7Sm8gERmR9S6Iczzb3vItJj7NaHmFg8pTs= +github.com/goccy/go-yaml v1.13.0/go.mod h1:IjYwxUiJDoqpx2RmbdjMUceGHZwYLon3sfOGl5Hi9lc= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod 
h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -67,6 +69,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/icinga/icinga-go-library v0.6.1 h1:IabSHYnwYkeXpThhtTxAZWklFlmxuFnXPLD7lT8CGmQ= +github.com/icinga/icinga-go-library v0.6.1/go.mod h1:Rcyn5qvBJM6JJ5EFozKNU1IN7vAqgtVJ7x3g1hZe3HU= github.com/icinga/icinga-testing v0.0.0-20240322142451-494ccd6d03e8 h1:PI+39IY1BjN24JC3B6Jy0rhwm3hqC4SnQFxbZjXOaHk= github.com/icinga/icinga-testing v0.0.0-20240322142451-494ccd6d03e8/go.mod h1:xjNiwePgnSVKJWPG/iFG7pNOibU/OWp01Zdl08o+EeI= github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= @@ -81,14 +85,15 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= -github.com/mattn/go-colorable v0.1.8/go.mod 
h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus= github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= @@ -172,8 +177,8 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= -golang.org/x/exp v0.0.0-20221012112151-59b0eab1532e h1:/SJUJZl3kz7J5GzAx5lgaKvqKGd4OfzshwDMr6YJCC4= -golang.org/x/exp v0.0.0-20221012112151-59b0eab1532e/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= +golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -182,8 +187,9 @@ 
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -215,8 +221,6 @@ golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -227,8 +231,10 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys 
v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= @@ -265,12 +271,12 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= -golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw= +golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= 
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0= google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw= diff --git a/tests/object_sync_test.conf b/tests/object_sync_test.conf index e4c6da67d..47eb5d79e 100644 --- a/tests/object_sync_test.conf +++ b/tests/object_sync_test.conf @@ -52,3 +52,42 @@ object User "{{$user}}" { ] } {{end}} + +object TimePeriod "workhours" { + ranges = {} +} + +object TimePeriod "never-ever" { + ranges = {} +} + +template Host "dependency-host-template" { + check_command = "dummy" + max_check_attempts = 1 + check_interval = 300s + vars.dummy_text = "I'm just testing something" + vars.dummy_state = 0 +} + +for (suffix in ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"]) { + object Host "Host"+suffix { + import "dependency-host-template" + } +} + +{{range $dependencyGroups := .DependencyGroups}} + {{range $dependencyGroup := .Groups}} + {{range $parent := .Parents}} + {{range $child := $dependencyGroup.Children}} + object Dependency "dependency"+random() { + parent_host_name = "{{$parent}}" + child_host_name = "{{$child}}" + ignore_soft_states = {{$dependencyGroup.IgnoreSoftStates}} + states = [ {{NaturalJoin $dependencyGroup.StatesFilter ", "}} ] + period = "{{$dependencyGroup.TimePeriod}}" + redundancy_group = "{{$dependencyGroup.RedundancyGroupName}}" + } + {{end}} + {{end}} + {{end}} +{{end}} diff --git a/tests/object_sync_test.go b/tests/object_sync_test.go index 23d2c96a0..7c3716b9f 100644 --- a/tests/object_sync_test.go +++ b/tests/object_sync_test.go @@ -6,6 +6,7 @@ import ( "database/sql" _ "embed" "fmt" + "github.com/icinga/icinga-go-library/types" "github.com/icinga/icinga-testing/services" 
"github.com/icinga/icinga-testing/utils" "github.com/icinga/icinga-testing/utils/eventually" @@ -28,7 +29,7 @@ import ( //go:embed object_sync_test.conf var testSyncConfRaw string -var testSyncConfTemplate = template.Must(template.New("testdata.conf").Parse(testSyncConfRaw)) +var testSyncConfTemplate = template.Must(template.New("testdata.conf").Funcs(template.FuncMap{"NaturalJoin": strings.Join}).Parse(testSyncConfRaw)) var usergroups = []string{ "testusergroup1", @@ -54,6 +55,7 @@ func TestObjectSync(t *testing.T) { Notifications []Notification NotificationUsers map[string]map[string]struct{} NotificationUserGroups []string + DependencyGroups []DependencyGroupTestCases } data := &Data{ // Some name prefixes to loop over in the template to generate multiple instances of objects, @@ -66,6 +68,7 @@ func TestObjectSync(t *testing.T) { Notifications: makeTestNotifications(t), NotificationUsers: users, NotificationUserGroups: usergroups, + DependencyGroups: makeDependencyGroupTestCases(), } r := it.RedisServerT(t) @@ -93,7 +96,7 @@ func TestObjectSync(t *testing.T) { //logger.Sugar().Infof("config:\n\n%s\n\n", conf.String()) i.WriteConfig("etc/icinga2/conf.d/testdata.conf", conf.Bytes()) i.EnableIcingaDb(r) - i.Reload() + require.NoError(t, i.Reload(), "reload Icinga 2 daemon") // Wait for Icinga 2 to signal a successful dump before starting // Icinga DB to ensure that we actually test the initial sync. 
@@ -311,6 +314,24 @@ func TestObjectSync(t *testing.T) { t.Skip() }) + t.Run("Dependency", func(t *testing.T) { + t.Parallel() + + t.Cleanup(func() { assertNoDependencyDanglingReferences(t, r, db) }) + + for _, dependencyGroupTest := range data.DependencyGroups { + t.Run(dependencyGroupTest.TestName, func(t *testing.T) { + t.Parallel() + + for _, dependencyGroup := range dependencyGroupTest.Groups { + eventually.Assert(t, func(t require.TestingT) { + dependencyGroup.verify(t, db, &dependencyGroupTest) + }, 20*time.Second, 200*time.Millisecond) + } + }) + } + }) + t.Run("RuntimeUpdates", func(t *testing.T) { t.Parallel() @@ -612,6 +633,149 @@ func TestObjectSync(t *testing.T) { }) }) + t.Run("Dependency", func(t *testing.T) { + t.Parallel() + + // Make sure to check for any dangling references after all the subtests have run, i.e. as part of the + // parent test (Dependencies) teardown process. Note, this isn't the same as using plain defer ..., as + // all the subtests runs in parallel, and we want to make sure that the check is performed after all of + // them have completed and not when this closure returns. + t.Cleanup(func() { assertNoDependencyDanglingReferences(t, r, db) }) + + for _, testCase := range makeRuntimeDependencyGroupTestCases() { + t.Run("CreateAndDelete-"+testCase.TestName, func(t *testing.T) { + t.Parallel() + + var totalChildren []string + for _, group := range testCase.Groups { + if group.HasNewParents { + // The last test case has parents that do not exist, so we need to create them. 
+ for _, parent := range group.Parents { + client.CreateObject(t, "hosts", parent, map[string]any{ + "templates": []string{"dependency-host-template"}, + }) + } + } + + for _, child := range group.Children { + if !slices.Contains(totalChildren, child) { // Create the children only once + totalChildren = append(totalChildren, child) + client.CreateObject(t, "hosts", child, map[string]any{ + "templates": []string{"dependency-host-template"}, + }) + } + + for _, parent := range group.Parents { + client.CreateObject(t, "dependencies", child+"!"+utils.RandomString(4), map[string]any{ + "attrs": map[string]any{ + "parent_host_name": parent, + "child_host_name": child, + "redundancy_group": group.RedundancyGroupName, + "ignore_soft_states": group.IgnoreSoftStates, + "period": group.TimePeriod, + "states": group.StatesFilter, + }, + }) + } + } + } + + // Verify that the dependencies have been correctly created and serialized. + for _, group := range testCase.Groups { + eventually.Assert(t, func(t require.TestingT) { + group.verify(t, db, &testCase) + }, 20*time.Second, 200*time.Millisecond) + } + + // Now delete all the children one by one and verify that the dependencies are correctly updated. + for i, child := range totalChildren { + client.DeleteObject(t, "hosts", child, true) + + // Remove the child from all the dependency groups + for _, g := range testCase.Groups { + g.RemoveChild(child) + } + + // Verify child's runtime deletion event. + for _, g := range testCase.Groups { + if i == len(totalChildren)-1 { + assert.Emptyf(t, g.Children, "all children should have been deleted") + } + eventually.Assert(t, func(t require.TestingT) { g.verify(t, db, &testCase) }, 20*time.Second, 200*time.Millisecond) + } + } + }) + } + + t.Run("Update", func(t *testing.T) { + t.Parallel() + + // Add some test cases for joining and leaving existing dependency groups. 
+ for _, testCase := range makeDependencyGroupTestCases() { + t.Run(testCase.TestName, func(t *testing.T) { + t.Parallel() + + var newChildren []string + for i := 0; i < 4; i++ { + child := utils.RandomString(8) + newChildren = append(newChildren, child) + client.CreateObject(t, "hosts", child, map[string]any{ + "templates": []string{"dependency-host-template"}, + }) + } + + for _, group := range testCase.Groups { + // Add the new children to the dependency group + group.Children = append(group.Children, newChildren...) + + // Now, create for each of the new children a dependency to each of the parents in + // the group using the group's settings and verify they've joined the group. + for _, child := range newChildren { + for _, parent := range group.Parents { + client.CreateObject(t, "dependencies", child+"!"+utils.RandomString(4), map[string]any{ + "attrs": map[string]any{ + "parent_host_name": parent, + "child_host_name": child, + "redundancy_group": group.RedundancyGroupName, + "ignore_soft_states": group.IgnoreSoftStates, + "period": group.TimePeriod, + "states": group.StatesFilter, + }, + }) + } + } + } + + // Perform the verification for each group with the new children added. + for _, group := range testCase.Groups { + eventually.Assert(t, func(t require.TestingT) { + group.verify(t, db, &testCase) + }, 20*time.Second, 200*time.Millisecond) + } + + // Now, remove the new children from the dependency groups and verify they've left the group. 
+ for _, child := range newChildren { + client.DeleteObject(t, "hosts", child, true) + + // Decouple the child from all the dependency groups + for _, group := range testCase.Groups { + group.RemoveChild(child) + } + + for _, group := range testCase.Groups { + eventually.Assert(t, func(t require.TestingT) { + group.verify(t, db, &testCase) + }, 20*time.Second, 200*time.Millisecond) + } + } + // The last iteration of the above loop should have removed all the children from the + // groups and performed the final verification with their original state, so we don't + // need to repeat it here. + }) + } + }) + }) + // TODO(jb): add tests for remaining config object types }) } @@ -1049,6 +1213,687 @@ func makeTestNotifications(t *testing.T) []Notification { return notifications } +type DependencyGroup struct { + RedundancyGroupId types.Binary // The ID of the redundancy group (nil if not a redundancy group). + RedundancyGroupName string // The name of the redundancy group (empty if not a redundancy group). + + // These fields define how and to which checkable objects the dependency group applies to and + // from which objects it depends on. They're used to produce the configuration objects and won't + // be used for verification except the Parents and Children fields. + Parents []string + Children []string + StatesFilter []string + TimePeriod string + IgnoreSoftStates bool + + // HasNewParents determines if the dependency group has new parents that do not exist in the database. + // This is only used to test the runtime creation of a dependency group with new parents. + HasNewParents bool +} + +// DependencyGroupTestCases contains identical dependency group that can be tested in a single subtest. +// The TestName field is used to name the subtest for those dependency groups. 
+type DependencyGroupTestCases struct { + TestName string + Groups []*DependencyGroup +} + +func (g *DependencyGroup) IsRedundancyGroup() bool { return g.RedundancyGroupName != "" } + +func (g *DependencyGroup) RemoveChild(child string) { + g.Children = slices.DeleteFunc(g.Children, func(c string) bool { return c == child }) +} + +// assertNoDependencyDanglingReferences verifies that there are no dangling references in the dependency_node, +// dependency_edge and redundancy_group_state tables. +// +// Since the dependency group tests are executed in parallel, there is a chance that the database becomes inconsistent +// for a split second, which can be detected by this function and will cause the test to fail. So, calling this function +// only once after all tests have finished is sufficient. +func assertNoDependencyDanglingReferences(t require.TestingT, r services.RedisServer, db *sqlx.DB) { + rc := r.Open() + defer func() { _ = rc.Close() }() + + redisHGetCheck := func(key, field string) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + result, err := rc.HGet(ctx, key, field).Result() + if err != nil { + assert.Equal(t, redis.Nil, err) + } + assert.Emptyf(t, result, "%s %q exists in Redis but not in the database", strings.Split(key, ":")[1], field) + } + + var nodes []struct { + HostID types.Binary `db:"host_id"` + ServiceID types.Binary `db:"service_id"` + RedundancyGroupID types.Binary `db:"redundancy_group_id"` + } + err := db.Select(&nodes, `SELECT host_id, service_id, redundancy_group_id FROM dependency_node`) + require.NoError(t, err, "querying dependency nodes") + + // Check if there are any dangling references in the dependency_node table, i.e. nodes that reference + // unknown hosts, services or redundancy groups. 
+ for _, node := range nodes { + var exists bool + if node.HostID != nil { + assert.Nilf(t, node.RedundancyGroupID, "node redudancy group ID should be nil if host ID is set") + + err := db.Get(&exists, db.Rebind(`SELECT EXISTS (SELECT 1 FROM host WHERE id = ?)`), node.HostID) + assert.NoError(t, err, "querying host existence") + assert.Truef(t, exists, "host %q should exist", node.HostID) + + if !exists { + redisHGetCheck("icinga:host", node.HostID.String()) + } + } + + if node.ServiceID != nil { + assert.NotNil(t, node.HostID, "node host ID should be set if service ID is set") + assert.Nilf(t, node.RedundancyGroupID, "node redudancy group ID should be nil if service ID is set") + + err := db.Get(&exists, db.Rebind(`SELECT EXISTS (SELECT 1 FROM service WHERE id = ?)`), node.ServiceID) + assert.NoError(t, err, "querying service existence") + assert.Truef(t, exists, "service %q should exist", node.ServiceID) + + if !exists { + redisHGetCheck("icinga:service", node.ServiceID.String()) + } + } + + if node.RedundancyGroupID != nil { + assert.Nilf(t, node.HostID, "node host ID should be nil if redundancy group ID is set") + assert.Nilf(t, node.ServiceID, "node service ID should be nil if redundancy group ID is set") + + err := db.Get(&exists, db.Rebind(`SELECT EXISTS (SELECT 1 FROM redundancy_group WHERE id = ?)`), node.RedundancyGroupID) + assert.NoError(t, err, "querying redundancy group existence") + assert.Truef(t, exists, "redundancy group %q should exist", node.RedundancyGroupID) + + if !exists { + redisHGetCheck("icinga:redundancygroup", node.RedundancyGroupID.String()) + } + } + } + + var edges []struct { + FromNodeID types.Binary `db:"from_node_id"` + ToNodeID types.Binary `db:"to_node_id"` + StateID types.Binary `db:"dependency_edge_state_id"` + } + err = db.Select(&edges, `SELECT from_node_id, to_node_id, dependency_edge_state_id FROM dependency_edge`) + require.NoError(t, err, "querying dependency edges") + + // Check if there are any dangling references in 
the dependency_edge table, i.e. edges that reference + // unknown from/to nodes or dependency edge states. + for _, edge := range edges { + assert.NotNil(t, edge.FromNodeID, "from node ID should be set") + assert.NotNil(t, edge.ToNodeID, "to node ID should be set") + assert.NotNil(t, edge.StateID, "dependency edge state ID should be set") + + var exists bool + err := db.Get(&exists, db.Rebind(`SELECT EXISTS (SELECT 1 FROM dependency_node WHERE id = ?)`), edge.FromNodeID) + assert.NoError(t, err, "querying child/from node existence") + assert.Truef(t, exists, "child/from node %q should exist", edge.FromNodeID) + + if !exists { + redisHGetCheck("icinga:dependency:node", edge.FromNodeID.String()) + } + + err = db.Get(&exists, db.Rebind(`SELECT EXISTS (SELECT 1 FROM dependency_node WHERE id = ?)`), edge.ToNodeID) + assert.NoError(t, err, "querying parent/to node existence") + assert.Truef(t, exists, "parent/to node %q should exist", edge.ToNodeID) + + if !exists { + redisHGetCheck("icinga:dependency:node", edge.ToNodeID.String()) + } + + err = db.Get(&exists, db.Rebind(`SELECT EXISTS (SELECT 1 FROM dependency_edge_state WHERE id = ?)`), edge.StateID) + assert.NoError(t, err, "querying dependency edge state existence") + assert.Truef(t, exists, "dependency edge state %q should exist", edge.StateID) + + if !exists { + redisHGetCheck("icinga:dependency:edge:state", edge.StateID.String()) + } + } + + // TODO: Icinga 2 doesn't send runtime delete event for those two tables, since Icinga DB does not handle + // them well yet. This is because the runtime sync pipeline is set to process upsert/delete events concurrently, + // which can sometimes lead to race conditions where the two events are processed in the wrong order. 
+ /*var stateIDs []types.Binary + err = db.Select(&stateIDs, `SELECT id FROM dependency_edge_state WHERE id NOT IN (SELECT dependency_edge_state_id FROM dependency_edge)`) + assert.NoError(t, err, "querying dangling dependency edge states") + assert.Len(t, stateIDs, 0, "all dependency_edge_state IDs should be referenced by a dependency_edge") + + for _, stateID := range stateIDs { + // Check if these dangling state IDs are still present in Redis. + redisHGetCheck("icinga:dependency:edge:state", stateID.String()) + } + + stateIDs = nil + // Verify that all redundancy group states do reference an existing redundancy group. + err = db.Select(&stateIDs, `SELECT id FROM redundancy_group_state WHERE id NOT IN (SELECT id FROM redundancy_group)`) + assert.NoError(t, err, "querying dangling redundancy group states") + assert.Len(t, stateIDs, 0, "redundancy_group_state referencing unknown redundancy groups") + + for _, stateID := range stateIDs { + // Check if these dangling state IDs are still present in Redis. + redisHGetCheck("icinga:redundancygroup:state", stateID.String()) + }*/ +} + +// verify performs a series of checks to ensure that the dependency group is correctly represented in the database. +// +// The following checks are performed: +// - Verify that the redundancy group (if any) is referenced by the children hosts as their parent node. +// - Verify that all child and parent Checkables of this group are in the regular Icinga DB tables and the redundancy +// group (if any) is in the redundancy_group table as well as having a state in the redundancy_group_state table. +// - Verify that the dependency_node and dependency_edge tables are correctly populated with the expected from/to +// nodes according to the group's configuration. This includes verifying the connection between the child Checkables +// and the redundancy group (if any) and from redundancy group to parent Checkables. 
+func (g *DependencyGroup) verify(t require.TestingT, db *sqlx.DB, dependencyGroupTest *DependencyGroupTestCases) { + if len(g.Children) == 0 { + if g.IsRedundancyGroup() { + // Verify that the redundancy group was deleted from the database after removing all of its children. + // Requires that this redundancy group has been verified before using this method and its ID is set accordingly. + require.NotNilf(t, g.RedundancyGroupId, "ID should be set for redundancy group %q", g.RedundancyGroupName) + + var exists bool + err := db.Get(&exists, db.Rebind(`SELECT EXISTS(SELECT 1 FROM redundancy_group WHERE id = ?)`), g.RedundancyGroupId) + assert.NoErrorf(t, err, "fetching redundancy group %q with ID %q", g.RedundancyGroupName, g.RedundancyGroupId) + assert.Falsef(t, exists, "redundancy group %q with ID %q should not exist", g.RedundancyGroupName, g.RedundancyGroupId) + + // Runtime state deletion doesn't work reliably (see the TODO in assertNoDependencyDanglingReferences()). + /*err = db.Get(&exists, db.Rebind(`SELECT EXISTS(SELECT 1 FROM redundancy_group_state WHERE id = ?)`), g.RedundancyGroupId) + assert.NoErrorf(t, err, "fetching redundancy group state by ID %q", g.RedundancyGroupId) + assert.Falsef(t, exists, "redundancy group state with ID %q should not exist", g.RedundancyGroupId)*/ + } + + return + } + + expectedTotalChildren := g.Children + expectedTotalParents := g.Parents + // Retrieve all children and parents of all the dependency groups of this subtest. + for _, sibling := range dependencyGroupTest.Groups { + for _, child := range sibling.Children { + if !slices.Contains(expectedTotalChildren, child) { + expectedTotalChildren = append(expectedTotalChildren, child) + } + } + + for _, parent := range sibling.Parents { + if !slices.Contains(expectedTotalParents, parent) { + expectedTotalParents = append(expectedTotalParents, parent) + } + } + } + + // Fetch the redundancy group referenced by the children hosts as their parent node (if any). 
+ // If the dependency group is a redundancy group, the redundancy group itself should be the parent node + // and if the dependency serialization is correct, we should find only a single redundancy group. + query, args, err := sqlx.In(`SELECT DISTINCT rg.id, rg.display_name + FROM redundancy_group rg + INNER JOIN dependency_node parent ON parent.redundancy_group_id = rg.id + INNER JOIN dependency_edge tn ON tn.to_node_id = parent.id + INNER JOIN dependency_node child ON child.id = tn.from_node_id + INNER JOIN host child_host ON child_host.id = child.host_id + INNER JOIN redundancy_group_state rgs ON rgs.redundancy_group_id = rg.id + WHERE child_host.name IN (?) AND rg.display_name = ?`, + expectedTotalChildren, g.RedundancyGroupName, + ) + require.NoError(t, err, "expanding SQL IN clause for redundancy groups query") + + var redundancyGroups []struct { + Id types.Binary `db:"id"` + Name string `db:"display_name"` + } + + err = db.Select(&redundancyGroups, db.Rebind(query), args...) + require.NoError(t, err, "fetching redundancy groups") + + if g.IsRedundancyGroup() { + require.Lenf(t, redundancyGroups, 1, "there should be exactly one redundancy group %q", g.RedundancyGroupName) + g.RedundancyGroupId = redundancyGroups[0].Id + assert.Equal(t, g.RedundancyGroupName, redundancyGroups[0].Name, "redundancy group name should match") + } else { + assert.Lenf(t, redundancyGroups, 0, "there should be no redundancy group %q", g.RedundancyGroupName) + } + + type Checkable struct { + NodeId types.Binary + EdgeStateId types.Binary + Id types.Binary `db:"id"` + Name string `db:"name"` + } + + // Perform some basic sanity checks on the hosts and redundancy groups (if any). + query, args, err = sqlx.In( + "SELECT id, name FROM host WHERE name IN (?)", + append(append([]string(nil), expectedTotalParents...), expectedTotalChildren...), + ) + require.NoError(t, err, "expanding SQL IN clause for hosts query") + + hostRows, err := db.Queryx(db.Rebind(query), args...) 
+ require.NoError(t, err, "querying parent and child hosts") + defer hostRows.Close() + + checkables := make(map[string]*Checkable) + for hostRows.Next() { + var c Checkable + require.NoError(t, hostRows.StructScan(&c), "scanning host row") + checkables[c.Name] = &c + + // Retrieve the dependency node and dependency edge state ID of the current host. + assert.NoError(t, db.Get(&c.NodeId, db.Rebind(`SELECT id FROM dependency_node WHERE host_id = ?`), c.Id)) + assert.NotNilf(t, c.NodeId, "host %q should have a dependency node", c.Name) + + if slices.Contains(expectedTotalChildren, c.Name) && g.IsRedundancyGroup() { + assert.NoError(t, db.Get(&c.EdgeStateId, db.Rebind(`SELECT dependency_edge_state_id FROM dependency_edge WHERE to_node_id = ?`), g.RedundancyGroupId)) + assert.NotNilf(t, c.EdgeStateId, "host %q should have a dependency edge state", c.Name) + } + } + assert.NoError(t, hostRows.Err(), "scanned host rows should not have errors") + assert.Len(t, checkables, len(expectedTotalParents)+len(expectedTotalChildren), "all hosts should be in the database") + + type Edge struct { + ParentRedundancyGroupId types.Binary `db:"node_id"` + ParentName string `db:"name"` + FromNodeId types.Binary `db:"from_node_id"` + ToNodeId types.Binary `db:"to_node_id"` + } + + // For all children, retrieve edges to their parent nodes and join information about the parent host or redundancy group. 
+ query, args, err = sqlx.In(`SELECT + redundancy_group.id AS node_id, + COALESCE(parent_host.name, redundancy_group.display_name) AS name, + edge.from_node_id, + edge.to_node_id + FROM dependency_edge edge + INNER JOIN dependency_node parent ON parent.id = edge.to_node_id + INNER JOIN dependency_node child ON child.id = edge.from_node_id + INNER JOIN host child_host ON child_host.id = child.host_id + LEFT JOIN host parent_host ON parent_host.id = parent.host_id + LEFT JOIN redundancy_group ON redundancy_group.id = parent.redundancy_group_id + WHERE child_host.name IN (?)`, + expectedTotalChildren, + ) + require.NoError(t, err, "expanding SQL IN clause for parent nodes query") + + edgesByParentName := make(map[string]*Edge) + dbEdges, err := db.Queryx(db.Rebind(query), args...) + require.NoError(t, err, "querying parent nodes") + defer dbEdges.Close() + + redundancyGroupChildNodeIds := make(map[string]bool) + checkableChildNodeIds := make(map[string]map[string]bool) + for dbEdges.Next() { + var edge Edge + require.NoError(t, dbEdges.StructScan(&edge), "scanning parent node row") + + if g.IsRedundancyGroup() && edge.ParentName == g.RedundancyGroupName { + edgesByParentName[edge.ParentName] = &edge + // Cache the from_node_id of these retrieved parent nodes (this redundancy group), as we need to verify + // that these IDs represent those of the children hosts of this group. + redundancyGroupChildNodeIds[edge.FromNodeId.String()] = true + } else if !g.IsRedundancyGroup() { + edgesByParentName[edge.ParentName] = &edge + // Cache the from_node_id of these retrieved parent nodes (the parent hosts), as we need to + // verify that these IDs represent those of the children hosts of this group. 
+			if _, ok := checkableChildNodeIds[edge.FromNodeId.String()]; !ok {
+				checkableChildNodeIds[edge.FromNodeId.String()] = make(map[string]bool)
+			}
+			checkableChildNodeIds[edge.FromNodeId.String()][edge.ToNodeId.String()] = true
+		}
+	}
+	assert.NoError(t, dbEdges.Err(), "scanned parent node rows should not have errors")
+
+	expectedParentCount := len(expectedTotalParents)
+	if g.IsRedundancyGroup() {
+		expectedParentCount = 1 // All the children should have the redundancy group as parent node!
+		assert.Lenf(t,
+			redundancyGroupChildNodeIds,
+			len(expectedTotalChildren),
+			"children %v should only reference %q as parent node",
+			expectedTotalChildren, g.RedundancyGroupName,
+		)
+
+		for _, child := range expectedTotalChildren {
+			h := checkables[child]
+			require.NotNil(t, h, "child node should be a Checkable")
+			assert.Truef(t, redundancyGroupChildNodeIds[h.NodeId.String()], "child %q should reference %q as parent node", child, g.RedundancyGroupName)
+			// The edge state ID of all the children of this redundancy group should be the same as the
+			// redundancy group ID, i.e. they all share the same edge state. This is just a duplicate check!
+			assert.Equalf(t, g.RedundancyGroupId, h.EdgeStateId, "child %q should have the correct edge state", child)
+		}
+	} else {
+		for _, child := range expectedTotalChildren {
+			h := checkables[child]
+			require.NotNilf(t, h, "child %q should be a Checkable", child)
+
+			parents := checkableChildNodeIds[h.NodeId.String()]
+			require.NotNilf(t, parents, "child %q should have parent nodes", child)
+			assert.Lenf(t, parents, len(expectedTotalParents), "child %q should reference %d parent nodes", child, len(expectedTotalParents))
+
+			// Verify that the parent nodes of the children hosts are the correct ones.
+			for _, p := range expectedTotalParents {
+				parent := checkables[p]
+				require.NotNilf(t, parent, "parent %q should be an existing Checkable", p)
+				assert.Truef(t, parents[parent.NodeId.String()], "child %q should reference parent node %q", child, p)
+			}
+		}
+	}
+	assert.Len(t, edgesByParentName, expectedParentCount)
+
+	for _, edge := range edgesByParentName {
+		assert.Truef(t, edge.FromNodeId.Valid(), "parent %q should have a from_node_id set", edge.ParentName)
+		assert.Truef(t, edge.ToNodeId.Valid(), "parent %q should have a to_node_id set", edge.ParentName)
+
+		if g.IsRedundancyGroup() {
+			assert.Equal(t, edge.ParentName, g.RedundancyGroupName, "parent node should be the redundancy group itself")
+			assert.Equal(t, g.RedundancyGroupId, edge.ParentRedundancyGroupId, "parent redundancy group should be the same")
+
+			// Verify whether the connection between the current redundancy group and the parent Checkable is correct.
+			query := `SELECT from_node_id, to_node_id FROM dependency_edge WHERE from_node_id = ?`
+			var edges []Edge
+			assert.NoError(t, db.Select(&edges, db.Rebind(query), g.RedundancyGroupId))
+			assert.Lenf(t, edges, len(expectedTotalParents), "redundancy group %q should reference %d parents", g.RedundancyGroupName, len(expectedTotalParents))
+
+			for _, parent := range expectedTotalParents {
+				h := checkables[parent]
+				require.NotNil(t, h, "parent node should be an existing Checkable")
+				assert.Truef(t, slices.ContainsFunc(edges, func(edge Edge) bool {
+					return bytes.Equal(h.NodeId, edge.ToNodeId)
+				}), "redundancy group %q should reference parent node %q", g.RedundancyGroupName, parent)
+			}
+		} else {
+			assert.Falsef(t, edge.ParentRedundancyGroupId.Valid(), "non-redundant parent %q should not reference a redundancy group", edge.ParentName)
+			assert.Contains(t, expectedTotalParents, edge.ParentName, "should be in the parents list")
+			assert.NotContains(t, expectedTotalChildren, edge.ParentName, "should not be in the children list")
+
+			parent := checkables[edge.ParentName]
+			require.NotNil(t, parent, "parent node should be an existing Checkable")
+			assert.Equal(t, parent.Id, edge.ToNodeId, "parent node should reference the correct Checkable")
+		}
+	}
+}
+
+// makeDependencyGroupTestCases generates a set of dependency groups that can be used for testing the dependency sync.
+//
+// All the parent and child Checkables used within this function are defined in the object_sync_test.conf file.
+// Therefore, if you want to add some more dependency groups with new Checkables, you need to add them to the
+// object_sync_test.conf file as well.
+//
+// Note: Due to how the dependency groups verification works, a given Checkable should only be part of a single
+// test case. This is necessary because when performing the actual tests, it is assumed that the children within
+// a given subtest will only reference the parent nodes of that subtest. Otherwise, the verification process will
+// fail if a Checkable references parent nodes from another subtest.
+func makeDependencyGroupTestCases() []DependencyGroupTestCases { + return []DependencyGroupTestCases{ + { + TestName: "Simple Parent-Child1", + Groups: []*DependencyGroup{ + { + Parents: []string{"HostA"}, + Children: []string{"HostB"}, + StatesFilter: []string{"Up", "Down"}, + IgnoreSoftStates: true, + TimePeriod: "never-ever", + }, + }, + }, + { + TestName: "Simple Parent-Child2", + Groups: []*DependencyGroup{ + { + Parents: []string{"HostB"}, + Children: []string{"HostC"}, + StatesFilter: []string{"Up", "Down"}, + IgnoreSoftStates: false, + }, + }, + }, + { + TestName: "Simple Redundancy Group", + Groups: []*DependencyGroup{ + { + RedundancyGroupName: "LDAP", + Parents: []string{"HostD", "HostE", "HostF"}, + Children: []string{"HostG", "HostH", "HostI"}, + StatesFilter: []string{"Up", "Down"}, + IgnoreSoftStates: true, + }, + }, + }, + { + TestName: "Redundancy Group SQL Server1", + Groups: []*DependencyGroup{ + { + RedundancyGroupName: "SQL Server", + Parents: []string{"HostF"}, + Children: []string{"HostJ"}, + StatesFilter: []string{"Up"}, + IgnoreSoftStates: false, + TimePeriod: "never-ever", + }, + }, + }, + { + // This redundancy group is named exactly the same as the above one and has the same parent, + // but applies to a different child and has also different dependency configuration. So, this + // should not be merged into the above group even though they have the same name. + TestName: "Redundancy Group SQL Server2", + Groups: []*DependencyGroup{ + { + RedundancyGroupName: "SQL Server", + Parents: []string{"HostF"}, + Children: []string{"HostK"}, + StatesFilter: []string{"Up"}, + IgnoreSoftStates: true, + TimePeriod: "never-ever", + }, + }, + }, + { + // Test non-redundant dependency groups deduplication, i.e. different dependencies with the same + // parent and child but maybe different states filter, time period, etc. should be merged into a + // single edge for each parent-child pair. 
+ TestName: "Dependency Deduplication", + Groups: []*DependencyGroup{ + { + Parents: []string{"HostA"}, + Children: []string{"HostL", "HostM", "HostN"}, + StatesFilter: []string{"Up"}, + IgnoreSoftStates: true, + }, + { + Parents: []string{"HostA"}, + Children: []string{"HostL", "HostM", "HostN"}, + StatesFilter: []string{"Down"}, + IgnoreSoftStates: false, + }, + { + Parents: []string{"HostA"}, + Children: []string{"HostL", "HostM", "HostN"}, + StatesFilter: []string{"Up"}, + IgnoreSoftStates: false, + TimePeriod: "workhours", + }, + { + Parents: []string{"HostA"}, + Children: []string{"HostL", "HostM", "HostN"}, + StatesFilter: []string{"Down", "Up"}, // <-- This is the difference to the above group. + IgnoreSoftStates: false, + TimePeriod: "workhours", + }, + { // Just for more variety :), replicate the above group as-is. + Parents: []string{"HostA"}, + Children: []string{"HostL", "HostM", "HostN"}, + StatesFilter: []string{"Down", "Up"}, + IgnoreSoftStates: false, + TimePeriod: "workhours", + }, + }, + }, + { + // Test basic redundancy group deduplication, i.e. different dependencies with the same parent, + // children and all other relevant properties should be merged into a single redundancy group. 
+ TestName: "Redundancy Group Replica", + Groups: []*DependencyGroup{ + { + RedundancyGroupName: "DNS", + Parents: []string{"HostA", "HostB"}, + Children: []string{"HostO", "HostP", "HostQ"}, + StatesFilter: []string{"Up", "Down"}, + IgnoreSoftStates: true, + }, + { + RedundancyGroupName: "DNS", + Parents: []string{"HostA", "HostB"}, + Children: []string{"HostO"}, + StatesFilter: []string{"Up", "Down"}, + IgnoreSoftStates: true, + }, + { + RedundancyGroupName: "DNS", + Parents: []string{"HostA", "HostB"}, + Children: []string{"HostP"}, + StatesFilter: []string{"Up", "Down"}, + IgnoreSoftStates: true, + }, + { + RedundancyGroupName: "DNS", + Parents: []string{"HostA", "HostB"}, + Children: []string{"HostQ"}, + StatesFilter: []string{"Up", "Down"}, + IgnoreSoftStates: true, + }, + }, + }, + { + // This test case is similar to the above one but with a slight difference in the children list and + // the dependencies aren't an exact replica of each other, but they should still be merged into a single + // redundancy group. Even though all of them have kind of different dependency config, they apply to the + // same children, thus they should form a single redundancy group. + TestName: "Redundancy Group Deduplication1", + Groups: []*DependencyGroup{ + { + RedundancyGroupName: "LDAP", + Parents: []string{"HostC"}, + Children: []string{"HostR", "HostS"}, + StatesFilter: []string{"Up", "Down"}, + IgnoreSoftStates: true, + }, + { + RedundancyGroupName: "LDAP", + Parents: []string{"HostD"}, + Children: []string{"HostR", "HostS"}, + StatesFilter: []string{"Up", "Down"}, + IgnoreSoftStates: false, + }, + { + RedundancyGroupName: "LDAP", + Parents: []string{"HostE"}, + Children: []string{"HostR", "HostS"}, + StatesFilter: []string{"Down"}, + IgnoreSoftStates: true, + TimePeriod: "never-ever", + }, + }, + }, + { + // Same as the above test case but with a different redundancy group and children. 
+ TestName: "Redundancy Group Deduplication2", + Groups: []*DependencyGroup{ + { + RedundancyGroupName: "Web Servers", + Parents: []string{"HostC", "HostD"}, + Children: []string{"HostT", "HostU", "HostV", "HostW"}, + StatesFilter: []string{"Up", "Down"}, + TimePeriod: "workhours", + }, + { + RedundancyGroupName: "Web Servers", + Parents: []string{"HostC", "HostD"}, + Children: []string{"HostT", "HostU", "HostV", "HostW"}, + StatesFilter: []string{"Up", "Down"}, + IgnoreSoftStates: true, + TimePeriod: "never-ever", + }, + { + RedundancyGroupName: "Web Servers", + Parents: []string{"HostE", "HostF", "HostG"}, + Children: []string{"HostT", "HostU", "HostV", "HostW"}, + StatesFilter: []string{"Down"}, + IgnoreSoftStates: true, + }, + }, + }, + } +} + +// makeRuntimeDependencyGroupTestCases generates a set of dependency groups that can be used +// for testing the runtime dependency creation and deletion. +func makeRuntimeDependencyGroupTestCases() []DependencyGroupTestCases { + return []DependencyGroupTestCases{ + { + TestName: "Mail Servers Runtime", + Groups: []*DependencyGroup{ + { + RedundancyGroupName: "Mail Servers", + Parents: []string{"HostT", "HostU", "HostV"}, // HostT, HostU, HostV already exist + Children: []string{"Child1", "Child2", "Child3"}, + StatesFilter: []string{"Down"}, + IgnoreSoftStates: true, + }, + }, + }, + { + TestName: "Web Servers Runtime Deduplication", + Groups: []*DependencyGroup{ + { + RedundancyGroupName: "Web Servers", + Parents: []string{"HostA", "HostB", "HostC"}, // HostA, HostB, HostC already exist + Children: []string{"Child4", "Child5", "Child6"}, + StatesFilter: []string{"Down"}, + }, + { + RedundancyGroupName: "Web Servers", + Parents: []string{"HostD", "HostE", "HostF"}, // HostD, HostE, HostF already exist + Children: []string{"Child4", "Child5", "Child6"}, + StatesFilter: []string{"Up"}, + }, + }, + }, + { + TestName: "Non Redundant Runtime Deduplication", + Groups: []*DependencyGroup{ + { + Parents: []string{"HostG", 
"HostH", "HostI"}, // HostG, HostH, HostI already exist + Children: []string{"Child7", "Child8", "Child9"}, // These children are new + StatesFilter: []string{"Down"}, + }, + { + Parents: []string{"HostG", "HostH", "HostI"}, + Children: []string{"Child7", "Child8", "Child9"}, + IgnoreSoftStates: true, + TimePeriod: "workhours", // workhours already exist + }, + }, + }, + { + TestName: "Redundancy Group With New Parents", + Groups: []*DependencyGroup{ + { + RedundancyGroupName: "Redundancy Group With New Parents", + Parents: []string{"Parent1", "Parent2", "Parent3"}, // Parent1, Parent2, Parent3 do not exist + Children: []string{"Child10", "Child11", "Child12"}, + HasNewParents: true, + }, + }, + }, + } +} + // writeIcinga2ConfigObjects emits config objects as icinga2 DSL to a writer // based on the type of obj and its field having icinga2 struct tags. func writeIcinga2ConfigObject(w io.Writer, obj interface{}) error { diff --git a/tests/sql/sla_test.go b/tests/sql/sla_test.go index de2bace2e..ad5882b72 100644 --- a/tests/sql/sla_test.go +++ b/tests/sql/sla_test.go @@ -5,6 +5,7 @@ import ( "database/sql/driver" "fmt" "github.com/go-sql-driver/mysql" + "github.com/icinga/icinga-go-library/types" "github.com/jmoiron/sqlx" "github.com/lib/pq" "github.com/stretchr/testify/assert" @@ -366,27 +367,29 @@ func (c *CurrentState) WriteSlaEventToDatabase(db *sqlx.DB, m *SlaHistoryMeta) e type values struct { *SlaHistoryMeta State uint8 `db:"state"` + AffectsChildren types.Bool `db:"affects_children"` PropertiesChecksum NullableBytes `db:"properties_checksum"` } v := values{ SlaHistoryMeta: m, State: c.State, + AffectsChildren: types.Bool{Bool: false, Valid: true}, PropertiesChecksum: make([]byte, 20), } if len(m.ServiceId) == 0 { _, err := db.NamedExec("INSERT INTO host_state"+ " (id, host_id, environment_id, properties_checksum, soft_state, previous_soft_state,"+ - " hard_state, previous_hard_state, check_attempt, severity, last_state_change, next_check, next_update)"+ - " 
VALUES (:host_id, :host_id, :environment_id, :properties_checksum, :state, :state, :state, :state, 0, 0, 0, 0, 0)", + " hard_state, previous_hard_state, affects_children, check_attempt, severity, last_state_change, next_check, next_update)"+ + " VALUES (:host_id, :host_id, :environment_id, :properties_checksum, :state, :state, :state, :state, :affects_children, 0, 0, 0, 0, 0)", &v) return err } else { _, err := db.NamedExec("INSERT INTO service_state"+ " (id, host_id, service_id, environment_id, properties_checksum, soft_state, previous_soft_state,"+ - " hard_state, previous_hard_state, check_attempt, severity, last_state_change, next_check, next_update)"+ - " VALUES (:service_id, :host_id, :service_id, :environment_id, :properties_checksum, :state, :state, :state, :state, 0, 0, 0, 0, 0)", + " hard_state, previous_hard_state, affects_children, check_attempt, severity, last_state_change, next_check, next_update)"+ + " VALUES (:service_id, :host_id, :service_id, :environment_id, :properties_checksum, :state, :state, :state, :state, :affects_children, 0, 0, 0, 0, 0)", &v) return err }