Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions migrations/074_fct_node_host_spec.down.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
-- Drop the Distributed facade first (so queries fail fast), then the replicated
-- local table. SYNC waits for the drop to fully complete — including ZooKeeper
-- replica-path cleanup for the Replicated*MergeTree table — so an immediate
-- re-run of the up migration cannot fail with "replica already exists".
DROP TABLE IF EXISTS `${NETWORK_NAME}`.fct_node_host_spec ON CLUSTER '{cluster}' SYNC;
DROP TABLE IF EXISTS `${NETWORK_NAME}`.fct_node_host_spec_local ON CLUSTER '{cluster}' SYNC;
53 changes: 53 additions & 0 deletions migrations/074_fct_node_host_spec.up.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
-- Time-history fact table for node host hardware specifications
-- Pattern: a per-shard ReplicatedReplacingMergeTree table (`*_local`) stores the
-- data; the Distributed table created below is the cluster-wide query/insert
-- entry point. ReplacingMergeTree collapses rows sharing the full ORDER BY key
-- at merge time, keeping the row with the greatest `updated_date_time` (the
-- version argument passed to the engine), so re-ingestion acts as an upsert.
CREATE TABLE `${NETWORK_NAME}`.fct_node_host_spec_local ON CLUSTER '{cluster}' (
`updated_date_time` DateTime COMMENT 'Timestamp when the record was last updated' CODEC(DoubleDelta, ZSTD(1)),
`wallclock_slot_start_date_time` DateTime64(3) COMMENT 'The wall clock time when the slot started' CODEC(DoubleDelta, ZSTD(1)),
`meta_client_name` LowCardinality(String) COMMENT 'Name of the observoor client that collected the data',
`meta_network_name` LowCardinality(String) COMMENT 'Ethereum network name',
`node_class` LowCardinality(String) COMMENT 'Node classification for filtering (e.g. eip7870)',
`host_id` String COMMENT 'Unique host identifier' CODEC(ZSTD(1)),
`kernel_release` LowCardinality(String) COMMENT 'OS kernel release version',
`os_name` LowCardinality(String) COMMENT 'Operating system name',
`architecture` LowCardinality(String) COMMENT 'CPU architecture (e.g. x86_64, aarch64)',
`cpu_model` String COMMENT 'CPU model name' CODEC(ZSTD(1)),
`cpu_vendor` LowCardinality(String) COMMENT 'CPU vendor (e.g. GenuineIntel, AuthenticAMD)',
`cpu_online_cores` UInt16 COMMENT 'Number of online CPU cores' CODEC(ZSTD(1)),
`cpu_logical_cores` UInt16 COMMENT 'Number of logical CPU cores' CODEC(ZSTD(1)),
`cpu_physical_cores` UInt16 COMMENT 'Number of physical CPU cores' CODEC(ZSTD(1)),
`cpu_performance_cores` UInt16 COMMENT 'Number of performance cores (hybrid CPUs)' CODEC(ZSTD(1)),
`cpu_efficiency_cores` UInt16 COMMENT 'Number of efficiency cores (hybrid CPUs)' CODEC(ZSTD(1)),
`cpu_unknown_type_cores` UInt16 COMMENT 'Number of cores with unknown type' CODEC(ZSTD(1)),
`cpu_core_types` Array(UInt8) COMMENT 'Core type identifiers per core',
`cpu_core_type_labels` Array(LowCardinality(String)) COMMENT 'Core type labels per core',
`cpu_max_freq_khz` Array(UInt64) COMMENT 'Maximum frequency per core in kHz',
`cpu_base_freq_khz` Array(UInt64) COMMENT 'Base frequency per core in kHz',
`memory_total_bytes` UInt64 COMMENT 'Total system memory in bytes' CODEC(ZSTD(1)),
`memory_type` LowCardinality(String) COMMENT 'Memory type (e.g. DDR4, DDR5)',
`memory_speed_mts` UInt32 COMMENT 'Memory speed in MT/s' CODEC(ZSTD(1)),
`memory_dimm_count` UInt16 COMMENT 'Number of memory DIMMs' CODEC(ZSTD(1)),
`memory_dimm_sizes_bytes` Array(UInt64) COMMENT 'Size of each DIMM in bytes',
`memory_dimm_types` Array(LowCardinality(String)) COMMENT 'Type of each DIMM',
`memory_dimm_speeds_mts` Array(UInt32) COMMENT 'Speed of each DIMM in MT/s',
`disk_count` UInt16 COMMENT 'Number of disk devices' CODEC(ZSTD(1)),
`disk_total_bytes` UInt64 COMMENT 'Total disk capacity in bytes' CODEC(ZSTD(1)),
`disk_names` Array(String) COMMENT 'Device names of each disk',
`disk_models` Array(String) COMMENT 'Model names of each disk',
`disk_sizes_bytes` Array(UInt64) COMMENT 'Size of each disk in bytes',
`disk_rotational` Array(UInt8) COMMENT 'Whether each disk is rotational (1) or SSD (0)'
) ENGINE = ReplicatedReplacingMergeTree(
-- ZooKeeper path parameterized by macros so each shard/replica registers uniquely.
'/clickhouse/{installation}/{cluster}/tables/{shard}/{database}/{table}',
'{replica}',
-- Version column: on dedup the row with the max updated_date_time wins.
`updated_date_time`
) PARTITION BY toStartOfMonth(wallclock_slot_start_date_time)
-- Dedup/sort key. NOTE(review): rows are deduplicated per (slot, client); this
-- assumes meta_client_name identifies a single host — confirm host_id cannot
-- differ between rows sharing the same client name and slot, otherwise distinct
-- hosts' specs would collapse into one row at merge time.
ORDER BY
(wallclock_slot_start_date_time, meta_client_name)
SETTINGS
-- No projections are defined here; presumably set for consistency with sibling
-- tables so any future projection is rebuilt (not dropped) on dedup merges.
deduplicate_merge_projection_mode = 'rebuild'
COMMENT 'Node host hardware specifications over time with node classification';

-- Cluster-wide facade: shards rows by hash of the dedup key so every version of
-- a given (slot, client) row lands on the same shard and can be deduplicated.
CREATE TABLE `${NETWORK_NAME}`.fct_node_host_spec ON CLUSTER '{cluster}' AS `${NETWORK_NAME}`.fct_node_host_spec_local ENGINE = Distributed(
'{cluster}',
'${NETWORK_NAME}',
fct_node_host_spec_local,
cityHash64(wallclock_slot_start_date_time, meta_client_name)
);
2 changes: 2 additions & 0 deletions migrations/075_fct_node_cpu_utilization_hourly.down.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
-- Drop the Distributed facade first, then the replicated local table. SYNC
-- waits for ZooKeeper replica-path cleanup so the up migration can be re-run
-- immediately without "replica already exists" errors.
DROP TABLE IF EXISTS `${NETWORK_NAME}`.fct_node_cpu_utilization_hourly ON CLUSTER '{cluster}' SYNC;
DROP TABLE IF EXISTS `${NETWORK_NAME}`.fct_node_cpu_utilization_hourly_local ON CLUSTER '{cluster}' SYNC;
32 changes: 32 additions & 0 deletions migrations/075_fct_node_cpu_utilization_hourly.up.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
-- Hourly aggregation of node CPU utilization across all processes
-- Per-shard ReplicatedReplacingMergeTree plus a Distributed facade (below).
-- ReplacingMergeTree keeps, per ORDER BY key (hour, client), the row with the
-- greatest `updated_date_time`, so re-running the aggregation upserts rows.
CREATE TABLE `${NETWORK_NAME}`.fct_node_cpu_utilization_hourly_local ON CLUSTER '{cluster}' (
`updated_date_time` DateTime COMMENT 'Timestamp when the record was last updated' CODEC(DoubleDelta, ZSTD(1)),
`hour_start_date_time` DateTime COMMENT 'Start of the hour period' CODEC(DoubleDelta, ZSTD(1)),
`meta_client_name` LowCardinality(String) COMMENT 'Name of the observoor client that collected the data',
`meta_network_name` LowCardinality(String) COMMENT 'Ethereum network name',
`node_class` LowCardinality(String) COMMENT 'Node classification for filtering (e.g. eip7870)',
`system_cores` UInt16 COMMENT 'Total system CPU cores' CODEC(ZSTD(1)),
`slot_count` UInt32 COMMENT 'Number of slots in this hour' CODEC(ZSTD(1)),
`avg_core_pct` Float32 COMMENT 'Average total CPU core utilization percentage' CODEC(ZSTD(1)),
`min_core_pct` Float32 COMMENT 'Minimum total CPU core utilization percentage' CODEC(ZSTD(1)),
`max_core_pct` Float32 COMMENT 'Maximum total CPU core utilization percentage' CODEC(ZSTD(1)),
`p50_core_pct` Float32 COMMENT '50th percentile total CPU core utilization' CODEC(ZSTD(1)),
`p95_core_pct` Float32 COMMENT '95th percentile total CPU core utilization' CODEC(ZSTD(1))
) ENGINE = ReplicatedReplacingMergeTree(
'/clickhouse/{installation}/{cluster}/tables/{shard}/{database}/{table}',
'{replica}',
-- Version column: latest updated_date_time wins on dedup merge.
`updated_date_time`
) PARTITION BY toStartOfMonth(hour_start_date_time)
-- Dedup/sort key: one row per (hour, client) survives merges.
ORDER BY (hour_start_date_time, meta_client_name)
SETTINGS
deduplicate_merge_projection_mode = 'rebuild'
COMMENT 'Hourly aggregated node CPU utilization statistics per node';

-- Cluster-wide facade; shards by the dedup key so duplicates co-locate.
CREATE TABLE `${NETWORK_NAME}`.fct_node_cpu_utilization_hourly ON CLUSTER '{cluster}'
AS `${NETWORK_NAME}`.fct_node_cpu_utilization_hourly_local
ENGINE = Distributed(
'{cluster}',
'${NETWORK_NAME}',
fct_node_cpu_utilization_hourly_local,
cityHash64(hour_start_date_time, meta_client_name)
);
2 changes: 2 additions & 0 deletions migrations/076_fct_node_memory_usage_hourly.down.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
-- Drop the Distributed facade first, then the replicated local table. SYNC
-- waits for ZooKeeper replica-path cleanup so the up migration can be re-run
-- immediately without "replica already exists" errors.
DROP TABLE IF EXISTS `${NETWORK_NAME}`.fct_node_memory_usage_hourly ON CLUSTER '{cluster}' SYNC;
DROP TABLE IF EXISTS `${NETWORK_NAME}`.fct_node_memory_usage_hourly_local ON CLUSTER '{cluster}' SYNC;
32 changes: 32 additions & 0 deletions migrations/076_fct_node_memory_usage_hourly.up.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
-- Hourly aggregation of node memory usage across all processes
-- Per-shard ReplicatedReplacingMergeTree plus a Distributed facade (below).
-- ReplacingMergeTree keeps, per ORDER BY key (hour, client), the row with the
-- greatest `updated_date_time`, so re-running the aggregation upserts rows.
CREATE TABLE `${NETWORK_NAME}`.fct_node_memory_usage_hourly_local ON CLUSTER '{cluster}' (
`updated_date_time` DateTime COMMENT 'Timestamp when the record was last updated' CODEC(DoubleDelta, ZSTD(1)),
`hour_start_date_time` DateTime COMMENT 'Start of the hour period' CODEC(DoubleDelta, ZSTD(1)),
`meta_client_name` LowCardinality(String) COMMENT 'Name of the observoor client that collected the data',
`meta_network_name` LowCardinality(String) COMMENT 'Ethereum network name',
`node_class` LowCardinality(String) COMMENT 'Node classification for filtering (e.g. eip7870)',
`slot_count` UInt32 COMMENT 'Number of slots in this hour' CODEC(ZSTD(1)),
`avg_vm_rss_bytes` UInt64 COMMENT 'Average total RSS memory in bytes' CODEC(ZSTD(1)),
`min_vm_rss_bytes` UInt64 COMMENT 'Minimum total RSS memory in bytes' CODEC(ZSTD(1)),
`max_vm_rss_bytes` UInt64 COMMENT 'Maximum total RSS memory in bytes' CODEC(ZSTD(1)),
`avg_rss_anon_bytes` UInt64 COMMENT 'Average total anonymous RSS memory in bytes' CODEC(ZSTD(1)),
`avg_rss_file_bytes` UInt64 COMMENT 'Average total file-backed RSS memory in bytes' CODEC(ZSTD(1)),
`avg_vm_swap_bytes` UInt64 COMMENT 'Average total swap memory in bytes' CODEC(ZSTD(1))
) ENGINE = ReplicatedReplacingMergeTree(
'/clickhouse/{installation}/{cluster}/tables/{shard}/{database}/{table}',
'{replica}',
-- Version column: latest updated_date_time wins on dedup merge.
`updated_date_time`
) PARTITION BY toStartOfMonth(hour_start_date_time)
-- Dedup/sort key: one row per (hour, client) survives merges.
ORDER BY (hour_start_date_time, meta_client_name)
SETTINGS
deduplicate_merge_projection_mode = 'rebuild'
COMMENT 'Hourly aggregated node memory usage statistics per node';

-- Cluster-wide facade; shards by the dedup key so duplicates co-locate.
CREATE TABLE `${NETWORK_NAME}`.fct_node_memory_usage_hourly ON CLUSTER '{cluster}'
AS `${NETWORK_NAME}`.fct_node_memory_usage_hourly_local
ENGINE = Distributed(
'{cluster}',
'${NETWORK_NAME}',
fct_node_memory_usage_hourly_local,
cityHash64(hour_start_date_time, meta_client_name)
);
2 changes: 2 additions & 0 deletions migrations/077_fct_node_disk_io_hourly.down.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
-- Drop the Distributed facade first, then the replicated local table. SYNC
-- waits for ZooKeeper replica-path cleanup so the up migration can be re-run
-- immediately without "replica already exists" errors.
DROP TABLE IF EXISTS `${NETWORK_NAME}`.fct_node_disk_io_hourly ON CLUSTER '{cluster}' SYNC;
DROP TABLE IF EXISTS `${NETWORK_NAME}`.fct_node_disk_io_hourly_local ON CLUSTER '{cluster}' SYNC;
31 changes: 31 additions & 0 deletions migrations/077_fct_node_disk_io_hourly.up.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
-- Hourly aggregation of node disk I/O across all processes
-- Per-shard ReplicatedReplacingMergeTree plus a Distributed facade (below).
-- ReplacingMergeTree keeps, per ORDER BY key (hour, client, rw), the row with
-- the greatest `updated_date_time`, so re-running the aggregation upserts rows.
-- NOTE(review): sum_io_bytes is Float64 while sum_io_ops is UInt64 — Float64
-- loses integer exactness above 2^53; confirm the mixed types are intentional.
CREATE TABLE `${NETWORK_NAME}`.fct_node_disk_io_hourly_local ON CLUSTER '{cluster}' (
`updated_date_time` DateTime COMMENT 'Timestamp when the record was last updated' CODEC(DoubleDelta, ZSTD(1)),
`hour_start_date_time` DateTime COMMENT 'Start of the hour period' CODEC(DoubleDelta, ZSTD(1)),
`meta_client_name` LowCardinality(String) COMMENT 'Name of the observoor client that collected the data',
`meta_network_name` LowCardinality(String) COMMENT 'Ethereum network name',
`node_class` LowCardinality(String) COMMENT 'Node classification for filtering (e.g. eip7870)',
`rw` LowCardinality(String) COMMENT 'Read or write operation',
`slot_count` UInt32 COMMENT 'Number of slots in this hour' CODEC(ZSTD(1)),
`sum_io_bytes` Float64 COMMENT 'Total bytes transferred in this hour' CODEC(ZSTD(1)),
`avg_io_bytes` Float32 COMMENT 'Average bytes transferred per slot' CODEC(ZSTD(1)),
`sum_io_ops` UInt64 COMMENT 'Total I/O operations in this hour' CODEC(ZSTD(1)),
`avg_io_ops` UInt32 COMMENT 'Average I/O operations per slot' CODEC(ZSTD(1))
) ENGINE = ReplicatedReplacingMergeTree(
'/clickhouse/{installation}/{cluster}/tables/{shard}/{database}/{table}',
'{replica}',
-- Version column: latest updated_date_time wins on dedup merge.
`updated_date_time`
) PARTITION BY toStartOfMonth(hour_start_date_time)
-- Dedup/sort key: one row per (hour, client, direction) survives merges.
ORDER BY (hour_start_date_time, meta_client_name, rw)
SETTINGS
deduplicate_merge_projection_mode = 'rebuild'
COMMENT 'Hourly aggregated node disk I/O statistics per node and read/write direction';

-- Cluster-wide facade. Sharding key omits `rw`, which is fine: both directions
-- of a (hour, client) pair co-locate on the same shard, preserving dedup.
CREATE TABLE `${NETWORK_NAME}`.fct_node_disk_io_hourly ON CLUSTER '{cluster}'
AS `${NETWORK_NAME}`.fct_node_disk_io_hourly_local
ENGINE = Distributed(
'{cluster}',
'${NETWORK_NAME}',
fct_node_disk_io_hourly_local,
cityHash64(hour_start_date_time, meta_client_name)
);
2 changes: 2 additions & 0 deletions migrations/078_fct_node_network_io_hourly.down.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
-- Drop the Distributed facade first, then the replicated local table. SYNC
-- waits for ZooKeeper replica-path cleanup so the up migration can be re-run
-- immediately without "replica already exists" errors.
DROP TABLE IF EXISTS `${NETWORK_NAME}`.fct_node_network_io_hourly ON CLUSTER '{cluster}' SYNC;
DROP TABLE IF EXISTS `${NETWORK_NAME}`.fct_node_network_io_hourly_local ON CLUSTER '{cluster}' SYNC;
32 changes: 32 additions & 0 deletions migrations/078_fct_node_network_io_hourly.up.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
-- Hourly aggregation of node network I/O across all processes
-- Per-shard ReplicatedReplacingMergeTree plus a Distributed facade (below).
-- ReplacingMergeTree keeps, per ORDER BY key (hour, client, port, direction),
-- the row with the greatest `updated_date_time`, so re-runs upsert rows.
CREATE TABLE `${NETWORK_NAME}`.fct_node_network_io_hourly_local ON CLUSTER '{cluster}' (
`updated_date_time` DateTime COMMENT 'Timestamp when the record was last updated' CODEC(DoubleDelta, ZSTD(1)),
`hour_start_date_time` DateTime COMMENT 'Start of the hour period' CODEC(DoubleDelta, ZSTD(1)),
`meta_client_name` LowCardinality(String) COMMENT 'Name of the observoor client that collected the data',
`meta_network_name` LowCardinality(String) COMMENT 'Ethereum network name',
`node_class` LowCardinality(String) COMMENT 'Node classification for filtering (e.g. eip7870)',
`port_label` LowCardinality(String) COMMENT 'Port classification (e.g. cl_p2p_tcp, el_json_rpc)',
`direction` LowCardinality(String) COMMENT 'Traffic direction: tx or rx',
`slot_count` UInt32 COMMENT 'Number of slots in this hour' CODEC(ZSTD(1)),
`sum_io_bytes` Float64 COMMENT 'Total bytes transferred in this hour' CODEC(ZSTD(1)),
`avg_io_bytes` Float32 COMMENT 'Average bytes transferred per slot' CODEC(ZSTD(1)),
`sum_io_count` UInt64 COMMENT 'Total packet count in this hour' CODEC(ZSTD(1)),
`avg_io_count` UInt32 COMMENT 'Average packet count per slot' CODEC(ZSTD(1))
) ENGINE = ReplicatedReplacingMergeTree(
'/clickhouse/{installation}/{cluster}/tables/{shard}/{database}/{table}',
'{replica}',
-- Version column: latest updated_date_time wins on dedup merge.
`updated_date_time`
) PARTITION BY toStartOfMonth(hour_start_date_time)
-- Dedup/sort key: one row per (hour, client, port, direction) survives merges.
ORDER BY (hour_start_date_time, meta_client_name, port_label, direction)
SETTINGS
deduplicate_merge_projection_mode = 'rebuild'
COMMENT 'Hourly aggregated node network I/O statistics per node, port, and direction';

-- Cluster-wide facade. Sharding key omits port/direction, which is fine: all
-- rows of a (hour, client) pair co-locate on the same shard, preserving dedup.
CREATE TABLE `${NETWORK_NAME}`.fct_node_network_io_hourly ON CLUSTER '{cluster}'
AS `${NETWORK_NAME}`.fct_node_network_io_hourly_local
ENGINE = Distributed(
'{cluster}',
'${NETWORK_NAME}',
fct_node_network_io_hourly_local,
cityHash64(hour_start_date_time, meta_client_name)
);
2 changes: 2 additions & 0 deletions migrations/079_fct_node_cpu_utilization_daily.down.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
-- Drop the Distributed facade first, then the replicated local table. SYNC
-- waits for ZooKeeper replica-path cleanup so the up migration can be re-run
-- immediately without "replica already exists" errors.
DROP TABLE IF EXISTS `${NETWORK_NAME}`.fct_node_cpu_utilization_daily ON CLUSTER '{cluster}' SYNC;
DROP TABLE IF EXISTS `${NETWORK_NAME}`.fct_node_cpu_utilization_daily_local ON CLUSTER '{cluster}' SYNC;
32 changes: 32 additions & 0 deletions migrations/079_fct_node_cpu_utilization_daily.up.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
-- Daily aggregation of node CPU utilization
-- Per-shard ReplicatedReplacingMergeTree plus a Distributed facade (below).
-- ReplacingMergeTree keeps, per ORDER BY key (day, client), the row with the
-- greatest `updated_date_time`, so re-running the aggregation upserts rows.
CREATE TABLE `${NETWORK_NAME}`.fct_node_cpu_utilization_daily_local ON CLUSTER '{cluster}' (
`updated_date_time` DateTime COMMENT 'Timestamp when the record was last updated' CODEC(DoubleDelta, ZSTD(1)),
`day_start_date` Date COMMENT 'Start of the day period' CODEC(DoubleDelta, ZSTD(1)),
`meta_client_name` LowCardinality(String) COMMENT 'Name of the observoor client that collected the data',
`meta_network_name` LowCardinality(String) COMMENT 'Ethereum network name',
`node_class` LowCardinality(String) COMMENT 'Node classification for filtering (e.g. eip7870)',
`system_cores` UInt16 COMMENT 'Total system CPU cores' CODEC(ZSTD(1)),
`hour_count` UInt32 COMMENT 'Number of source hourly slots in this day' CODEC(ZSTD(1)),
`avg_core_pct` Float32 COMMENT 'Weighted average total CPU core utilization percentage' CODEC(ZSTD(1)),
`min_core_pct` Float32 COMMENT 'Minimum total CPU core utilization percentage' CODEC(ZSTD(1)),
`max_core_pct` Float32 COMMENT 'Maximum total CPU core utilization percentage' CODEC(ZSTD(1)),
`p50_core_pct` Float32 COMMENT 'Weighted 50th percentile total CPU core utilization' CODEC(ZSTD(1)),
`p95_core_pct` Float32 COMMENT 'Maximum of hourly 95th percentile CPU utilization' CODEC(ZSTD(1))
) ENGINE = ReplicatedReplacingMergeTree(
'/clickhouse/{installation}/{cluster}/tables/{shard}/{database}/{table}',
'{replica}',
-- Version column: latest updated_date_time wins on dedup merge.
`updated_date_time`
) PARTITION BY toYYYYMM(day_start_date)
-- Dedup/sort key: one row per (day, client) survives merges.
ORDER BY (day_start_date, meta_client_name)
SETTINGS
deduplicate_merge_projection_mode = 'rebuild'
COMMENT 'Daily aggregated node CPU utilization statistics per node';

-- Cluster-wide facade; shards by the dedup key so duplicates co-locate.
CREATE TABLE `${NETWORK_NAME}`.fct_node_cpu_utilization_daily ON CLUSTER '{cluster}'
AS `${NETWORK_NAME}`.fct_node_cpu_utilization_daily_local
ENGINE = Distributed(
'{cluster}',
'${NETWORK_NAME}',
fct_node_cpu_utilization_daily_local,
cityHash64(day_start_date, meta_client_name)
);
2 changes: 2 additions & 0 deletions migrations/080_fct_node_memory_usage_daily.down.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
-- Drop the Distributed facade first, then the replicated local table. SYNC
-- waits for ZooKeeper replica-path cleanup so the up migration can be re-run
-- immediately without "replica already exists" errors.
DROP TABLE IF EXISTS `${NETWORK_NAME}`.fct_node_memory_usage_daily ON CLUSTER '{cluster}' SYNC;
DROP TABLE IF EXISTS `${NETWORK_NAME}`.fct_node_memory_usage_daily_local ON CLUSTER '{cluster}' SYNC;
32 changes: 32 additions & 0 deletions migrations/080_fct_node_memory_usage_daily.up.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
-- Daily aggregation of node memory usage
-- Per-shard ReplicatedReplacingMergeTree plus a Distributed facade (below).
-- ReplacingMergeTree keeps, per ORDER BY key (day, client), the row with the
-- greatest `updated_date_time`, so re-running the aggregation upserts rows.
CREATE TABLE `${NETWORK_NAME}`.fct_node_memory_usage_daily_local ON CLUSTER '{cluster}' (
`updated_date_time` DateTime COMMENT 'Timestamp when the record was last updated' CODEC(DoubleDelta, ZSTD(1)),
`day_start_date` Date COMMENT 'Start of the day period' CODEC(DoubleDelta, ZSTD(1)),
`meta_client_name` LowCardinality(String) COMMENT 'Name of the observoor client that collected the data',
`meta_network_name` LowCardinality(String) COMMENT 'Ethereum network name',
`node_class` LowCardinality(String) COMMENT 'Node classification for filtering (e.g. eip7870)',
`hour_count` UInt32 COMMENT 'Number of source hourly slots in this day' CODEC(ZSTD(1)),
`avg_vm_rss_bytes` UInt64 COMMENT 'Weighted average total RSS memory in bytes' CODEC(ZSTD(1)),
`min_vm_rss_bytes` UInt64 COMMENT 'Minimum total RSS memory in bytes' CODEC(ZSTD(1)),
`max_vm_rss_bytes` UInt64 COMMENT 'Maximum total RSS memory in bytes' CODEC(ZSTD(1)),
`avg_rss_anon_bytes` UInt64 COMMENT 'Weighted average total anonymous RSS memory in bytes' CODEC(ZSTD(1)),
`avg_rss_file_bytes` UInt64 COMMENT 'Weighted average total file-backed RSS memory in bytes' CODEC(ZSTD(1)),
`avg_vm_swap_bytes` UInt64 COMMENT 'Weighted average total swap memory in bytes' CODEC(ZSTD(1))
) ENGINE = ReplicatedReplacingMergeTree(
'/clickhouse/{installation}/{cluster}/tables/{shard}/{database}/{table}',
'{replica}',
-- Version column: latest updated_date_time wins on dedup merge.
`updated_date_time`
) PARTITION BY toYYYYMM(day_start_date)
-- Dedup/sort key: one row per (day, client) survives merges.
ORDER BY (day_start_date, meta_client_name)
SETTINGS
deduplicate_merge_projection_mode = 'rebuild'
COMMENT 'Daily aggregated node memory usage statistics per node';

-- Cluster-wide facade; shards by the dedup key so duplicates co-locate.
CREATE TABLE `${NETWORK_NAME}`.fct_node_memory_usage_daily ON CLUSTER '{cluster}'
AS `${NETWORK_NAME}`.fct_node_memory_usage_daily_local
ENGINE = Distributed(
'{cluster}',
'${NETWORK_NAME}',
fct_node_memory_usage_daily_local,
cityHash64(day_start_date, meta_client_name)
);
2 changes: 2 additions & 0 deletions migrations/081_fct_node_disk_io_daily.down.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
-- Drop the Distributed facade first, then the replicated local table. SYNC
-- waits for ZooKeeper replica-path cleanup so the up migration can be re-run
-- immediately without "replica already exists" errors.
DROP TABLE IF EXISTS `${NETWORK_NAME}`.fct_node_disk_io_daily ON CLUSTER '{cluster}' SYNC;
DROP TABLE IF EXISTS `${NETWORK_NAME}`.fct_node_disk_io_daily_local ON CLUSTER '{cluster}' SYNC;
31 changes: 31 additions & 0 deletions migrations/081_fct_node_disk_io_daily.up.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
-- Daily aggregation of node disk I/O
-- Per-shard ReplicatedReplacingMergeTree plus a Distributed facade (below).
-- ReplacingMergeTree keeps, per ORDER BY key (day, client, rw), the row with
-- the greatest `updated_date_time`, so re-running the aggregation upserts rows.
-- NOTE(review): sum_io_bytes is Float64 while sum_io_ops is UInt64 — Float64
-- loses integer exactness above 2^53; confirm the mixed types are intentional.
CREATE TABLE `${NETWORK_NAME}`.fct_node_disk_io_daily_local ON CLUSTER '{cluster}' (
`updated_date_time` DateTime COMMENT 'Timestamp when the record was last updated' CODEC(DoubleDelta, ZSTD(1)),
`day_start_date` Date COMMENT 'Start of the day period' CODEC(DoubleDelta, ZSTD(1)),
`meta_client_name` LowCardinality(String) COMMENT 'Name of the observoor client that collected the data',
`meta_network_name` LowCardinality(String) COMMENT 'Ethereum network name',
`node_class` LowCardinality(String) COMMENT 'Node classification for filtering (e.g. eip7870)',
`rw` LowCardinality(String) COMMENT 'Read or write operation',
`hour_count` UInt32 COMMENT 'Number of source hourly slots in this day' CODEC(ZSTD(1)),
`sum_io_bytes` Float64 COMMENT 'Total bytes transferred in this day' CODEC(ZSTD(1)),
`avg_io_bytes` Float32 COMMENT 'Weighted average bytes transferred per slot' CODEC(ZSTD(1)),
`sum_io_ops` UInt64 COMMENT 'Total I/O operations in this day' CODEC(ZSTD(1)),
`avg_io_ops` UInt32 COMMENT 'Weighted average I/O operations per slot' CODEC(ZSTD(1))
) ENGINE = ReplicatedReplacingMergeTree(
'/clickhouse/{installation}/{cluster}/tables/{shard}/{database}/{table}',
'{replica}',
-- Version column: latest updated_date_time wins on dedup merge.
`updated_date_time`
) PARTITION BY toYYYYMM(day_start_date)
-- Dedup/sort key: one row per (day, client, direction) survives merges.
ORDER BY (day_start_date, meta_client_name, rw)
SETTINGS
deduplicate_merge_projection_mode = 'rebuild'
COMMENT 'Daily aggregated node disk I/O statistics per node and read/write direction';

-- Cluster-wide facade. Sharding key omits `rw`, which is fine: both directions
-- of a (day, client) pair co-locate on the same shard, preserving dedup.
CREATE TABLE `${NETWORK_NAME}`.fct_node_disk_io_daily ON CLUSTER '{cluster}'
AS `${NETWORK_NAME}`.fct_node_disk_io_daily_local
ENGINE = Distributed(
'{cluster}',
'${NETWORK_NAME}',
fct_node_disk_io_daily_local,
cityHash64(day_start_date, meta_client_name)
);
2 changes: 2 additions & 0 deletions migrations/082_fct_node_network_io_daily.down.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
-- Drop the Distributed facade first, then the replicated local table. SYNC
-- waits for ZooKeeper replica-path cleanup so the up migration can be re-run
-- immediately without "replica already exists" errors.
DROP TABLE IF EXISTS `${NETWORK_NAME}`.fct_node_network_io_daily ON CLUSTER '{cluster}' SYNC;
DROP TABLE IF EXISTS `${NETWORK_NAME}`.fct_node_network_io_daily_local ON CLUSTER '{cluster}' SYNC;
Loading
Loading