@@ -13,7 +13,7 @@
 from src.metrics.prometheus.duration_meter import duration_meter
 from src.modules.csm.checkpoint import FrameCheckpointProcessor, FrameCheckpointsIterator, MinStepIsNotReached
 from src.modules.csm.log import FramePerfLog
-from src.modules.csm.state import State
+from src.modules.csm.state import State, Frame
 from src.modules.csm.tree import Tree
 from src.modules.csm.types import ReportData, Shares
 from src.modules.submodules.consensus import ConsensusModule
@@ -29,10 +29,11 @@
     SlotNumber,
     StakingModuleAddress,
     StakingModuleId,
+    ValidatorIndex,
 )
 from src.utils.blockstamp import build_blockstamp
 from src.utils.cache import global_lru_cache as lru_cache
-from src.utils.slot import get_next_non_missed_slot
+from src.utils.slot import get_next_non_missed_slot, get_reference_blockstamp
 from src.utils.web3converter import Web3Converter
 from src.web3py.extensions.lido_validators import NodeOperatorId, StakingModule, ValidatorsByNodeOperator
 from src.web3py.types import Web3
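Note: the new `Frame` import is used below as a `(from_epoch, to_epoch)` pair (see the unpacking in `calculate_distribution`). A minimal sketch of what the alias plausibly looks like; the real definition lives in `src/modules/csm/state.py`, outside this diff:

```python
# Hypothetical sketch only: the actual Frame definition is in src/modules/csm/state.py.
from typing import NewType

EpochNumber = NewType("EpochNumber", int)  # assumed to mirror src.types.EpochNumber

# The unpacking `from_epoch, to_epoch = frame` below implies a pair of epoch bounds.
Frame = tuple[EpochNumber, EpochNumber]
```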
@@ -101,12 +102,12 @@ def build_report(self, blockstamp: ReferenceBlockStamp) -> tuple:
         if (prev_cid is None) != (prev_root == ZERO_HASH):
             raise InconsistentData(f"Got inconsistent previous tree data: {prev_root=} {prev_cid=}")

-        distributed, shares, log = self.calculate_distribution(blockstamp)
+        distributed, shares, logs = self.calculate_distribution(blockstamp)

         if distributed != sum(shares.values()):
             raise InconsistentData(f"Invalid distribution: {sum(shares.values())=} != {distributed=}")

-        log_cid = self.publish_log(log)
+        log_cid = self.publish_log(logs)

         if not distributed and not shares:
             logger.info({"msg": "No shares distributed in the current frame"})
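The report now aggregates several per-frame logs, but the consistency check is unchanged: shares accumulated across all frames must sum exactly to `distributed`. A worked illustration with made-up numbers:

```python
from collections import defaultdict

shares: defaultdict[int, int] = defaultdict(int)  # NodeOperatorId -> accumulated shares
shares[0] += 100  # frame 1
shares[1] += 150  # frame 1
shares[0] += 50   # frame 2
distributed = 300  # sum of per-frame distributions

# Mirrors the guard in build_report; a mismatch raises InconsistentData.
assert distributed == sum(shares.values())
```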
@@ -201,7 +202,7 @@ def collect_data(self, blockstamp: BlockStamp) -> bool:
             logger.info({"msg": "The starting epoch of the frame is not finalized yet"})
             return False

-        self.state.migrate(l_epoch, r_epoch, consensus_version)
+        self.state.init_or_migrate(l_epoch, r_epoch, converter.frame_config.epochs_per_frame, consensus_version)
         self.state.log_progress()

         if self.state.is_fulfilled:
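`State.migrate` becomes `State.init_or_migrate` and now receives `epochs_per_frame`, so the state can be partitioned per frame rather than kept as one flat range. A hedged sketch of the frame splitting this implies (the real logic is in `src/modules/csm/state.py`, not shown in this diff):

```python
# Assumption: frames tile the [l_epoch, r_epoch] range in epochs_per_frame chunks.
def calculate_frames(l_epoch: int, r_epoch: int, epochs_per_frame: int) -> list[tuple[int, int]]:
    """Split the epoch range into consecutive (from_epoch, to_epoch) frames."""
    return [
        (left, left + epochs_per_frame - 1)
        for left in range(l_epoch, r_epoch + 1, epochs_per_frame)
    ]

# e.g. calculate_frames(0, 9, 5) == [(0, 4), (5, 9)]
```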
@@ -227,25 +228,64 @@ def collect_data(self, blockstamp: BlockStamp) -> bool:

     def calculate_distribution(
         self, blockstamp: ReferenceBlockStamp
-    ) -> tuple[int, defaultdict[NodeOperatorId, int], FramePerfLog]:
+    ) -> tuple[int, defaultdict[NodeOperatorId, int], list[FramePerfLog]]:
         """Computes distribution of fee shares at the given timestamp"""
-
-        network_avg_perf = self.state.get_network_aggr().perf
-        threshold = network_avg_perf - self.w3.csm.oracle.perf_leeway_bp(blockstamp.block_hash) / TOTAL_BASIS_POINTS
         operators_to_validators = self.module_validators_by_node_operators(blockstamp)

+        distributed = 0
+        # Calculate share of each CSM node operator.
+        shares = defaultdict[NodeOperatorId, int](int)
+        logs: list[FramePerfLog] = []
+
+        for frame in self.state.data:
+            from_epoch, to_epoch = frame
+            logger.info({"msg": f"Calculating distribution for frame [{from_epoch};{to_epoch}]"})
+            frame_blockstamp = blockstamp
+            if to_epoch != blockstamp.ref_epoch:
+                frame_blockstamp = self._get_ref_blockstamp_for_frame(blockstamp, to_epoch)
+            distributed_in_frame, shares_in_frame, log = self._calculate_distribution_in_frame(
+                frame_blockstamp, operators_to_validators, frame, distributed
+            )
+            distributed += distributed_in_frame
+            for no_id, share in shares_in_frame.items():
+                shares[no_id] += share
+            logs.append(log)
+
+        return distributed, shares, logs
+
+    def _get_ref_blockstamp_for_frame(
+        self, blockstamp: ReferenceBlockStamp, frame_ref_epoch: EpochNumber
+    ) -> ReferenceBlockStamp:
+        converter = self.converter(blockstamp)
+        return get_reference_blockstamp(
+            cc=self.w3.cc,
+            ref_slot=converter.get_epoch_last_slot(frame_ref_epoch),
+            ref_epoch=frame_ref_epoch,
+            last_finalized_slot_number=blockstamp.slot_number,
+        )
+
+    def _calculate_distribution_in_frame(
+        self,
+        blockstamp: ReferenceBlockStamp,
+        operators_to_validators: ValidatorsByNodeOperator,
+        frame: Frame,
+        distributed: int,
+    ):
+        network_perf = self.state.get_network_aggr(frame).perf
+        threshold = network_perf - self.w3.csm.oracle.perf_leeway_bp(blockstamp.block_hash) / TOTAL_BASIS_POINTS
+
         # Build the map of the current distribution operators.
         distribution: dict[NodeOperatorId, int] = defaultdict(int)
         stuck_operators = self.stuck_operators(blockstamp)
-        log = FramePerfLog(blockstamp, self.state.frame, threshold)
+        log = FramePerfLog(blockstamp, frame, threshold)

         for (_, no_id), validators in operators_to_validators.items():
             if no_id in stuck_operators:
                 log.operators[no_id].stuck = True
                 continue

             for v in validators:
-                aggr = self.state.data.get(v.index)
+                aggr = self.state.data[frame].get(ValidatorIndex(int(v.index)))

                 if aggr is None:
                     # It's possible that the validator is not assigned to any duty, hence it's performance
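Each frame now gets its own threshold, derived from that frame's network aggregate rather than a single report-wide average. A worked example of the formula, assuming the conventional `TOTAL_BASIS_POINTS = 10_000`:

```python
TOTAL_BASIS_POINTS = 10_000  # assumption: 100% expressed as basis points

network_perf = 0.95    # the frame's network-wide performance (illustrative)
perf_leeway_bp = 500   # leeway fetched from the oracle contract, in bp (illustrative)

threshold = network_perf - perf_leeway_bp / TOTAL_BASIS_POINTS
assert abs(threshold - 0.90) < 1e-9  # validators are judged against this per-frame bar
```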
@@ -268,13 +308,12 @@ def calculate_distribution(
         # Calculate share of each CSM node operator.
         shares = defaultdict[NodeOperatorId, int](int)
         total = sum(p for p in distribution.values())
+        to_distribute = self.w3.csm.fee_distributor.shares_to_distribute(blockstamp.block_hash) - distributed
+        log.distributable = to_distribute

         if not total:
             return 0, shares, log

-        to_distribute = self.w3.csm.fee_distributor.shares_to_distribute(blockstamp.block_hash)
-        log.distributable = to_distribute
-
         for no_id, no_share in distribution.items():
             if no_share:
                 shares[no_id] = to_distribute * no_share // total
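`to_distribute` is now reduced by whatever earlier frames already allocated, and the split itself still uses floor division, so rounding dust stays undistributed within a frame. Illustrative arithmetic:

```python
to_distribute = 1_000  # shares_to_distribute(...) minus `distributed` from prior frames
distribution = {0: 3, 1: 2, 2: 2}  # NodeOperatorId -> weight above threshold (made up)
total = sum(distribution.values())  # 7

shares = {no_id: to_distribute * no_share // total for no_id, no_share in distribution.items()}
# {0: 428, 1: 285, 2: 285} -> 998 distributed; 2 units of dust remain in this frame
```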
@@ -348,9 +387,9 @@ def publish_tree(self, tree: Tree) -> CID:
         logger.info({"msg": "Tree dump uploaded to IPFS", "cid": repr(tree_cid)})
         return tree_cid

-    def publish_log(self, log: FramePerfLog) -> CID:
-        log_cid = self.w3.ipfs.publish(log.encode())
-        logger.info({"msg": "Frame log uploaded to IPFS", "cid": repr(log_cid)})
+    def publish_log(self, logs: list[FramePerfLog]) -> CID:
+        log_cid = self.w3.ipfs.publish(FramePerfLog.encode(logs))
+        logger.info({"msg": "Frame(s) log uploaded to IPFS", "cid": repr(log_cid)})
         return log_cid

     @lru_cache(maxsize=1)
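`publish_log` now uploads one document covering every frame, so `encode` moves from an instance method to something callable on the class with the whole list. A hedged sketch of a compatible shape (the actual implementation is in `src/modules/csm/log.py`, outside this diff; field names below are placeholders):

```python
# Hypothetical sketch; not the real FramePerfLog schema.
import json
from dataclasses import asdict, dataclass, field


@dataclass
class FramePerfLogSketch:
    threshold: float
    distributable: int = 0
    operators: dict = field(default_factory=dict)

    @staticmethod
    def encode(logs: list["FramePerfLogSketch"]) -> bytes:
        # One JSON document per report, containing every frame's log.
        return json.dumps([asdict(log) for log in logs]).encode()
```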