12
12
)
13
13
from src .metrics .prometheus .duration_meter import duration_meter
14
14
from src .modules .csm .checkpoint import FrameCheckpointProcessor , FrameCheckpointsIterator , MinStepIsNotReached
15
- from src .modules .csm .log import FramePerfLog
16
- from src .modules .csm .state import State , Frame
15
+ from src .modules .csm .log import FramePerfLog , OperatorFrameSummary
16
+ from src .modules .csm .state import State , Frame , AttestationsAccumulator
17
17
from src .modules .csm .tree import Tree
18
18
from src .modules .csm .types import ReportData , Shares
19
19
from src .modules .submodules .consensus import ConsensusModule
29
29
SlotNumber ,
30
30
StakingModuleAddress ,
31
31
StakingModuleId ,
32
- ValidatorIndex ,
33
32
)
34
33
from src .utils .blockstamp import build_blockstamp
35
34
from src .utils .cache import global_lru_cache as lru_cache
36
35
from src .utils .slot import get_next_non_missed_slot , get_reference_blockstamp
37
36
from src .utils .web3converter import Web3Converter
38
- from src .web3py .extensions .lido_validators import NodeOperatorId , StakingModule , ValidatorsByNodeOperator
37
+ from src .web3py .extensions .lido_validators import NodeOperatorId , StakingModule , ValidatorsByNodeOperator , LidoValidator
39
38
from src .web3py .types import Web3
40
39
41
40
logger = logging .getLogger (__name__ )
@@ -102,15 +101,15 @@ def build_report(self, blockstamp: ReferenceBlockStamp) -> tuple:
102
101
if (prev_cid is None ) != (prev_root == ZERO_HASH ):
103
102
raise InconsistentData (f"Got inconsistent previous tree data: { prev_root = } { prev_cid = } " )
104
103
105
- distributed , shares , logs = self .calculate_distribution (blockstamp )
104
+ total_distributed , total_rewards , logs = self .calculate_distribution (blockstamp )
106
105
107
- if distributed != sum (shares .values ()):
108
- raise InconsistentData (f"Invalid distribution: { sum (shares .values ())= } != { distributed = } " )
106
+ if total_distributed != sum (total_rewards .values ()):
107
+ raise InconsistentData (f"Invalid distribution: { sum (total_rewards .values ())= } != { total_distributed = } " )
109
108
110
109
log_cid = self .publish_log (logs )
111
110
112
- if not distributed and not shares :
113
- logger .info ({"msg" : "No shares distributed in the current frame" })
111
+ if not total_distributed and not total_rewards :
112
+ logger .info ({"msg" : "No rewards distributed in the current frame" })
114
113
return ReportData (
115
114
self .get_consensus_version (blockstamp ),
116
115
blockstamp .ref_slot ,
@@ -123,11 +122,11 @@ def build_report(self, blockstamp: ReferenceBlockStamp) -> tuple:
123
122
if prev_cid and prev_root != ZERO_HASH :
124
123
# Update cumulative amount of shares for all operators.
125
124
for no_id , acc_shares in self .get_accumulated_shares (prev_cid , prev_root ):
126
- shares [no_id ] += acc_shares
125
+ total_rewards [no_id ] += acc_shares
127
126
else :
128
127
logger .info ({"msg" : "No previous distribution. Nothing to accumulate" })
129
128
130
- tree = self .make_tree (shares )
129
+ tree = self .make_tree (total_rewards )
131
130
tree_cid = self .publish_tree (tree )
132
131
133
132
return ReportData (
@@ -136,7 +135,7 @@ def build_report(self, blockstamp: ReferenceBlockStamp) -> tuple:
136
135
tree_root = tree .root ,
137
136
tree_cid = tree_cid ,
138
137
log_cid = log_cid ,
139
- distributed = distributed ,
138
+ distributed = total_distributed ,
140
139
).as_tuple ()
141
140
142
141
def is_main_data_submitted (self , blockstamp : BlockStamp ) -> bool :
@@ -232,26 +231,36 @@ def calculate_distribution(
232
231
"""Computes distribution of fee shares at the given timestamp"""
233
232
operators_to_validators = self .module_validators_by_node_operators (blockstamp )
234
233
235
- distributed = 0
236
- # Calculate share of each CSM node operator.
237
- shares = defaultdict [NodeOperatorId , int ](int )
234
+ total_distributed = 0
235
+ total_rewards = defaultdict [NodeOperatorId , int ](int )
238
236
logs : list [FramePerfLog ] = []
239
237
240
- for frame in self .state .data :
238
+ for frame in self .state .frames :
241
239
from_epoch , to_epoch = frame
242
240
logger .info ({"msg" : f"Calculating distribution for frame [{ from_epoch } ;{ to_epoch } ]" })
241
+
243
242
frame_blockstamp = blockstamp
244
243
if to_epoch != blockstamp .ref_epoch :
245
244
frame_blockstamp = self ._get_ref_blockstamp_for_frame (blockstamp , to_epoch )
246
- distributed_in_frame , shares_in_frame , log = self ._calculate_distribution_in_frame (
247
- frame_blockstamp , operators_to_validators , frame , distributed
245
+
246
+ total_rewards_to_distribute = self .w3 .csm .fee_distributor .shares_to_distribute (frame_blockstamp .block_hash )
247
+ rewards_to_distribute_in_frame = total_rewards_to_distribute - total_distributed
248
+
249
+ rewards_in_frame , log = self ._calculate_distribution_in_frame (
250
+ frame , frame_blockstamp , rewards_to_distribute_in_frame , operators_to_validators
248
251
)
249
- distributed += distributed_in_frame
250
- for no_id , share in shares_in_frame .items ():
251
- shares [no_id ] += share
252
+ distributed_in_frame = sum (rewards_in_frame .values ())
253
+
254
+ total_distributed += distributed_in_frame
255
+ if total_distributed > total_rewards_to_distribute :
256
+ raise CSMError (f"Invalid distribution: { total_distributed = } > { total_rewards_to_distribute = } " )
257
+
258
+ for no_id , rewards in rewards_in_frame .items ():
259
+ total_rewards [no_id ] += rewards
260
+
252
261
logs .append (log )
253
262
254
- return distributed , shares , logs
263
+ return total_distributed , total_rewards , logs
255
264
256
265
def _get_ref_blockstamp_for_frame (
257
266
self , blockstamp : ReferenceBlockStamp , frame_ref_epoch : EpochNumber
@@ -266,63 +275,85 @@ def _get_ref_blockstamp_for_frame(
266
275
267
276
def _calculate_distribution_in_frame(
    self,
    frame: Frame,
    blockstamp: ReferenceBlockStamp,
    rewards_to_distribute: int,
    operators_to_validators: ValidatorsByNodeOperator
):
    """Distribute the frame's rewards among node operators pro rata to their validators' participation.

    Returns a (rewards_distribution, frame_log) pair, where the log captures per-operator
    details (stuck flag, per-validator duties, distributed amounts) for the frame.
    """
    perf_threshold = self._get_performance_threshold(frame, blockstamp)
    frame_log = FramePerfLog(blockstamp, frame, perf_threshold)

    # Count of assigned attestations per operator acts as the participation weight.
    participation: defaultdict[NodeOperatorId, int] = defaultdict(int)

    stuck = self.stuck_operators(blockstamp)
    for (_, operator_id), validators in operators_to_validators.items():
        operator_log = frame_log.operators[operator_id]
        if operator_id in stuck:
            # Stuck operators are excluded from the distribution entirely.
            operator_log.stuck = True
            continue
        for validator in validators:
            duty = self.state.data[frame].get(validator.index)
            self.process_validator_duty(validator, duty, perf_threshold, participation, operator_log)

    rewards_distribution = self.calc_rewards_distribution_in_frame(participation, rewards_to_distribute)

    for operator_id, amount in rewards_distribution.items():
        frame_log.operators[operator_id].distributed = amount

    frame_log.distributable = rewards_to_distribute

    return rewards_distribution, frame_log
306
+
307
def _get_performance_threshold(self, frame: Frame, blockstamp: ReferenceBlockStamp) -> float:
    """Return the per-frame performance bar: network average perf minus the configured leeway.

    The leeway comes from the oracle contract in basis points and is converted to a fraction.
    """
    network_perf = self.state.get_network_aggr(frame).perf
    leeway = self.w3.csm.oracle.perf_leeway_bp(blockstamp.block_hash) / TOTAL_BASIS_POINTS
    return network_perf - leeway
312
+
313
@staticmethod
def process_validator_duty(
    validator: LidoValidator,
    attestation_duty: AttestationsAccumulator | None,
    threshold: float,
    participation_shares: defaultdict[NodeOperatorId, int],
    log_operator: OperatorFrameSummary
):
    """Account one validator's attestation performance towards its operator's participation share.

    Mutates `participation_shares` and the operator's frame log entry in place.
    """
    if attestation_duty is None:
        # The validator had no duties in the frame, so its performance is absent from the
        # aggregates (e.g. exited, pending for activation etc.) — nothing to account for.
        # TODO: check `sync_aggr` to strike (in case of bad sync performance) after validator exit
        return

    log_validator = log_operator.validators[validator.index]

    if validator.validator.slashed is True:
        # The validator was active during the frame, got slashed, and didn't reach the exit
        # epoch — such a validator must not contribute to the operator's share.
        log_validator.slashed = True
        return

    if attestation_duty.perf > threshold:
        # The number of assigned attestations approximates how long the validator
        # was active within the frame.
        participation_shares[validator.lido_id.operatorIndex] += attestation_duty.assigned

    log_validator.attestation_duty = attestation_duty
341
+
342
@staticmethod
def calc_rewards_distribution_in_frame(
    participation_shares: dict[NodeOperatorId, int],
    rewards_to_distribute: int,
) -> dict[NodeOperatorId, int]:
    """Split `rewards_to_distribute` among operators pro rata to their participation shares.

    Uses integer division, so any remainder ("dust") is intentionally left undistributed.
    """
    rewards_distribution: dict[NodeOperatorId, int] = defaultdict(int)
    total_participation = sum(participation_shares.values())

    for no_id, share in participation_shares.items():
        if not share:
            # Operators with zero participation receive nothing.
            continue
        rewards_distribution[no_id] = rewards_to_distribute * share // total_participation

    return rewards_distribution
326
357
327
358
def get_accumulated_shares (self , cid : CID , root : HexBytes ) -> Iterator [tuple [NodeOperatorId , Shares ]]:
328
359
logger .info ({"msg" : "Fetching tree by CID from IPFS" , "cid" : repr (cid )})
0 commit comments