diff --git a/neurons/validator.py b/neurons/validator.py
index 182e25d..6361ba1 100644
--- a/neurons/validator.py
+++ b/neurons/validator.py
@@ -1295,20 +1295,7 @@ async def run_step(self):
         tracker_competition_weights = self.competition_tracker.get_competition_weights(
             competition.id
         )
-        model_prioritization = {
-            uid: (
-                # Add 1 to ensure it is always greater than a win rate.
-                1 + tracker_competition_weights[uid].item()
-                if tracker_competition_weights[uid].item() >= 0.001
-                else wr
-            )
-            for uid, wr in win_rate.items()
-        }
-        models_to_keep = set(
-            sorted(model_prioritization, key=model_prioritization.get, reverse=True)[
-                : self.config.sample_min
-            ]
-        )
+        models_to_keep = self._calculate_models_to_keep(win_rate, tracker_competition_weights)
         self._update_uids_to_eval(
             competition.id, models_to_keep, active_competition_ids
         )
@@ -1343,6 +1330,24 @@ async def run_step(self):
         # Increment the number of completed run steps by 1
         self.run_step_count += 1
 
+    def _calculate_models_to_keep(self, win_rate, tracker_competition_weights):
+        model_prioritization = {
+            uid: (
+                # Priority should be based on current win rate + historical weight.
+                wr + (tracker_competition_weights[uid].item()
+                      if tracker_competition_weights[uid].item() >= 0.001
+                      else 0.0)
+            )
+            for uid, wr in win_rate.items()
+        }
+
+        models_to_keep = set(
+            sorted(model_prioritization, key=model_prioritization.get, reverse=True)[
+                : self.config.sample_min
+            ]
+        )
+        return models_to_keep
+
     def _update_uids_to_eval(
         self,
         competition_id: CompetitionId,
diff --git a/tests/neurons/__init__.py b/tests/neurons/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/neurons/test_validator.py b/tests/neurons/test_validator.py
new file mode 100644
index 0000000..21a4c37
--- /dev/null
+++ b/tests/neurons/test_validator.py
@@ -0,0 +1,55 @@
+import unittest
+import torch
+import typing
+from neurons.validator import Validator
+
+class MockValidatorConfig:
+    def __init__(self, sample_min: int):
+        self.sample_min = sample_min
+
+class TestValidatorCalculateModelsToKeep(unittest.TestCase):
+
+    def setUp(self):
+        self.uid_winner = 69
+        self.uid_loser_with_historical_weight = 21
+        self.uid_loser_no_historical_weight = 37
+        self.validator_logic_host = Validator.__new__(Validator)
+
+    def test_buggy_calculate_models_to_keep_drops_winner(self):
+        """
+        Tests the production '_calculate_models_to_keep' method.
+        This test should FAIL with the buggy prioritization code and PASS with the fixed code.
+        """
+        current_sample_min = 1
+        self.validator_logic_host.config = MockValidatorConfig(sample_min=current_sample_min)
+
+        win_rate = {
+            self.uid_winner: 1.0,
+            self.uid_loser_with_historical_weight: 0.2,
+            self.uid_loser_no_historical_weight: 0.1,
+        }
+
+        all_test_uids = list(win_rate.keys())
+        max_uid_in_test = max(all_test_uids)
+
+        tracker_competition_weights_tensor = torch.zeros(max_uid_in_test + 1, dtype=torch.float32)
+        tracker_competition_weights_tensor[self.uid_winner] = 0.0
+        tracker_competition_weights_tensor[self.uid_loser_with_historical_weight] = 0.001
+        tracker_competition_weights_tensor[self.uid_loser_no_historical_weight] = 0.0
+
+        # ---- Call the Production Code ----
+        models_kept = self.validator_logic_host._calculate_models_to_keep(
+            win_rate,
+            tracker_competition_weights_tensor
+        )
+
+        self.assertIn(
+            self.uid_winner,
+            models_kept,
+            f"BUG BEHAVIOR: Actual winner (UID {self.uid_winner}) was dropped. "
+            f"Its priority should be 1.0."
+        )
+
+
+if __name__ == "__main__":
+    unittest.main()
\ No newline at end of file
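
For reference, a minimal standalone sketch (not part of the patch) of how the old and new prioritization formulas rank the three UIDs used in the test above. The dictionaries and helper functions below are local to this sketch and are not validator APIs.

# Standalone sketch: compares the old (buggy) and new priority formulas on the
# same inputs as test_buggy_calculate_models_to_keep_drops_winner.
win_rate = {69: 1.0, 21: 0.2, 37: 0.1}             # UID -> current win rate
historical_weight = {69: 0.0, 21: 0.001, 37: 0.0}  # UID -> tracker competition weight
sample_min = 1

def old_priority(uid, wr):
    # Buggy formula: any weight >= 0.001 becomes 1 + weight, which always
    # outranks a pure win rate, so the actual winner can be dropped.
    w = historical_weight[uid]
    return 1 + w if w >= 0.001 else wr

def new_priority(uid, wr):
    # Fixed formula: win rate plus historical weight (weight ignored when negligible).
    w = historical_weight[uid]
    return wr + (w if w >= 0.001 else 0.0)

for name, priority in (("old", old_priority), ("new", new_priority)):
    ranked = sorted(win_rate, key=lambda uid: priority(uid, win_rate[uid]), reverse=True)
    print(name, "keeps", set(ranked[:sample_min]))
# old keeps {21}  -- UID 69 (win rate 1.0) is dropped
# new keeps {69}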