33 changes: 19 additions & 14 deletions neurons/validator.py
@@ -1295,20 +1295,7 @@ async def run_step(self):
tracker_competition_weights = self.competition_tracker.get_competition_weights(
competition.id
)
model_prioritization = {
uid: (
# Add 1 to ensure it is always greater than a win rate.
1 + tracker_competition_weights[uid].item()
if tracker_competition_weights[uid].item() >= 0.001
else wr
)
for uid, wr in win_rate.items()
}
models_to_keep = set(
sorted(model_prioritization, key=model_prioritization.get, reverse=True)[
: self.config.sample_min
]
)
models_to_keep = self._calculate_models_to_keep(win_rate, tracker_competition_weights)
self._update_uids_to_eval(
competition.id, models_to_keep, active_competition_ids
)
@@ -1343,6 +1330,24 @@ async def run_step(self):
# Increment the number of completed run steps by 1
self.run_step_count += 1

def _calculate_models_to_keep(self, win_rate, tracker_competition_weights):
model_prioritization = {
uid: (
# Priority should be based on current win rate + historical weight.
wr + (tracker_competition_weights[uid].item()
if tracker_competition_weights[uid].item() >= 0.001
else 0.0)
)
for uid, wr in win_rate.items()
}

models_to_keep = set(
sorted(model_prioritization, key=model_prioritization.get, reverse=True)[
: self.config.sample_min
]
)
return models_to_keep

def _update_uids_to_eval(
self,
competition_id: CompetitionId,
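For context, the sketch below is not part of the diff; it is a standalone comparison of the old and new prioritization formulas on the same hypothetical inputs the new test uses, showing why the old formula could drop the actual winner when `sample_min` is small:

```python
# Illustrative only: standalone comparison of the old and new prioritization.
# UIDs, win rates, and weights mirror the hypothetical values in the test below.
win_rate = {69: 1.0, 21: 0.2, 37: 0.1}
weights = {69: 0.0, 21: 0.001, 37: 0.0}
sample_min = 1

# Old formula: any uid with historical weight >= 0.001 gets priority 1 + weight,
# which is always greater than any win rate, so uid 21 outranks the real winner.
old_priority = {
    uid: (1 + weights[uid]) if weights[uid] >= 0.001 else wr
    for uid, wr in win_rate.items()
}

# New formula: historical weight is added to the win rate instead of replacing it,
# so the uid with the best win rate keeps the top spot.
new_priority = {
    uid: wr + (weights[uid] if weights[uid] >= 0.001 else 0.0)
    for uid, wr in win_rate.items()
}

print(sorted(old_priority, key=old_priority.get, reverse=True)[:sample_min])  # [21]
print(sorted(new_priority, key=new_priority.get, reverse=True)[:sample_min])  # [69]
```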
Empty file added tests/neurons/__init__.py
55 changes: 55 additions & 0 deletions tests/neurons/test_validator.py
@@ -0,0 +1,55 @@
import unittest
import torch
import typing
from neurons.validator import Validator

class MockValidatorConfig:
def __init__(self, sample_min: int):
self.sample_min = sample_min

class TestValidatorCalculateModelsToKeep(unittest.TestCase):

def setUp(self):
self.uid_winner = 69
self.uid_loser_with_historical_weight = 21
self.uid_loser_no_historical_weight = 37
self.validator_logic_host = Validator.__new__(Validator)

def test_buggy_calculate_models_to_keep_drops_winner(self):
"""
Tests the production '_calculate_models_to_keep' method.
This test fails with the original (buggy) prioritization and passes with the fix.
"""
current_sample_min = 1
self.validator_logic_host.config = MockValidatorConfig(sample_min=current_sample_min)

win_rate = {
self.uid_winner: 1.0,
self.uid_loser_with_historical_weight: 0.2,
self.uid_loser_no_historical_weight: 0.1,
}

all_test_uids = list(win_rate.keys())
max_uid_in_test = max(all_test_uids)

tracker_competition_weights_tensor = torch.zeros(max_uid_in_test + 1, dtype=torch.float32)
tracker_competition_weights_tensor[self.uid_winner] = 0.0
tracker_competition_weights_tensor[self.uid_loser_with_historical_weight] = 0.001
tracker_competition_weights_tensor[self.uid_loser_no_historical_weight] = 0.0

# ---- Call the Production Code ----
models_kept = self.validator_logic_host._calculate_models_to_keep(
win_rate,
tracker_competition_weights_tensor
)

self.assertIn(
self.uid_winner,
models_kept,
f"BUG BEHAVIOR: Actual winner (UID {self.uid_winner}) was dropped. "
f"Its priority should be 1.0."
)


if __name__ == "__main__":
unittest.main()
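A quick way to exercise the new test locally, assuming `torch` is installed and the repository root is on the import path so that `neurons.validator` resolves: `python -m unittest tests.neurons.test_validator -v`.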