From 1c1162c8bd04667e624aad37020fe7933d3b8078 Mon Sep 17 00:00:00 2001 From: Wang Boyu Date: Fri, 12 Sep 2025 08:25:16 -0400 Subject: [PATCH 1/2] fix TypeError in agents and networks gis example --- gis/agents_and_networks/src/space/utils.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/gis/agents_and_networks/src/space/utils.py b/gis/agents_and_networks/src/space/utils.py index 8cb8de66..7c4dd460 100644 --- a/gis/agents_and_networks/src/space/utils.py +++ b/gis/agents_and_networks/src/space/utils.py @@ -78,7 +78,9 @@ class UnitTransformer: _degree2meter: pyproj.Transformer _meter2degree: pyproj.Transformer - def __init__(self, degree_crs: pyproj.CRS | None, meter_crs: pyproj.CRS | None): + def __init__( + self, degree_crs: pyproj.CRS | None = None, meter_crs: pyproj.CRS | None = None + ): if degree_crs is None: degree_crs = pyproj.CRS("EPSG:4326") From 8e2792d4c9bb8802652c56b016c33d8cf0e0fa5c Mon Sep 17 00:00:00 2001 From: Wang Boyu Date: Mon, 22 Sep 2025 10:22:03 -0400 Subject: [PATCH 2/2] fix ruff errors --- gis/agents_and_networks/references/GMU-Social.nlogo | 10 +++++----- rl/README.md | 2 +- rl/boltzmann_money/server.py | 4 ++-- rl/epstein_civil_violence/README.md | 2 +- rl/epstein_civil_violence/agent.py | 5 +++-- rl/epstein_civil_violence/model.py | 7 ++++--- rl/epstein_civil_violence/utility.py | 9 ++++++--- rl/train.py | 2 +- rl/wolf_sheep/README.md | 2 +- rl/wolf_sheep/agents.py | 3 ++- rl/wolf_sheep/model.py | 7 ++++--- rl/wolf_sheep/utility.py | 13 ++++++++----- 12 files changed, 38 insertions(+), 28 deletions(-) diff --git a/gis/agents_and_networks/references/GMU-Social.nlogo b/gis/agents_and_networks/references/GMU-Social.nlogo index ab89d429..12bed196 100755 --- a/gis/agents_and_networks/references/GMU-Social.nlogo +++ b/gis/agents_and_networks/references/GMU-Social.nlogo @@ -54,7 +54,7 @@ vertices-own [ entrance? 
;;if it is an entrance to a building
 test ;;used to delete in test
- ;;the follwoing variables are used and renewed in each path-selection
+ ;;the following variables are used and renewed in each path-selection
 dist ;;distance from original point to here
 done ;;1 if has calculated the shortest path through this point, 0 otherwise
 lastnode ;;last node to this point in shortest path
@@ -108,7 +108,7 @@ to setup
 ;;ask patches with [ centroid? = true][sprout 1 [set size 2 set color red]] ;;use this line to verify
- ;;create turtles representing the nodes. create links to conect them.
+ ;;create turtles representing the nodes. create links to connect them.
 foreach gis:feature-list-of gmu-walkway [ road-feature ->
 foreach gis:vertex-lists-of road-feature [ v -> ; for the road feature, get the list of vertices
 let previous-node-pt nobody
@@ -813,7 +813,7 @@ PLOT
456
739
672
-Firends at Home
+Friends at Home
No. of friends at home
Count of people
0.0
@@ -852,7 +852,7 @@ You may want to turn off some layers for a clear display.

## THINGS TO TRY

-Change the switches for different dispalys. Try different number of coimmuters. Try the verification.
+Change the switches for different displays. Try a different number of commuters. Try the verification.

## EXTENDING THE MODEL

What if the commuters move with a speed (some distance per tick) instead of one patch per tick? How it will affect the model?

## NETLOGO FEATURES

-For faster compuation, this model simplifies the original data by reducing the number of nodes. To do that, the walkway data is loaded to the 20 x 20 grid in Netlogo, which is small, and therefore, many nodes fall on the same patch. In each patch, we only want to keep one node, and duplicate nodes are removed, while their neighbors are connected to the one node left.
+For faster computation, this model simplifies the original data by reducing the number of nodes. 
To do that, the walkway data is loaded to the 20 x 20 grid in Netlogo, which is small, and therefore, many nodes fall on the same patch. In each patch, we only want to keep one node, and duplicate nodes are removed, while their neighbors are connected to the one node left. Also, links are created in this model to represent raods. This is so far the best way I can find to deal with road related problems in Netlogo. However, because the way I create links is to link nodes one by one (see code for more details), so some roads are likely to be left behind. But again there is no better way I can find. Therefore, I also used a loop in setup to delete nodes that are not connected to the whole network. diff --git a/rl/README.md b/rl/README.md index 88ee5f27..01fd4062 100644 --- a/rl/README.md +++ b/rl/README.md @@ -50,7 +50,7 @@ To test the code, simply execute `example.py`: python example.py ``` -*Note: Pre-trained models might not work in some cases because of differnce in versions of libraries used to train and test.* +*Note: Pre-trained models might not work in some cases because of difference in versions of libraries used to train and test.* To learn about individual implementations, please refer to the README files of specific environments. 
diff --git a/rl/boltzmann_money/server.py b/rl/boltzmann_money/server.py index 8dbbc1e7..dbe374af 100644 --- a/rl/boltzmann_money/server.py +++ b/rl/boltzmann_money/server.py @@ -9,8 +9,8 @@ # Modify the MoneyModel class to take actions from the RL model class MoneyModelRL(BoltzmannWealthModelRL): - def __init__(self, N, width, height): - super().__init__(N, width, height) + def __init__(self, n, width, height): + super().__init__(n, width, height) model_path = os.path.join( os.path.dirname(__file__), "..", "model", "boltzmann_money.zip" ) diff --git a/rl/epstein_civil_violence/README.md b/rl/epstein_civil_violence/README.md index a903ee92..739eb6c9 100644 --- a/rl/epstein_civil_violence/README.md +++ b/rl/epstein_civil_violence/README.md @@ -8,7 +8,7 @@ This project demonstrates the use of the RLlib library to implement Multi-Agent - **Library Utilized**: The project leverages the RLlib library to concurrently train two independent PPO (Proximal Policy Optimization) agents. - **Agents**: - **Police**: Aims to control violence (Reduce active agent) - - **Citizen**: Aims to show resistence (be active) without getting arrested + - **Citizen**: Aims to show resistance (be active) without getting arrested **Input and Observation Space**: - **Observation Grid**: Each agent's policy receives a 4 radius grid centered on itself as input. 
diff --git a/rl/epstein_civil_violence/agent.py b/rl/epstein_civil_violence/agent.py index c693788a..48827230 100644 --- a/rl/epstein_civil_violence/agent.py +++ b/rl/epstein_civil_violence/agent.py @@ -1,5 +1,6 @@ from mesa.examples.advanced.epstein_civil_violence.agents import Citizen, Cop -from utility import move + +from .utility import move class CitizenRL(Citizen): @@ -11,7 +12,7 @@ def step(self): self.jail_sentence -= 1 else: # RL Logic - # Update condition and postion based on action + # Update condition and position based on action self.condition = "Active" if action_tuple[0] == 1 else "Quiescent" # Update neighbors for updated empty neighbors self.update_neighbors() diff --git a/rl/epstein_civil_violence/model.py b/rl/epstein_civil_violence/model.py index e6d9ab2e..a8255268 100644 --- a/rl/epstein_civil_violence/model.py +++ b/rl/epstein_civil_violence/model.py @@ -4,7 +4,8 @@ from agent import CitizenRL, CopRL from mesa.examples.advanced.epstein_civil_violence.model import EpsteinCivilViolence from ray.rllib.env import MultiAgentEnv -from utility import create_intial_agents, grid_to_observation + +from .utility import create_initial_agents, grid_to_observation class EpsteinCivilViolenceRL(EpsteinCivilViolence, MultiAgentEnv): @@ -143,8 +144,8 @@ def reset(self, *, seed=None, options=None): """ super().reset() self.grid = mesa.space.SingleGrid(self.width, self.height, torus=True) - create_intial_agents(self, CitizenRL, CopRL) - grid_to_observation(self, CitizenRL) + create_initial_agents(self) + grid_to_observation(self) # Initialize action dictionary with no action self.action_dict = {a.unique_id: (0, 0) for a in self.agents} # Update neighbors for observation space diff --git a/rl/epstein_civil_violence/utility.py b/rl/epstein_civil_violence/utility.py index a2f0e876..9447dc6f 100644 --- a/rl/epstein_civil_violence/utility.py +++ b/rl/epstein_civil_violence/utility.py @@ -1,11 +1,14 @@ -def create_intial_agents(self, CitizenRL, CopRL): +from .agent 
import CitizenRL, CopRL + + +def create_initial_agents(self): # Create agents unique_id = 0 if self.cop_density + self.citizen_density > 1: raise ValueError("CopRL density + citizen density must be less than 1") cops = [] citizens = [] - for contents, (x, y) in self.grid.coord_iter(): + for _, (x, y) in self.grid.coord_iter(): if self.random.random() < self.cop_density: unique_id_str = f"cop_{unique_id}" cop = CopRL(unique_id_str, self, (x, y), vision=self.cop_vision) @@ -35,7 +38,7 @@ def create_intial_agents(self, CitizenRL, CopRL): self.add(citizen) -def grid_to_observation(self, CitizenRL): +def grid_to_observation(self): # Convert neighborhood to observation grid self.obs_grid = [] for i in self.grid._grid: diff --git a/rl/train.py b/rl/train.py index 59214739..419e9590 100644 --- a/rl/train.py +++ b/rl/train.py @@ -36,7 +36,7 @@ def train_model( algo_config = get_config(config) algo = algo_config.build() - for i in range(num_iterations): + for _ in range(num_iterations): result = algo.train() print(pretty_print(result)) diff --git a/rl/wolf_sheep/README.md b/rl/wolf_sheep/README.md index d877fc02..b97fb62a 100644 --- a/rl/wolf_sheep/README.md +++ b/rl/wolf_sheep/README.md @@ -7,7 +7,7 @@ This project demonstrates the use of the RLlib library to implement Multi-Agent **RLlib and Multi-Agent Learning**: - **Library Utilized**: The project leverages the RLlib library to concurrently train two independent PPO (Proximal Policy Optimization) agents. 
- **Agents**: - - **Wolf**: Predatory agent survives by eating sheeps + - **Wolf**: Predatory agent survives by eating sheep - **Sheep**: Prey agent survives by eating grass - **Grass**: Grass is eaten by sheep and regrows with time diff --git a/rl/wolf_sheep/agents.py b/rl/wolf_sheep/agents.py index 0d1eed5f..92231179 100644 --- a/rl/wolf_sheep/agents.py +++ b/rl/wolf_sheep/agents.py @@ -1,5 +1,6 @@ from mesa.examples.advanced.wolf_sheep.agents import GrassPatch, Sheep, Wolf -from utility import move + +from .utility import move class SheepRL(Sheep): diff --git a/rl/wolf_sheep/model.py b/rl/wolf_sheep/model.py index bbe02360..dd12c1ab 100644 --- a/rl/wolf_sheep/model.py +++ b/rl/wolf_sheep/model.py @@ -6,7 +6,8 @@ from mesa.examples.advanced.wolf_sheep.model import WolfSheep from mesa.experimental.devs import ABMSimulator from ray.rllib.env import MultiAgentEnv -from utility import create_intial_agents, grid_to_observation + +from .utility import create_initial_agents, grid_to_observation # Don't create the ABMSimulator as argument default: https://docs.astral.sh/ruff/rules/function-call-in-default-argument/ ABM_SIMULATOR = ABMSimulator() @@ -154,8 +155,8 @@ def reset(self, *, seed=None, options=None): super().reset() self.grid = mesa.space.MultiGrid(self.width, self.height, torus=True) self.current_id = 0 - create_intial_agents(self, SheepRL, WolfRL, GrassPatch) - grid_to_observation(self, SheepRL, WolfRL, GrassPatch) + create_initial_agents(self) + grid_to_observation(self) obs = {} for agent in self.agents: if isinstance(agent, (SheepRL, WolfRL)): diff --git a/rl/wolf_sheep/utility.py b/rl/wolf_sheep/utility.py index 40d140d4..0dc44769 100644 --- a/rl/wolf_sheep/utility.py +++ b/rl/wolf_sheep/utility.py @@ -1,6 +1,9 @@ -def create_intial_agents(self, SheepRL, WolfRL, GrassPatch): +from .agents import GrassPatch, SheepRL, WolfRL + + +def create_initial_agents(self): # Create sheep: - for i in range(self.initial_sheep): + for _ in range(self.initial_sheep): x = 
self.random.randrange(self.width) y = self.random.randrange(self.height) energy = self.random.randrange(2 * self.sheep_gain_from_food) @@ -10,7 +13,7 @@ def create_intial_agents(self, SheepRL, WolfRL, GrassPatch): self.add(sheep) # Create wolves - for i in range(self.initial_wolves): + for _ in range(self.initial_wolves): x = self.random.randrange(self.width) y = self.random.randrange(self.height) energy = self.random.randrange(2 * self.wolf_gain_from_food) @@ -21,7 +24,7 @@ def create_intial_agents(self, SheepRL, WolfRL, GrassPatch): # Create grass patches if self.grass: - for agent, (x, y) in self.grid.coord_iter(): + for _, (x, y) in self.grid.coord_iter(): fully_grown = self.random.choice([True, False]) if fully_grown: @@ -62,7 +65,7 @@ def move(self, action): self.model.grid.move_agent(self, new_position) -def grid_to_observation(self, SheepRL, WolfRL, GrassPatch): +def grid_to_observation(self): # Convert grid to matrix for better representation self.obs_grid = [] for i in self.grid._grid: