Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 5 additions & 5 deletions gis/agents_and_networks/references/GMU-Social.nlogo
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ vertices-own [
entrance? ;;if it is an entrance to a building
test ;;used to delete in test

;;the follwoing variables are used and renewed in each path-selection
;;the following variables are used and renewed in each path-selection
dist ;;distance from original point to here
done ;;1 if has calculated the shortest path through this point, 0 otherwise
lastnode ;;last node to this point in shortest path
Expand Down Expand Up @@ -108,7 +108,7 @@ to setup

;;ask patches with [ centroid? = true][sprout 1 [set size 2 set color red]] ;;use this line to verify

;;create turtles representing the nodes. create links to conect them.
;;create turtles representing the nodes. create links to connect them.
foreach gis:feature-list-of gmu-walkway [ road-feature ->
foreach gis:vertex-lists-of road-feature [ v -> ; for the road feature, get the list of vertices
let previous-node-pt nobody
Expand Down Expand Up @@ -813,7 +813,7 @@ PLOT
456
739
672
Firends at Home
Friends at Home
No. of friends at home
Count of people
0.0
Expand Down Expand Up @@ -852,15 +852,15 @@ You may want to turn off some layers for a clear display.

## THINGS TO TRY

Change the switches for different dispalys. Try different number of coimmuters. Try the verification.
Change the switches for different displays. Try different numbers of commuters. Try the verification.

## EXTENDING THE MODEL

What if the commuters move with a speed (some distance per tick) instead of one node per tick?

## NETLOGO FEATURES

For faster compuation, this model simplifies the original data by reducing the number of nodes. To do that, the walkway data is loaded to the 20 x 20 grid in Netlogo, which is small, and therefore, many nodes fall on the same patch. In each patch, we only want to keep one node, and duplicate nodes are removed, while their neighbors are connected to the one node left.
For faster computation, this model simplifies the original data by reducing the number of nodes. To do that, the walkway data is loaded to the 20 x 20 grid in Netlogo, which is small, and therefore, many nodes fall on the same patch. In each patch, we only want to keep one node, and duplicate nodes are removed, while their neighbors are connected to the one node left.

Also, links are created in this model to represent roads. This is so far the best way I can find to deal with road-related problems in Netlogo. However, because the way I create links is to link nodes one by one (see code for more details), some roads are likely to be left behind. But again there is no better way I can find. Therefore, I also used a loop in setup to delete nodes that are not connected to the whole network.

Expand Down
4 changes: 3 additions & 1 deletion gis/agents_and_networks/src/space/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,9 @@ class UnitTransformer:
_degree2meter: pyproj.Transformer
_meter2degree: pyproj.Transformer

def __init__(self, degree_crs: pyproj.CRS | None, meter_crs: pyproj.CRS | None):
def __init__(
self, degree_crs: pyproj.CRS | None = None, meter_crs: pyproj.CRS | None = None
):
if degree_crs is None:
degree_crs = pyproj.CRS("EPSG:4326")

Expand Down
2 changes: 1 addition & 1 deletion rl/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ To test the code, simply execute `example.py`:
python example.py
```

*Note: Pre-trained models might not work in some cases because of differnce in versions of libraries used to train and test.*
*Note: Pre-trained models might not work in some cases because of difference in versions of libraries used to train and test.*

To learn about individual implementations, please refer to the README files of specific environments.

Expand Down
4 changes: 2 additions & 2 deletions rl/boltzmann_money/server.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,8 @@

# Modify the MoneyModel class to take actions from the RL model
class MoneyModelRL(BoltzmannWealthModelRL):
def __init__(self, N, width, height):
super().__init__(N, width, height)
def __init__(self, n, width, height):
super().__init__(n, width, height)
model_path = os.path.join(
os.path.dirname(__file__), "..", "model", "boltzmann_money.zip"
)
Expand Down
2 changes: 1 addition & 1 deletion rl/epstein_civil_violence/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ This project demonstrates the use of the RLlib library to implement Multi-Agent
- **Library Utilized**: The project leverages the RLlib library to concurrently train two independent PPO (Proximal Policy Optimization) agents.
- **Agents**:
- **Police**: Aims to control violence (Reduce active agent)
- **Citizen**: Aims to show resistence (be active) without getting arrested
- **Citizen**: Aims to show resistance (be active) without getting arrested

**Input and Observation Space**:
- **Observation Grid**: Each agent's policy receives a 4 radius grid centered on itself as input.
Expand Down
5 changes: 3 additions & 2 deletions rl/epstein_civil_violence/agent.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
from mesa.examples.advanced.epstein_civil_violence.agents import Citizen, Cop
from utility import move

from .utility import move


class CitizenRL(Citizen):
Expand All @@ -11,7 +12,7 @@ def step(self):
self.jail_sentence -= 1
else:
# RL Logic
# Update condition and postion based on action
# Update condition and position based on action
self.condition = "Active" if action_tuple[0] == 1 else "Quiescent"
# Update neighbors for updated empty neighbors
self.update_neighbors()
Expand Down
7 changes: 4 additions & 3 deletions rl/epstein_civil_violence/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,8 @@
from agent import CitizenRL, CopRL
from mesa.examples.advanced.epstein_civil_violence.model import EpsteinCivilViolence
from ray.rllib.env import MultiAgentEnv
from utility import create_intial_agents, grid_to_observation

from .utility import create_initial_agents, grid_to_observation


class EpsteinCivilViolenceRL(EpsteinCivilViolence, MultiAgentEnv):
Expand Down Expand Up @@ -143,8 +144,8 @@ def reset(self, *, seed=None, options=None):
"""
super().reset()
self.grid = mesa.space.SingleGrid(self.width, self.height, torus=True)
create_intial_agents(self, CitizenRL, CopRL)
grid_to_observation(self, CitizenRL)
create_initial_agents(self)
grid_to_observation(self)
# Initialize action dictionary with no action
self.action_dict = {a.unique_id: (0, 0) for a in self.agents}
# Update neighbors for observation space
Expand Down
9 changes: 6 additions & 3 deletions rl/epstein_civil_violence/utility.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,14 @@
def create_intial_agents(self, CitizenRL, CopRL):
from .agent import CitizenRL, CopRL


def create_initial_agents(self):
# Create agents
unique_id = 0
if self.cop_density + self.citizen_density > 1:
raise ValueError("CopRL density + citizen density must be less than 1")
cops = []
citizens = []
for contents, (x, y) in self.grid.coord_iter():
for _, (x, y) in self.grid.coord_iter():
if self.random.random() < self.cop_density:
unique_id_str = f"cop_{unique_id}"
cop = CopRL(unique_id_str, self, (x, y), vision=self.cop_vision)
Expand Down Expand Up @@ -35,7 +38,7 @@ def create_intial_agents(self, CitizenRL, CopRL):
self.add(citizen)


def grid_to_observation(self, CitizenRL):
def grid_to_observation(self):
# Convert neighborhood to observation grid
self.obs_grid = []
for i in self.grid._grid:
Expand Down
2 changes: 1 addition & 1 deletion rl/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ def train_model(
algo_config = get_config(config)
algo = algo_config.build()

for i in range(num_iterations):
for _ in range(num_iterations):
result = algo.train()
print(pretty_print(result))

Expand Down
2 changes: 1 addition & 1 deletion rl/wolf_sheep/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ This project demonstrates the use of the RLlib library to implement Multi-Agent
**RLlib and Multi-Agent Learning**:
- **Library Utilized**: The project leverages the RLlib library to concurrently train two independent PPO (Proximal Policy Optimization) agents.
- **Agents**:
- **Wolf**: Predatory agent survives by eating sheeps
- **Wolf**: Predatory agent survives by eating sheep
- **Sheep**: Prey agent survives by eating grass
- **Grass**: Grass is eaten by sheep and regrows with time

Expand Down
3 changes: 2 additions & 1 deletion rl/wolf_sheep/agents.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
from mesa.examples.advanced.wolf_sheep.agents import GrassPatch, Sheep, Wolf
from utility import move

from .utility import move


class SheepRL(Sheep):
Expand Down
7 changes: 4 additions & 3 deletions rl/wolf_sheep/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,8 @@
from mesa.examples.advanced.wolf_sheep.model import WolfSheep
from mesa.experimental.devs import ABMSimulator
from ray.rllib.env import MultiAgentEnv
from utility import create_intial_agents, grid_to_observation

from .utility import create_initial_agents, grid_to_observation

# Don't create the ABMSimulator as argument default: https://docs.astral.sh/ruff/rules/function-call-in-default-argument/
ABM_SIMULATOR = ABMSimulator()
Expand Down Expand Up @@ -154,8 +155,8 @@ def reset(self, *, seed=None, options=None):
super().reset()
self.grid = mesa.space.MultiGrid(self.width, self.height, torus=True)
self.current_id = 0
create_intial_agents(self, SheepRL, WolfRL, GrassPatch)
grid_to_observation(self, SheepRL, WolfRL, GrassPatch)
create_initial_agents(self)
grid_to_observation(self)
obs = {}
for agent in self.agents:
if isinstance(agent, (SheepRL, WolfRL)):
Expand Down
13 changes: 8 additions & 5 deletions rl/wolf_sheep/utility.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,9 @@
def create_intial_agents(self, SheepRL, WolfRL, GrassPatch):
from .agents import GrassPatch, SheepRL, WolfRL


def create_initial_agents(self):
# Create sheep:
for i in range(self.initial_sheep):
for _ in range(self.initial_sheep):
x = self.random.randrange(self.width)
y = self.random.randrange(self.height)
energy = self.random.randrange(2 * self.sheep_gain_from_food)
Expand All @@ -10,7 +13,7 @@ def create_intial_agents(self, SheepRL, WolfRL, GrassPatch):
self.add(sheep)

# Create wolves
for i in range(self.initial_wolves):
for _ in range(self.initial_wolves):
x = self.random.randrange(self.width)
y = self.random.randrange(self.height)
energy = self.random.randrange(2 * self.wolf_gain_from_food)
Expand All @@ -21,7 +24,7 @@ def create_intial_agents(self, SheepRL, WolfRL, GrassPatch):

# Create grass patches
if self.grass:
for agent, (x, y) in self.grid.coord_iter():
for _, (x, y) in self.grid.coord_iter():
fully_grown = self.random.choice([True, False])

if fully_grown:
Expand Down Expand Up @@ -62,7 +65,7 @@ def move(self, action):
self.model.grid.move_agent(self, new_position)


def grid_to_observation(self, SheepRL, WolfRL, GrassPatch):
def grid_to_observation(self):
# Convert grid to matrix for better representation
self.obs_grid = []
for i in self.grid._grid:
Expand Down
Loading