-
Notifications
You must be signed in to change notification settings - Fork 7
/
Copy pathshowGridCells.py
110 lines (80 loc) · 3.78 KB
/
showGridCells.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
import numpy as np
import os
import matplotlib.pyplot as plt
import os
from scipy.signal import correlate2d
from ratSimulator import RatSimulator
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
def showGridCells(agent, dataGenerator, num_traj, num_steps, pcu, hcu, llu, bins, place_cell_centers, head_cell_centers):
    """Compute and save spatial activity maps (and their autocorrelations)
    for the linear-layer units of a trained agent.

    Simulates ``num_traj`` rat trajectories, feeds them through the agent in
    batches of 500 trajectories x 100 timesteps, accumulates the absolute
    linear-layer activations into a ``(llu, bins, bins)`` histogram over 2D
    position, and saves two figure grids:
    ``activityMaps/neurons.jpg`` and ``corrMaps/neurons.jpg``.

    Parameters
    ----------
    agent : model wrapper exposing ``sess``, ``X``, ``placeCellGround``,
        ``headCellGround`` and ``linearLayer`` (TensorFlow-style session API).
    dataGenerator : provides ``computePlaceCellsDistrib`` and
        ``computeHeadCellsDistrib``.
    num_traj : int
        Number of trajectories to simulate (assumed a multiple of 500).
    num_steps : int
        Timesteps per trajectory (the 8 sub-sequence inits assume 800 —
        TODO confirm for other values).
    pcu, hcu : int
        Number of place-cell / head-direction-cell units.
    llu : int
        Number of linear-layer units (e.g. 512; must fit the 32x16 grid).
    bins : int
        Spatial resolution of the activity map along each axis.
    place_cell_centers, head_cell_centers
        Passed through to ``dataGenerator``.

    Returns
    -------
    None. Side effects: creates ``activityMaps/`` and ``corrMaps/`` and
    writes one image into each.
    """
    # The arena is assumed to span 2.2 units per axis -> spatial bin width.
    factor = 2.2 / bins
    activityMap = np.zeros((llu, bins, bins))
    counterActivityMap = np.zeros((llu, bins, bins))

    X = np.zeros((num_traj, num_steps, 3))
    positions = np.zeros((num_traj, num_steps, 2))
    angles = np.zeros((num_traj, num_steps, 1))

    env = RatSimulator(num_steps)

    print(">>Generating trajectory")
    for i in range(num_traj):
        vel, angVel, pos, angle = env.generateTrajectory()
        X[i, :, 0] = vel
        X[i, :, 1] = np.sin(angVel)
        X[i, :, 2] = np.cos(angVel)
        positions[i, :] = pos
        # BUG FIX: `angle` was previously discarded, leaving `angles` all
        # zeros, so the head-cell ground truth below was constant.
        # Assumes `angle` holds one heading per timestep — TODO confirm
        # against RatSimulator.generateTrajectory.
        angles[i, :] = np.reshape(angle, (num_steps, 1))

    # Ground-truth distributions used to (re-)initialize the LSTM state at
    # the start of each 100-step sub-sequence.
    init_X = np.zeros((num_traj, 8, pcu + hcu))
    for i in range(8):
        init_X[:, i, :pcu] = dataGenerator.computePlaceCellsDistrib(positions[:, (i * 100)], place_cell_centers)
        init_X[:, i, pcu:] = dataGenerator.computeHeadCellsDistrib(angles[:, (i * 100)], head_cell_centers)

    print(">>Computing Actvity maps")
    # Feed 500 examples at a time to avoid memory problems. Otherwise
    # (10000*100 = 1 million row matrix).
    batch_size = 500
    for startB in range(0, num_traj, batch_size):
        endB = startB + batch_size
        # Divide each sequence into 100-step chunks.
        for startT in range(0, num_steps, 100):
            endT = startT + 100
            # Inputs for this chunk; the LSTM hidden/cell state is seeded
            # from the ground-truth distributions at the chunk boundary.
            xBatch = X[startB:endB, startT:endT]
            feed_dict = {agent.X: xBatch,
                         agent.placeCellGround: init_X[startB:endB, (startT // 100), :pcu],
                         agent.headCellGround: init_X[startB:endB, (startT // 100), pcu:]}
            linearNeurons = agent.sess.run(agent.linearLayer, feed_dict=feed_dict)

            # Flatten (batch, 100, 2) -> (batch*100, 2) to align with the
            # per-timestep activations.
            posReshaped = np.reshape(positions[startB:endB, startT:endT], (-1, 2))

            # Accumulate each timestep's llu activations into its spatial bin.
            for t in range(linearNeurons.shape[0]):
                bin_x, bin_y = (posReshaped[t] // factor).astype(int)
                # BUG FIX: clamp both indices independently. The original
                # `elif` skipped clamping bin_x whenever bin_y was also at
                # the boundary, producing an out-of-bounds index.
                if bin_y >= bins:
                    bin_y = bins - 1
                if bin_x >= bins:
                    bin_x = bins - 1
                # linearNeurons[t] is one llu-length activation vector.
                activityMap[:, bin_y, bin_x] += np.abs(linearNeurons[t])
                # BUG FIX: increment by 1 per unit instead of the hard-coded
                # np.ones((512)), which broke for llu != 512.
                counterActivityMap[:, bin_y, bin_x] += 1

    # BUG FIX: never-visited bins would give 0/0 -> NaN, and a single NaN
    # poisons the global min/max normalization below. Treat them as zero
    # activity instead.
    counterActivityMap[counterActivityMap == 0] = 1
    # Average activation per visit.
    result = activityMap / counterActivityMap

    os.makedirs("activityMaps", exist_ok=True)
    os.makedirs("corrMaps", exist_ok=True)

    # Normalize jointly across all units (global min/max), preserving the
    # relative strength between units.
    normMap = (result - np.min(result)) / (np.max(result) - np.min(result))

    # Subplot grid: rows*cols must be >= llu (512 by default).
    cols = 16
    rows = 32

    # Per-unit activity maps.
    fig = plt.figure(figsize=(80, 80))
    for i in range(llu):
        fig.add_subplot(rows, cols, i + 1)
        plt.imshow(normMap[i], cmap="jet", origin="lower")
        plt.axis('off')
    fig.savefig('activityMaps/neurons.jpg')
    plt.close(fig)  # release the very large figure's memory

    # Per-unit spatial autocorrelations (grid-cell periodicity is visible
    # as a hexagonal pattern here).
    fig = plt.figure(figsize=(80, 80))
    for i in range(llu):
        fig.add_subplot(rows, cols, i + 1)
        plt.imshow(correlate2d(normMap[i], normMap[i]), cmap="jet", origin="lower")
        plt.axis('off')
    fig.savefig('corrMaps/neurons.jpg')
    plt.close(fig)