added initialization step and code to print to csv. Not confident initialization is doing anything right now given the number of epochs. In general, not quite satisfied with the shape of mu; it still seems to increase linearly after a bit.
AlexYFM committed Sep 11, 2024
1 parent 628f9c6 commit eee3958
Showing 2 changed files with 96 additions and 6 deletions.
72 changes: 72 additions & 0 deletions verse/stars/nn_results.csv
@@ -0,0 +1,72 @@
time,mu
0.0,2.0558527
0.1,2.010922
0.2,1.9875194
0.3,1.9978961
0.4,1.9818294
0.5,1.9269229
0.6,1.8700421
0.7,1.8410861
0.8,1.8507035
0.90000004,1.8603206
1.0,1.8699384
1.1,1.8778574
1.2,1.8804708
1.3,1.8776419
1.4,1.8846819
1.5,1.900111
1.6,1.9155452
1.7,1.9309776
1.8000001,1.9464085
1.9,1.9618409
2.0,1.9778116
2.1,1.9939077
2.2,2.0100057
2.3,2.0261037
2.4,2.0422013
2.5,2.0583012
2.6000001,2.0743973
2.7,2.0904987
2.8000002,2.1065953
2.9,2.1226943
3.0,2.1387904
3.1000001,2.154888
3.2,2.1709864
3.3,2.1870844
3.4,2.2031844
3.5,2.2192786
3.6000001,2.2353804
3.7,2.2514765
3.8,2.2675755
3.9,2.2836716
4.0,2.2997696
4.1,2.3158686
4.2,2.3319705
4.3,2.3480666
4.4,2.3641665
4.5,2.3802617
4.6,2.3963654
4.7,2.4124587
4.8,2.4285576
4.9,2.4446585
5.0,2.4608128
5.1000004,2.4774582
5.2000003,2.4941037
5.3,2.5107548
5.4,2.5274022
5.5,2.5440476
5.6,2.560696
5.7,2.5773404
5.7999997,2.5939982
5.9,2.6106436
6.0,2.627289
6.1,2.6439402
6.2,2.6605818
6.2999997,2.6772273
6.4,2.6938822
6.5,2.7105277
6.6,2.7271693
6.7,2.7438204
6.8,2.7604716
6.9,2.7771246
7.0,2.7937682
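For quick inspection, the series above can be reloaded and plotted with pandas (a minimal sketch, assuming the repository root as the working directory):

import pandas as pd
import matplotlib.pyplot as plt

# Reload the mu(t) series written out by star_nn.py below
df = pd.read_csv('./verse/stars/nn_results.csv')

# The commit message notes mu still seems to increase linearly after a bit;
# plotting the series is the quickest way to eyeball that shape.
plt.plot(df['time'], df['mu'])
plt.xlabel('time')
plt.ylabel('mu')
plt.show()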
30 changes: 24 additions & 6 deletions verse/stars/star_nn.py
@@ -5,6 +5,7 @@
from starset import *
from scipy.integrate import ode
from sklearn.decomposition import PCA
+import pandas as pd

### synthetic dynamic and simulation function
def dynamic_test(vec, t):
@@ -89,15 +90,24 @@ def forward(self, x):

model = PostNN(input_size, hidden_size, output_size)

+def he_init(m):
+    if isinstance(m, nn.Linear):
+        nn.init.kaiming_normal_(m.weight, nonlinearity='relu') # Apply He Normal Initialization
+        if m.bias is not None:
+            nn.init.constant_(m.bias, 0) # Initialize biases to 0 (optional)
+
+# Apply He initialization to the existing model
+model.apply(he_init)
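# Sanity check (a sketch, not part of the committed script; assumes fc1 is an
# nn.Linear, as the gradient print further down suggests): He normal init draws
# weights from N(0, 2/fan_in), so the empirical std right after
# model.apply(he_init) should sit near sqrt(2/fan_in).
with torch.no_grad():
    fan_in = model.fc1.weight.shape[1]
    print(f'fc1 weight std {model.fc1.weight.std().item():.4f} vs expected {(2.0/fan_in)**0.5:.4f}')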

# Use Adam as the optimizer
optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-5)
scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
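# Note: ExponentialLR multiplies the learning rate by gamma on every call to
# scheduler.step(), so the lr decays as 1e-3 * 0.9**k after k steps.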

num_epochs = 50 # sample number of epochs -- can be tuned as a hyperparameter
num_samples = 100 # number of samples per time step
-lamb = 1
+lamb = 30

-T = 14
+T = 7
ts = 0.1
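# With T = 7 and ts = 0.1 the evaluation grid has 71 points (t = 0.0, 0.1, ..., 7.0),
# which matches the 71 data rows written to nn_results.csv above.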

initial_star = StarSet(center, basis, C, g)
@@ -166,7 +176,7 @@ def sample_initial(num_samples: int = num_samples) -> List[List[float]]:
# cont = lambda p, i: torch.linalg.vector_norm(torch.relu(C@np.linalg.inv(bases[i])@(p-centers[i])-torch.diag(mu)@g))
# cont = lambda p, i: torch.linalg.vector_norm(torch.relu(C@np.linalg.inv(bases[i])@(p-center)-mu*g))
# loss = (1-lamb)*mu + lamb*torch.sum(torch.stack([cont(point, i) for point in post_points[:, i, 1:]]))/len(post_points[:,i,1:])
-loss = mu + 10*torch.sum(torch.stack([cont(point, i) for point in post_points[:, i, 1:]]))/num_samples
+loss = mu + lamb*torch.sum(torch.stack([cont(point, i) for point in post_points[:, i, 1:]]))/num_samples
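# Per time step i, the loss trades tightness against containment: the first
# term shrinks the scale factor mu, while the ReLU penalty (cf. the commented
# cont above) is nonzero only for sampled post-states that violate the
# mu-scaled star constraints, roughly
#   loss_i = mu + lamb * (1/N) * sum_p ||relu(C V_i^{-1} (p - c_i) - mu g)||
# with V_i the basis and c_i the center at step i, so raising lamb (1 -> 30
# in this commit) weights containment over a tight mu.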

# if i==len(times)-1 and (epoch+1)%10==0:
# f = 1
@@ -185,7 +195,7 @@ def sample_initial(num_samples: int = num_samples) -> List[List[float]]:
# print(f'Loss: {loss.item():.4f}')
if (epoch + 1) % 10 == 0:
print(f'Epoch [{epoch + 1}/{num_epochs}] \n_____________\n')
-print("Gradients of weights and loss", model.fc1.weight.grad, model.fc1.bias.grad)
+# print("Gradients of weights and loss", model.fc1.weight.grad, model.fc1.bias.grad)
for i in range(len(times)):
t = torch.tensor([times[i]], dtype=torch.float32)
mu = model(t)
@@ -199,9 +209,10 @@ def sample_initial(num_samples: int = num_samples) -> List[List[float]]:

model.eval()

-S_0 = sample_star(initial_star, num_samples*10) ### this is critical step -- this needs to be recomputed per training step
+# S_0 = sample_star(initial_star, num_samples*10) ### this is critical step -- this needs to be recomputed per training step
+S = sample_initial(num_samples*10)
post_points = []
-for point in S_0:
+for point in S:
post_points.append(sim_test(None, point, T, ts).tolist())
post_points = np.array(post_points) ### this has shape N x (T/ts) x (n+1), S_t is equivalent to p_p[:, t, 1:]
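# i.e. post_points[:, i, 0] holds the timestamps and post_points[:, i, 1:] the
# sampled states at step i (the set S_t referenced in the comment above).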

@@ -239,4 +250,11 @@ def sample_initial(num_samples: int = num_samples) -> List[List[float]]:
# plt.plot(test_times, model(test).detach().numpy())
plot_stars_points_nonit(stars, post_points)
plt.plot(test.numpy(), model(test).detach().numpy())

+results = pd.DataFrame({
+    'time': test.squeeze().numpy(),
+    'mu': model(test).squeeze().detach().numpy()
+})
+
+results.to_csv('./verse/stars/nn_results.csv', index=False)
plt.show()
