Commit b67a46c
slight improvements from my fork
AlexYFM committed Oct 15, 2024
1 parent 83de405 commit b67a46c
Showing 2 changed files with 6 additions and 7 deletions.
4 changes: 2 additions & 2 deletions demo/dryvr_demo/thermo_demo.py
@@ -43,7 +43,7 @@ class ThermoMode(Enum):
 scenario.set_sensor(BaseStarSensor())
 
 trace = scenario.verify(3.5, 0.1)
-plot_reachtube_stars(trace)
+# plot_reachtube_stars(trace)
 car1 = sum([trace.nodes[i].trace['test'] for i in range(len(trace.nodes))], [])
 times = [star[0] for star in car1]
 car1 = [star[1] for star in car1]
@@ -53,4 +53,4 @@ class ThermoMode(Enum):
     print(times[i], car.C, car.g, car.basis, car.center, '\n_______ \n')
 # for star in car1:
 #     print(star.center, star.basis, star.C, star.g, '\n --------')
-# fig.show()
+plt.show()
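
With plot_reachtube_stars commented out, the stars collected into car1 can still be plotted by hand from the loop above. A minimal sketch, assuming plt is matplotlib.pyplot (as the new plt.show() call implies) and that each star's center is an array whose first component is the quantity of interest; the indexing here is an illustration, not the demo's actual plotting code:

import matplotlib.pyplot as plt

# Illustration only: `times` and `car1` are built in the code above;
# each element of `car1` is a star exposing a `.center` attribute.
centers = [star.center[0] for star in car1]  # assumed: first component of each center
plt.plot(times, centers, marker='o')
plt.xlabel('time')
plt.ylabel('star center (first state component)')
plt.show()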
9 changes: 4 additions & 5 deletions verse/stars/star_nn.py
@@ -6,6 +6,7 @@
 from scipy.integrate import ode
 from sklearn.decomposition import PCA
 import pandas as pd
+from tqdm import tqdm
 
 ### synthetic dynamic and simulation function
 def dynamic_test(vec, t):
@@ -103,7 +104,7 @@ def he_init(m):
 optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-5)
 scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.95)
 
-num_epochs = 50 # sample number of epoch -- can play with this/set this as a hyperparameter
+num_epochs = 30 # sample number of epoch -- can play with this/set this as a hyperparameter
 num_samples = 100 # number of samples per time step
 lamb = 7
 
@@ -127,10 +128,7 @@ def sample_initial(num_samples: int = num_samples) -> List[List[float]]:
         samples.append(S_0[np.random.randint(0, len(S_0))])
     return samples
 
-for epoch in range(num_epochs):
-    # Zero the parameter gradients
-    optimizer.zero_grad()
-
+for epoch in tqdm(range(num_epochs), desc="Training Progress"):
     samples = sample_initial()
 
     post_points = []
Expand Down Expand Up @@ -166,6 +164,7 @@ def sample_initial(num_samples: int = num_samples) -> List[List[float]]:
### for now, don't worry about batch training, just do single input, makes more sense to me to think of loss function like this
### I would really like to be able to do batch training though, figure out a way to make it work
for i in range(len(times)):
optimizer.zero_grad()
# Forward pass
t = torch.tensor([times[i]], dtype=torch.float32)
mu = model(t)
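
The substantive fix in star_nn.py is moving optimizer.zero_grad() from once per epoch into the per-time-step loop, so each step's gradients no longer accumulate on top of the previous ones; the tqdm wrapper only adds a progress bar. A minimal self-contained sketch of the resulting loop shape, with loss.backward() and optimizer.step() assumed from the standard PyTorch pattern (they fall outside the hunks shown), and a placeholder model and loss standing in for the real star-fitting objective:

import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm

# Stand-ins so the sketch runs on its own; the real script builds
# `model`, `times`, and the loss from the sampled star data.
model = nn.Linear(1, 1)
optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-5)
times = [0.0, 0.1, 0.2]
num_epochs = 30

for epoch in tqdm(range(num_epochs), desc="Training Progress"):
    for i in range(len(times)):
        optimizer.zero_grad()   # clear gradients for this step only
        t = torch.tensor([times[i]], dtype=torch.float32)
        mu = model(t)
        loss = mu.pow(2).sum()  # placeholder loss, not the script's objective
        loss.backward()         # assumed standard step, outside the shown hunks
        optimizer.step()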
