From 8ee3d94bb08f398891be80305a526e14679145d8 Mon Sep 17 00:00:00 2001
From: Alex Yuan
Date: Tue, 17 Sep 2024 20:38:52 -0500
Subject: [PATCH] last changes before fork

---
 verse/stars/star_nn.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/verse/stars/star_nn.py b/verse/stars/star_nn.py
index e28ea164..fa5bd34a 100644
--- a/verse/stars/star_nn.py
+++ b/verse/stars/star_nn.py
@@ -110,7 +110,7 @@ def he_init(m):
 
 num_epochs: int = 50 # sample number of epoch -- can play with this/set this as a hyperparameter
 num_samples: int = 100 # number of samples per time step
-lamb: float = 1
+lamb: float = 5
 batch_size: int = 5 # number of times computed at once, lower should be better but slower, min of 1
 
 T = 14
@@ -196,7 +196,7 @@ def containment(points: torch.Tensor, times: torch.Tensor, bases: List[torch.Ten
 
         ### very naive way to do this, probably would want more or less equal batch sizes if not dividing equally
         mu = model(times[start:end].unsqueeze(1)) # get times in right form
-        loss = torch.sum(mu)+lamb*torch.sum(containment(post_points[:, start:end, 1:], times[start:end], bases[start:end], centers[start:end]))/num_samples
+        loss = torch.log1p(torch.sum(mu))+lamb*torch.sum(containment(post_points[:, start:end, 1:], times[start:end], bases[start:end], centers[start:end]))/num_samples
         loss.backward()
         optimizer.step()
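
For context, here is a minimal, self-contained sketch of what the patched training step computes. Only the loss line mirrors the patch; the model architecture, the containment penalty, the tensor shapes, and the Softplus output layer are hypothetical stand-ins for the real definitions in star_nn.py, not the actual implementation:

    from typing import List

    import torch
    import torch.nn as nn

    lamb: float = 5.0      # containment weight, raised from 1 to 5 by this patch
    num_samples: int = 100
    batch_size: int = 5
    T = 14

    # Hypothetical stand-in for the real network; the Softplus keeps mu >= 0
    # so torch.log1p(torch.sum(mu)) is always defined (an assumption about
    # the real model, which presumably outputs a nonnegative size term).
    model = nn.Sequential(nn.Linear(1, 16), nn.ReLU(), nn.Linear(16, 1), nn.Softplus())
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

    def containment(points: torch.Tensor, times: torch.Tensor,
                    bases: List[torch.Tensor], centers: List[torch.Tensor]) -> torch.Tensor:
        # Placeholder penalty: distance of each sampled point from its star
        # center. The real check in star_nn.py is more involved (it uses the
        # bases as well), and there it depends on the model's output.
        stacked = torch.stack(centers)                       # [batch, dim]
        return torch.relu(points - stacked.unsqueeze(0)).sum(dim=-1)

    times = torch.linspace(0, T, steps=20)
    post_points = torch.randn(num_samples, times.shape[0], 3)  # [samples, steps, 1 + dim]
    bases = [torch.eye(2) for _ in range(times.shape[0])]
    centers = [torch.zeros(2) for _ in range(times.shape[0])]

    for start in range(0, times.shape[0], batch_size):
        end = min(start + batch_size, times.shape[0])
        optimizer.zero_grad()
        mu = model(times[start:end].unsqueeze(1))            # get times in right form

        # The patched loss: log1p compresses the size term so its gradient
        # no longer swamps the containment penalty, which is now weighted 5x.
        loss = torch.log1p(torch.sum(mu)) + lamb * torch.sum(
            containment(post_points[:, start:end, 1:], times[start:end],
                        bases[start:end], centers[start:end])) / num_samples

        loss.backward()
        optimizer.step()

Taken together, the two edits rebalance the objective: torch.log1p damps the raw magnitude of the summed mu term while lamb = 5 amplifies the containment term, so training presumably favors keeping the sampled points contained over shrinking mu.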