2d flow #2301

projects/super_res/config.py (6 changes: 3 additions & 3 deletions)
@@ -9,13 +9,13 @@
 config.random_fourier_features = True,
 config.learned_sinusoidal_dim = 32
 config.diffusion_steps = 1500
-config.sampling_steps = 6
-config.loss = "l1"
+config.sampling_steps = 20
+config.loss = "l2"


Reviewer: This seems like a major change. It would be great if there was a clearly defined experiment behind this choice.


Author: I ran a couple of experiments with fewer epochs, but they were cluttering the wandb project space, so I deleted them.

 config.objective = "pred_v"
 config.lr = 8e-5
 config.steps = 5000000
 config.grad_acc = 1
-config.val_num_of_batch = 1
+config.val_num_of_batch = 2
 config.save_and_sample_every = 5000
 config.ema_decay = 0.995
 config.amp = False
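For context on the `loss` switch above: the string presumably selects the reconstruction loss used by the diffusion model. A minimal sketch of what that mapping typically looks like (an assumption about the model code, which is not shown in this diff):

```python
import torch.nn.functional as F

def get_loss_fn(name):
    # Hypothetical helper: maps the config's `loss` string to a torch loss.
    # The real selection lives in the model code, not in this diff.
    if name == "l1":
        return F.l1_loss   # previous setting
    if name == "l2":
        return F.mse_loss  # new setting in this PR
    raise ValueError(f"unknown loss: {name}")
```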
projects/super_res/config_mod_flow.py (43 changes: 43 additions & 0 deletions)
@@ -0,0 +1,43 @@
from ml_collections import config_dict


Reviewer: Can we use a different name than "mod"? Is this the config for the 2D flow?


Author: Sure.


#batch_size = 4
config = config_dict.ConfigDict()

config.dim = 64
config.dim_mults = (1, 1, 2, 2, 3, 4)
config.learned_sinusoidal_cond = True,
config.random_fourier_features = True,
config.learned_sinusoidal_dim = 32
config.diffusion_steps = 1500
config.sampling_steps = 20
config.loss = "l2"
config.objective = "pred_v"
config.lr = 8e-5
config.steps = 5000000
config.grad_acc = 1
config.val_num_of_batch = 2
config.save_and_sample_every = 5000
config.ema_decay = 0.995
config.amp = False
config.split_batches = True
config.additional_note = "mod_flow"
config.eval_folder = "./evaluate"
config.results_folder = "./results"
config.tensorboard_dir = "./tensorboard"
config.milestone = 1

config.batch_size = 1
config.data_config = config_dict.ConfigDict({
"dataset_name": "c384",
"length": 7,
#"channels": ["UGRD10m_coarse","VGRD10m_coarse"],
"channels": ["PRATEsfc_coarse"],
#"img_channel": 2,
"img_channel": 1,
"img_size": 384,
"logscale": True,
"quick": True
})

config.data_name = f"{config.data_config['dataset_name']}-{config.data_config['channels']}-{config.objective}-{config.loss}-d{config.dim}-t{config.diffusion_steps}{config.additional_note}"
config.model_name = f"c384-{config.data_config['channels']}-{config.objective}-{config.loss}-d{config.dim}-t{config.diffusion_steps}{config.additional_note}"
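For reference, config_mod_flow.py just builds a module-level ml_collections ConfigDict, so a training script would most likely import it directly and can override fields before constructing the trainer. A minimal sketch of that usage; the import path and the overridden values are assumptions, not code from this PR:

```python
# Hypothetical usage of the config defined above; not part of this PR.
from config_mod_flow import config  # assumes the script runs from projects/super_res

# ConfigDict fields can be overridden before training, e.g. for a quick local run.
config.sampling_steps = 6            # hypothetical override
config.data_config.quick = True      # nested ConfigDict supports attribute access

print(config.model_name)                # derived checkpoint/logging name
print(config.data_config["channels"])   # ['PRATEsfc_coarse']
```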
projects/super_res/model/autoreg_diffusion_mod.py (10 changes: 5 additions & 5 deletions)
@@ -1031,7 +1031,7 @@ def p_losses(self, stack, hres, lres, ures, t, noise = None):
 loss2 = self.loss_fn(x_start, warped, reduction = 'none')
 loss2 = reduce(loss2, 'b ... -> b (...)', 'mean')

-return loss.mean() + loss1.mean() + loss2.mean()
+return loss.mean()*1.7 + loss1.mean()*1.0 + loss2.mean()*1.0


Reviewer: Interesting, so now the losses are not quite balanced? How did you determine the 1.7? (nit: while it seems like both of the 1.0's are superfluous, at least one of them is, because you only need to weight 2 of the 3 loss terms, or in general k-1 of k loss terms; it's the relative weights that matter).


Author: Ah, it's a typo: the last one was supposed to be 0.3 so that the weights sum to 3. But yes, these are manually chosen hyperparameters, and I'm not a big fan of that either. I was thinking of using the SoftAdapt paper to figure out these weights automatically.
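For reference, the SoftAdapt idea (Heydari et al., 2019) is roughly: track how quickly each loss term has been changing recently and pass those rates through a softmax, so the terms that have stopped improving receive the largest weight. A rough sketch of that scheme; the helper name and the way it would be wired into p_losses are assumptions, not part of this PR:

```python
import torch

def softadapt_weights(loss_history, beta=0.1):
    # loss_history: (k, n) tensor holding the last n recorded values of each
    # of the k loss terms. Simplest SoftAdapt-style variant: weight each term
    # by a softmax over its recent rate of change, so terms that are improving
    # slowest (or getting worse) get the largest weight. The paper also
    # describes normalized and loss-weighted variants.
    rates = loss_history[:, -1] - loss_history[:, -2]  # finite-difference slope
    return torch.softmax(beta * rates, dim=0)

# Hypothetical wiring inside p_losses, replacing the hard-coded 1.7 / 1.0 / 0.3:
# w = softadapt_weights(history_of_three_losses)
# return w[0] * loss.mean() + w[1] * loss1.mean() + w[2] * loss2.mean()
```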


def forward(self, lres, hres, *args, **kwargs):

@@ -1325,13 +1325,13 @@ def train(self):
 ax1.set_ylabel("Density")
 ax1.set_yscale("log")

-flow_d = np.zeros((1, num_samples, 3, img_size, img_size))
-for m in range(num_samples):
+flow_d = np.zeros((1, num_frames, 3, img_size, img_size))
+for m in range(num_frames):
 flow_d[0,m,:,:,:] = np.transpose(flow_vis.flow_to_color(flows.clamp(0, 1)[0,m,:2,:,:].permute(1,2,0).cpu().numpy(), convert_to_bgr = True), (2,0,1))

-flow_s = np.zeros((1, num_samples, 3, img_size, img_size))
+flow_s = np.zeros((1, num_frames, 3, img_size, img_size))
 sm = smap(None, fcmap)
-for m in range(num_samples):
+for m in range(num_frames):
 flow_s[0,m,:,:,:] = np.transpose(sm.to_rgba(flows.clamp(0, 1)[0,m,2,:,:].cpu().numpy())[:,:,:3], (2,0,1))

accelerator.log({"true_high": wandb.Video((hres[:,2:,0:1,:,:].repeat(1,1,3,1,1).cpu().numpy()*255).astype(np.uint8))}, step=self.step)
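As an aside on the visualization being fixed above: flow_to_color comes from the flow_vis package; it takes an (H, W, 2) array of u/v flow components and returns an (H, W, 3) uint8 color-wheel image. A standalone sketch, with a random array standing in for the model's predicted flow:

```python
import numpy as np
import flow_vis

h = w = 384                                             # matches img_size in the config
flow_uv = np.random.randn(h, w, 2).astype(np.float32)   # stand-in (H, W, 2) flow field

# Flow direction is mapped to hue and magnitude to saturation; convert_to_bgr=True
# matches the call in autoreg_diffusion_mod.py above.
rgb = flow_vis.flow_to_color(flow_uv, convert_to_bgr=True)
assert rgb.shape == (h, w, 3) and rgb.dtype == np.uint8
```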