# @package _global_

# to execute this experiment run:
# python run.py experiment=example_full.yaml
defaults:
  - override /trainer: null # override trainer to null so it's not loaded from main config defaults...
  - override /model: null
  - override /datamodule: null
  - override /callbacks: null
  - override /logger: null

# we override default configurations with nulls to prevent them from loading at all
# instead we define all modules and their paths directly in this config,
# so everything is stored in one place
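# any value below can still be overridden at launch time via Hydra's command-line syntax,
# e.g.: python run.py experiment=example_full.yaml trainer.max_epochs=20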
seed: 12345

trainer:
  _target_: pytorch_lightning.Trainer
  gpus: 0
  min_epochs: 1
  max_epochs: 10
  gradient_clip_val: 0.5
  accumulate_grad_batches: 2
  weights_summary: null
  # resume_from_checkpoint: ${work_dir}/last.ckpt
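# note: with accumulate_grad_batches: 2 and the datamodule's batch_size: 64 below,
# each optimizer step effectively uses 128 samples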
model:
  _target_: src.models.mnist_model.MNISTLitModel
  lr: 0.001
  weight_decay: 0.00005
  input_size: 784
  lin1_size: 256
  lin2_size: 256
  lin3_size: 128
  output_size: 10
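# input_size: 784 matches flattened 28x28 MNIST images; output_size: 10 matches the ten digit classes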
datamodule:
  _target_: src.datamodules.mnist_datamodule.MNISTDataModule
  data_dir: ${data_dir}
  batch_size: 64
  train_val_test_split: [55_000, 5_000, 10_000]
  num_workers: 0
  pin_memory: False
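# the split sums to 70_000, the full MNIST dataset (60k train + 10k test images)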
callbacks:
  model_checkpoint:
    _target_: pytorch_lightning.callbacks.ModelCheckpoint
    monitor: "val/acc"
    save_top_k: 2
    save_last: True
    mode: "max"
    dirpath: "checkpoints/"
    filename: "sample-mnist-{epoch:02d}"
  early_stopping:
    _target_: pytorch_lightning.callbacks.EarlyStopping
    monitor: "val/acc"
    patience: 10
    mode: "max"
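# the two checkpoints with the highest "val/acc" are kept under checkpoints/
# (named e.g. sample-mnist-07.ckpt), and save_last: True also writes last.ckpt,
# which the commented-out resume_from_checkpoint line above expects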
logger:
  wandb:
    tags: ["best_model", "uwu"]
    notes: "Description of this model."
  neptune:
    tags: ["best_model"]
  csv_logger:
    save_dir: "."
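# note: the logger entries above set only extra fields and carry no _target_,
# so instantiation depends on how the training code resolves them;
# a minimal sketch of a fully specified wandb logger (the project name is hypothetical):
# wandb:
#   _target_: pytorch_lightning.loggers.WandbLogger
#   project: "mnist-experiments"
#   tags: ["best_model", "uwu"]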