forked from Rithwikksvr/EdgeNILM
mini-experiments-trainer-2.py
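# Mini-experiment trainer: trains the NotFullySharedMTL multi-task model on three
# appliances (washing machine, fridge, dish washer) across folds 1-3 with a
# sequence length of 99, using train_fold() and the model classes provided by the
# repo-local functions.py / networks.py modules.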
import os
import sys
import time
import math
from collections import OrderedDict

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import pearsonr
from sklearn.metrics import mean_absolute_error

import torch
from torch import nn
import torch.nn.functional as F
import torch.optim as optim

# Make cuDNN deterministic so repeated runs are reproducible
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False

# Repo-local training utilities and network/model definitions
from functions import *
from networks import *
# Experiment configuration
cuda = True
appliances = ["washing machine", "fridge", "dish washer"]
n_epochs = 60
val_prop = 0.4            # validation split proportion passed to train_fold
batch_size = 64
folds = [1, 2, 3]
sequence_lengths = [99]   # input window length(s), in mains samples

start = time.time()

# Only the not-fully-shared multi-task (MTL) variant is trained by this script
methods = ['not_fully_shared_mtl']
for method in methods:
    for fold_number in folds:
        for sequence_length in sequence_lengths:
            if method == 'not_fully_shared_mtl':
                print("Training fold %s with %s method using sequence length %s" % (fold_number, method, sequence_length))
                mtl_model = [NotFullySharedMTL(sequence_length, len(appliances), cuda)]
                train_fold(mtl_model, method, appliances, fold_number, n_epochs,
                           sequence_length, batch_size, 'adam', val_prop,
                           num_of_minibatches_to_save_model=40)
end = time.time()
print("Total script runtime: ", end - start)