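"""Example driver script for DeepSparseKernel's DSK_GP model.

Loads whitespace-separated data from the files `train_x`, `train_y`, `test_x`,
and `test_y`, fits a deep-sparse-kernel GP, and writes predictions,
hyperparameters, and the learned feature maps to text files.
"""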
import DeepSparseKernel as dsk
from DeepSparseKernel import np
import matplotlib.pyplot as plt

# Load whitespace-separated data; targets are reshaped to row vectors of shape (1, n).
train_x = np.loadtxt('train_x')
train_y = np.loadtxt('train_y')
train_y = train_y.reshape(1, train_y.size)
test_x = np.loadtxt('test_x')
test_y = np.loadtxt('test_y')
test_y = test_y.reshape(1, test_y.size)

num_train = train_x.shape[0]
num_test = test_x.shape[0]
dim = int(train_x.size / num_train)

# DSK_GP expects one column per data point, i.e. inputs of shape (dim, n).
train_x = train_x.reshape(num_train, dim).T
test_x = test_x.reshape(num_test, dim).T
print("input dimension:", dim)
print("train_x shape:", train_x.shape)
print("test_x shape:", test_x.shape)
# Deep-kernel network: four hidden layers of width 50 with mixed activations.
layer_sizes = [50, 50, 50, 50]
activations = [dsk.relu, dsk.tanh, dsk.relu, dsk.tanh]
scale = 0.1
dim = train_x.shape[0]  # input dimension (rows of the transposed design matrix)

gp = dsk.DSK_GP(train_x, train_y, layer_sizes, activations, bfgs_iter=200, l1=0, l2=0.0, debug=True)
theta = gp.rand_theta(scale=scale)  # random initial hyperparameters
gp.fit(theta)
# Predictive mean and variance on the test and training inputs.
py, ps2 = gp.predict(test_x)
py_train, ps2_train = gp.predict(train_x)

# Recover the learned log length-scales and the hidden-layer feature maps Phi.
log_lscales = gp.theta[2:2 + dim]
Phi_train = gp.calc_Phi(gp.theta[2 + dim:], dsk.scale_x(train_x, log_lscales))
Phi_test = gp.calc_Phi(gp.theta[2 + dim:], dsk.scale_x(test_x, log_lscales))
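
# Optional sanity check (not part of the original script): report the test RMSE,
# assuming `py` and `test_y` are both row vectors of shape (1, num_test).
rmse = np.sqrt(np.mean((py - test_y) ** 2))
print("test RMSE:", rmse)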
# Persist predictions, hyperparameters, and feature maps for later analysis.
np.savetxt('pred_y', py)
np.savetxt('pred_s2', ps2)
np.savetxt('theta', gp.theta)
np.savetxt('Phi_train', Phi_train)
np.savetxt('Phi_test', Phi_test)
# plt.plot(test_y.reshape(test_y.size), py.reshape(py.size), 'r.', train_y.reshape(train_y.size), py_train.reshape(train_y.size), 'b.')
# plt.show()
gp.debug = True
loss = gp.log_likelihood(gp.theta)
print(loss)
np.savetxt('loss', np.atleast_1d(loss))  # savetxt requires at least a 1-D array