"""
Author : Ziping Xu
Email : [email protected]
Date : Mar 21, 2022
Record : Utilise for linear model
"""
from utils import discover
from numpy.random import randn
import numpy as np

class linear_discover(discover):
    """Linear bandit environment: m arms with d-dimensional Gaussian features."""

    def __init__(self, m, d, sigma=1.0, sig_coef=1.0, noise=0.1):
        super(linear_discover, self).__init__()
        self.m = m                # number of arms
        self.d = d                # feature dimension
        self.sigma = sigma        # scale of the arm features
        self.sig_coef = sig_coef  # scale of the true coefficient vector
        self.noise = noise        # observation noise standard deviation
        self.init_linear()

    def init_linear(self):
        # Draw arm features, per-arm observation noise, and the true coefficients.
        self.x = randn(self.m * self.d).reshape(self.m, self.d) * self.sigma
        self.y_noise = randn(self.m) * self.noise
        self.sig_coef = randn(self.d) * self.sig_coef
        # sig_coef is 1-D, so x @ sig_coef gives the m noisy rewards directly.
        self.y = self.x @ self.sig_coef + self.y_noise
    def get_reward(self, action):
        return self.y[action], None

    def get_mask(self):
        # Boolean mask of arms that have already been pulled.
        mask_mat = np.array([False] * self.m)
        for action in self.actions:
            mask_mat[action] = True
        self.mask_mat = mask_mat
        return mask_mat

    def regret(self):
        # Cumulative regret against the best T distinct arms in hindsight.
        T = len(self.rewards)
        xf = -self.y.flatten()
        xf.sort()  # ascending in -y, i.e. descending in y
        reg = -np.cumsum(xf[:T]) - np.cumsum(self.rewards)
        return reg
class linear_solver:
    """Base solver: a uniform-random baseline that pulls T distinct arms."""

    def __init__(self, prob):
        self.prob = prob

    def step(self):
        pass

    def run(self, T):
        self.prob.refresh()
        acts = np.random.choice(range(self.prob.m), T, replace=False)
        for t in range(T):
            self.prob.take_action(acts[t])
        return self.prob.regret(), self.prob.rewards

class TS_linear(linear_solver):
    """Thompson sampling with a multivariate normal prior on theta."""

    def __init__(self, prob):
        super(TS_linear, self).__init__(prob)
        self.init_priors()

    def init_priors(self, s0=1.0):
        self.mu_t = np.zeros(self.prob.d)
        self.sigma_t = s0 * np.eye(self.prob.d)  # to adapt according to the true distribution of theta

    def update_posterior(self, action, r):
        # Conjugate Bayesian linear-regression update:
        #   sigma' = (sigma^-1 + f f^T / noise^2)^-1
        #   mu'    = sigma' (sigma^-1 mu + r f / noise^2)
        f = self.prob.x[action, :]
        s_inv = np.linalg.inv(self.sigma_t)
        ffT = np.outer(f, f)
        sigma_ = np.linalg.inv(s_inv + ffT / self.prob.noise**2)
        mu_ = np.dot(sigma_, np.dot(s_inv, self.mu_t) + r * f / self.prob.noise**2)
        self.mu_t = mu_
        self.sigma_t = sigma_
        return mu_, sigma_

    def step(self):
        # Sample theta from the posterior and play the best not-yet-pulled arm under it.
        theta_t = np.random.multivariate_normal(self.mu_t, self.sigma_t, 1)
        pred = self.prob.x @ theta_t.transpose()
        self.prob.get_mask()
        pred[self.prob.mask_mat] = np.nan
        at = np.nanargmax(pred)
        return at

    def run(self, T):
        self.prob.refresh()
        for t in range(T):
            action = self.step()
            r, _ = self.prob.take_action(action)
            self.update_posterior(action, r)
        return self.prob.regret(), self.prob.rewards

class UCB_linear(linear_solver):
    """LinUCB-style solver: ridge point estimate plus an exploration bonus."""

    def __init__(self, prob, lbda=1e-3, alpha=1.0):
        super(UCB_linear, self).__init__(prob)
        self.A_t = lbda * np.eye(self.prob.d)  # regularized design matrix
        self.b_t = np.zeros(self.prob.d)       # accumulated reward-weighted features
        self.lbda = lbda
        self.alpha = alpha

    def step(self):
        inv_A = np.linalg.inv(self.A_t)
        theta_t = np.dot(inv_A, self.b_t)  # ridge estimate of theta
        # Per-arm confidence width: alpha * sqrt(x_a^T A^-1 x_a).
        beta_t = self.alpha * np.sqrt(np.diagonal(np.dot(np.dot(self.prob.x, inv_A), self.prob.x.T)))
        pred = np.dot(self.prob.x, theta_t) + beta_t
        self.prob.get_mask()
        pred[self.prob.mask_mat] = np.nan
        at = np.nanargmax(pred)
        return at

    def update_est(self, a_t, r):
        self.A_t += np.outer(self.prob.x[a_t, :], self.prob.x[a_t, :])
        self.b_t += r * self.prob.x[a_t, :]

    def run(self, T):
        self.prob.refresh()
        for t in range(T):
            action = self.step()
            r, _ = self.prob.take_action(action)
            self.update_est(action, r)
        return self.prob.regret(), self.prob.rewards

class IDS_linear(linear_solver):
    """Information-directed sampling with a sample-based approximation of the information ratio."""

    def __init__(self, prob, M=1000):
        super(IDS_linear, self).__init__(prob)
        self.M = M  # number of posterior samples used to approximate the integrals
        self.init_priors()

    def init_priors(self, s0=1.0):
        self.mu_t = np.zeros(self.prob.d)
        self.sigma_t = s0 * np.eye(self.prob.d)  # to adapt according to the true distribution of theta

    def update_posterior(self, action, r):
        # Same conjugate update as in TS_linear.
        f = self.prob.x[action, :]
        s_inv = np.linalg.inv(self.sigma_t)
        ffT = np.outer(f, f)
        sigma_ = np.linalg.inv(s_inv + ffT / self.prob.noise**2)
        mu_ = np.dot(sigma_, np.dot(s_inv, self.mu_t) + r * f / self.prob.noise**2)
        self.mu_t = mu_
        self.sigma_t = sigma_
        return mu_, sigma_

    def step(self):
        """
        Implementation of linearSampleVIR (Algorithm 6 in Russo & Van Roy, p. 244) for linear
        bandits with a multivariate normal prior. The integrals are approximated by sampling
        M thetas from the current posterior. Reads the attributes mu_t (posterior mean),
        sigma_t (posterior covariance), and M (number of samples).
        :return: int, the chosen arm
        """
        mu_t, sigma_t, M = self.mu_t, self.sigma_t, self.M
        self.n_a = self.prob.m
        self.prob.get_mask()
        thetas = np.random.multivariate_normal(mu_t, sigma_t, M)
        mu = np.mean(thetas, axis=0)
        means = np.dot(self.prob.x, thetas.T)    # (n_a, M) predicted rewards per sample
        means[self.prob.mask_mat] = np.nan
        theta_hat = np.nanargmax(means, axis=0)  # best arm under each sampled theta
        theta_hat_ = [thetas[np.where(theta_hat == a)] for a in range(self.n_a)]
        p_a = np.array([len(theta_hat_[a]) for a in range(self.n_a)]) / M  # P(arm a is optimal)
        mu_a = np.nan_to_num(np.array([np.nanmean([theta_hat_[a]], axis=1).squeeze() for a in range(self.n_a)]))
        # L_hat: weighted scatter of the conditional posterior means; rho_star: expected optimal reward.
        L_hat = np.nansum(np.array([p_a[a] * np.outer(mu_a[a] - mu, mu_a[a] - mu) for a in range(self.n_a)]), axis=0)
        rho_star = np.nansum(np.array([p_a[a] * np.dot(self.prob.x[a], mu_a[a]) for a in range(self.n_a)]), axis=0)
        # v = np.array([np.dot(np.dot(self.prob.x[a], np.dot(sigma_t, sigma_t.T)), self.prob.x[a].T) for a in range(self.n_a)]) + 1e-6
        v = np.array([np.dot(np.dot(self.prob.x[a], L_hat), self.prob.x[a].T) for a in range(self.n_a)]) + 1e-6  # information gain proxy
        delta = np.array([rho_star - np.dot(self.prob.x[a], mu) for a in range(self.n_a)])  # expected regret per arm
        delta[self.prob.mask_mat] = np.nan
        arm = np.nanargmax(-delta**2 / v)  # minimize the information ratio delta^2 / v
        return arm

    def run(self, T):
        self.prob.refresh()
        for t in range(T):
            action = self.step()
            r, _ = self.prob.take_action(action)
            self.update_posterior(action, r)
        return self.prob.regret(), self.prob.rewards
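

if __name__ == "__main__":
    # A minimal sketch comparing the solvers on one random instance. It assumes
    # the discover base class in utils provides refresh() (resetting the actions
    # and rewards lists) and take_action(); neither is defined in this file.
    np.random.seed(0)
    prob = linear_discover(m=100, d=5)
    T = 50
    for solver_cls in (linear_solver, TS_linear, UCB_linear, IDS_linear):
        solver = solver_cls(prob)
        reg, rewards = solver.run(T)
        print(f"{solver_cls.__name__}: final regret {reg[-1]:.3f}")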