-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathrunMulti.py
More file actions
137 lines (112 loc) · 4.88 KB
/
runMulti.py
File metadata and controls
137 lines (112 loc) · 4.88 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
# from AE.simple.algoMicroHardGaussian import RPOL
from algo import RPOLforkey, CKBforkey, RPOL, CKB
from k8sManager import K8sManager
import numpy as np
import sys
from time import sleep
from utils import reset_env, exp, interpolate_distribution
import json
import random
# --- Experiment configuration -------------------------------------------
# Spacing multiplier between task keys (1 => tasks are keyed 0, 1, 2).
task_mul = 1

# NOTE(review): hard-coded absolute path — assumes this exact machine layout.
with open('/home/caohch1/Downloads/Erms/AE/simple/paras.json') as f:
    paras = json.load(f)

task = paras['task']
task_num = paras["task_num"]

# Map the configured task name onto its numeric key; anything that is not
# "login" or "recommendation" falls through to the search task (key 2).
task = {"login": 0, "recommendation": 1}.get(task, 2) * task_mul

# Per-task objectives. NOTE(review): all three keys read index [0] of the
# config lists — presumably a shared objective across tasks; confirm that
# indices [1]/[2] were not intended.
_task_keys = [k * task_mul for k in range(3)]
sla = {k: paras['sla'][0] for k in _task_keys}
obj_cost = {k: paras['obj_cost'][0] for k in _task_keys}
obj_hardCost = {k: paras['obj_hardCost'][0] for k in _task_keys}

# Handle to the hotel-reservation benchmark deployments.
k8sManager = K8sManager("hotel-reserv")

repeats = paras["repeats"]
periods = paras["periods"]
rounds = paras["rounds"]

# Per-period curves accumulated over all repeats, normalized at the end.
avg_regret = np.zeros(periods)
avg_cost = np.zeros(periods)
avg_hardCost = np.zeros(periods)
for j in range(repeats):
    # Fresh cluster state and fresh learners for every repeat.
    reset_env(k8sManager)
    # Candidate pod counts, ordered 30 down to 1.
    pod_grid = [np.array([i for i in range(30, 0, -1)])]
    # One online learner per task; task 0 uses the key-aware RPOL variant.
    models = {
        0: RPOLforkey(pod_grid, 1, [], paras["pobo_multi_paras"][0][0], paras["pobo_multi_paras"][0][1], paras["pobo_multi_paras"][0][2], paras["pobo_multi_paras"][0][3]),
        1: RPOL(pod_grid, 1, [], paras["pobo_multi_paras"][1][0], paras["pobo_multi_paras"][1][1], paras["pobo_multi_paras"][1][2]),
        2: RPOL(pod_grid, 1, [], paras["pobo_multi_paras"][2][0], paras["pobo_multi_paras"][2][1], paras["pobo_multi_paras"][2][2])
    }
    # Cumulative sums over periods (converted to running means after the loop).
    regret_sum = []
    cost_sum = []
    hardCost_sum = []
    for i in range(periods):
        print("="*100)
        # Sample this period's task according to the configured mixture.
        # An unrecognized "mult_dist" silently keeps the previous task
        # (initially the one named in the config) — preserved behavior.
        if paras["mult_dist"] == "333":
            task = np.random.randint(0, task_num) * task_mul
        elif paras["mult_dist"] == "226":
            task = random.choices(
                [0*task_mul, 1*task_mul, 2*task_mul], weights=[0.2, 0.2, 0.6])[0]
        elif paras["mult_dist"] == "442":
            task = random.choices(
                [0*task_mul, 1*task_mul, 2*task_mul], weights=[0.4, 0.4, 0.2])[0]
        elif paras["mult_dist"] == "changing":
            # Mixture drifts linearly from [0.4,0.4,0.2] toward [0.2,0.2,0.6].
            # NOTE(review): the horizon passed is `rounds`, not `periods`,
            # while the step is the period index `i` — confirm this is intended.
            current_dis = interpolate_distribution(
                [0.4, 0.4, 0.2], [0.2, 0.2, 0.6], i, rounds)
            task = random.choices(
                [0*task_mul, 1*task_mul, 2*task_mul], weights=current_dis)[0]
        # Make decision: the learner for this task picks a pod count.
        pod_num = int(models[task].decision(0)[-1])
        # Execute action: scale every deployment on the task's request path.
        if task == 0*task_mul:
            module_name = "login"
            k8sManager.scale_deployment("frontend", pod_num)
            k8sManager.scale_deployment("user", pod_num)
        elif task == 1*task_mul:
            module_name = "recommendation"
            k8sManager.scale_deployment("frontend", pod_num)
            k8sManager.scale_deployment("recommendation", pod_num)
            k8sManager.scale_deployment("profile", pod_num)
        elif task == 2*task_mul:
            module_name = "search"
            k8sManager.scale_deployment("frontend", pod_num)
            k8sManager.scale_deployment("search", pod_num)
            k8sManager.scale_deployment("rate", pod_num)
            k8sManager.scale_deployment("geo", pod_num)
            k8sManager.scale_deployment("profile", pod_num)
            k8sManager.scale_deployment("reservation", pod_num)
        # Repeat the probe to average the latency measurement; rounds with
        # no data (alj == 0 or ul == 0) are discarded.
        sv_sum = []
        ul_sum = []
        # FIX: inner loop variable renamed from `j`, which shadowed the
        # outer repeat index `j`.
        for r in range(rounds):
            st, alj, svn, sv, sl, ul, cu, mu = exp(
                paras["rate"], module_name, paras["duration"], sla[task]*1000)
            if alj == 0 or ul == 0:
                continue
            sv_sum.append(sv)
            ul_sum.append(ul)
            sleep(2.5)
        # FIX: fail with a clear message instead of a cryptic
        # ZeroDivisionError when every round was discarded.
        if not sv_sum or not ul_sum:
            raise RuntimeError(
                f"Period {i+1}: no valid measurements in {rounds} rounds")
        sv = sum(sv_sum)/(len(sv_sum))
        ul = sum(ul_sum)/(len(ul_sum))
        # Latency unit conversion — presumably us -> ms (or ms -> s); confirm
        # against exp()'s return units.
        ul /= 1000
        # Update model: reward favors fewer pods; soft cost is the SLA
        # violation metric, hard cost the (scaled) tail latency, both taken
        # relative to their configured objectives.
        hardCost = ul
        reward = 100 - pod_num
        cost = sv
        models[task].update(reward, cost-obj_cost[task],
                            hardCost-obj_hardCost[task], 0)
        real_cost = cost
        hard_cost = hardCost
        regret = 100 - reward  # equals pod_num
        # Cumulative sums over periods.
        regret_sum.append(regret + regret_sum[-1] if regret_sum else regret)
        cost_sum.append(real_cost + cost_sum[-1] if cost_sum else real_cost)
        hardCost_sum.append(hard_cost + hardCost_sum[-1] if hardCost_sum else hard_cost)
        print(f"Period {i+1}: Pod Num.: {regret}, SLAV: {real_cost}, P90 TL: {hard_cost}")
    # Turn cumulative sums into running means, then accumulate across repeats.
    regret_sum = [regret_sum[i]/(i+1) for i in range(periods)]
    cost_sum = [cost_sum[i]/(i+1) for i in range(periods)]
    hardCost_sum = [hardCost_sum[i]/(i+1) for i in range(periods)]
    avg_regret += np.array(regret_sum)
    avg_cost += np.array(cost_sum)
    avg_hardCost += np.array(hardCost_sum)
# Normalize the accumulated per-period curves into means over all repeats
# (in-place so the arrays keep their identity).
for _curve in (avg_regret, avg_cost, avg_hardCost):
    _curve /= repeats