forked from netcontract/ncflow
-
Notifications
You must be signed in to change notification settings - Fork 4
/
Copy path: find_demand_scale_factor.py
executable file
·111 lines (92 loc) · 3.52 KB
/
find_demand_scale_factor.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
import os
import numpy as np
import sys
sys.path.append("..")
from lib.algorithms.path_formulation import PathFormulation as PF
from lib.problem import Problem
from lib.traffic_matrix import GenericTrafficMatrix
from lib.config import TOPOLOGIES_DIR
from benchmarks.benchmark_consts import NCFLOW_HYPERPARAMS
from lib.algorithms import NcfEpi
from lib.partitioning import FMPartitioning, SpectralClustering
import datetime
# ---- Read the topology named on the command line ----
# Topology Zoo topologies ship as .graphml files under topology-zoo/;
# other topologies (.json) live directly under TOPOLOGIES_DIR.
t_arg = sys.argv[1]
if t_arg.endswith(".graphml"):
    topo_fname = os.path.join(TOPOLOGIES_DIR, "topology-zoo/" + t_arg)
else:
    topo_fname = os.path.join(TOPOLOGIES_DIR, t_arg)

if t_arg.endswith(".json"):
    G = Problem._read_graph_json(topo_fname)
elif t_arg.endswith(".graphml"):
    G = Problem._read_graph_graphml(topo_fname)
else:
    # Fail fast with a clear message; previously an unrecognized extension
    # left G unbound and crashed below with a confusing NameError.
    raise ValueError("unsupported topology file extension: {}".format(t_arg))

num_nodes = len(G.nodes)
print("#nodes={}".format(num_nodes))
# ---- Process each traffic matrix ----
# The TEAVAR demand file holds one traffic matrix per line: num_nodes^2
# whitespace-separated values in row-major order.
TEAVAR_DEMANDS_DIR = "../code/teavar/code/data"
d_fname = os.path.join(TEAVAR_DEMANDS_DIR, t_arg, "demand.txt")
line_num = 0
with open(d_fname, "r") as input_file:
    for line in input_file:
        line_num = line_num + 1
        print("==================Demand {}==================".format(line_num))
        # np.fromstring's text mode is deprecated; np.array over split()
        # parses the same whitespace-separated values.
        tm = GenericTrafficMatrix(
            problem=None,
            tm=np.array(line.split(), dtype=np.float32).reshape(
                num_nodes, num_nodes
            ),
        )
        p = Problem(G, tm)
        p.name = t_arg

        # Demand scale factor z for this TM (path formulation, 4 paths,
        # edge-disjoint).
        pf_cdsf = PF.compute_demand_scale_factor(4, edge_disjoint=True)
        pf_cdsf.solve(p)
        z = pf_cdsf.obj_val

        # Path-formulation total flow and solver runtime (the baseline).
        pf = PF.new_total_flow(4, edge_disjoint=True)
        pf.solve(p)
        pf_flow = pf.obj_val
        pf_runtime = pf.runtime

        # NCFlow total flow / runtime, only when tuned hyperparameters
        # exist for this topology; otherwise fall back to the PF numbers
        # and report -1 wall-clock time to mean "not run".
        if p.name in NCFLOW_HYPERPARAMS:
            (
                num_paths,
                edge_disjoint,
                dist_metric,
                partition_algo,
                sf,
            ) = NCFLOW_HYPERPARAMS[p.name]
            num_partitions = sf * int(np.sqrt(len(p.G.nodes)))
            # NOTE(review): the unpacked edge_disjoint/dist_metric are
            # overridden below with edge_disjoint=True, dist_metric="inv-cap";
            # preserved as-is from the original script — confirm intent.
            # partition_algo is called directly, so it is assumed to be a
            # partitioner class/factory (e.g. FMPartitioning,
            # SpectralClustering) — TODO confirm against benchmark_consts.
            partitioner = partition_algo(num_partitions)
            ncflow = NcfEpi.new_total_flow(
                num_paths, edge_disjoint=True, dist_metric="inv-cap"
            )
            begin = datetime.datetime.now()
            ncflow.solve(p, partitioner)
            end = datetime.datetime.now()
            nc_flow = ncflow.obj_val
            nc_runtime = ncflow.runtime_est(14)
            nc_wallclocktime = (end - begin).seconds
        else:
            nc_flow = pf_flow
            nc_runtime = pf_runtime
            nc_wallclocktime = -1

        print(
            "RESULT D {0} (paths=edinvcap4) z {1:1.3f} PF flow/runtime {2:1.3f} {3:1.3f} NCFlow flow/runtime/wc {4:1.3f} {5:1.3f} {6:1.3f}\n".format(
                line_num, z, pf_flow, pf_runtime, nc_flow, nc_runtime, nc_wallclocktime
            )
        )