# utils.py
import numpy as np
from torch_geometric.utils.convert import from_networkx as fn
import networkx as nx
from networkx.algorithms import approximation
import time
import igraph as ig

def bubble_sort(array):
    # Bubble sort a list of igraph graphs by vertex count, largest first.
    n = len(array)
    for i in range(n - 1):
        for j in range(n - i - 1):
            if array[j].vcount() < array[j + 1].vcount():
                array[j], array[j + 1] = array[j + 1], array[j]
    return array

def bubble_sort_reverse(array):
    # Bubble sort a list of igraph graphs by vertex count, smallest first.
    n = len(array)
    for i in range(n - 1):
        for j in range(n - i - 1):
            if array[j].vcount() > array[j + 1].vcount():
                array[j], array[j + 1] = array[j + 1], array[j]
    return array

def get_features(g_nx):  # get graph-based features
    # Returns a 1 x 13 array of graph-level statistics, in this order:
    # n_nodes, n_edges, node-feature dim, density,
    # degree-centrality mean/min/max/std, approximate clustering coefficient,
    # degree mean/max/min/std.
    features_li = []
    features = []
    vertex = g_nx.number_of_nodes()
    feat_dim = len(next(iter(nx.get_node_attributes(g_nx, "x").values())))
    edge = g_nx.number_of_edges()
    dense = nx.density(g_nx)
    cen = np.array(list(nx.degree_centrality(g_nx).values()))
    c_mean = cen.mean()
    c_max = cen.max()
    c_min = cen.min()
    c_std = cen.std()
    cluster_coeffi = approximation.average_clustering(g_nx)
    degrees = np.array([val for (node, val) in g_nx.degree()])
    degree_min = degrees.min()
    degree_max = degrees.max()
    degree_mean = degrees.mean()
    degree_std = degrees.std()
    features.extend([vertex, edge, feat_dim, dense,
                     c_mean, c_min, c_max, c_std, cluster_coeffi,
                     degree_mean, degree_max, degree_min, degree_std])
    features_li.append(features)
    feat_arr = np.array(features_li)
    return feat_arr

def predict_acc(feature):  # obtain predicted accuracy
    # Score each community-detection algorithm with its pre-trained regressor
    # and select the one with the highest predicted accuracy.
    sc_dit = {}
    fg_sc = fg_rf.predict(feature)
    im_sc = im_rf.predict(feature)
    lp_sc = lp_rf.predict(feature)
    ld_sc = ld_rf.predict(feature)
    ml_sc = ml_rf.predict(feature)
    nop_sc = nop_rf.predict(feature)
    sc_dit["fastgreedy"] = fg_sc[0]
    sc_dit["infomap"] = im_sc[0]
    sc_dit["label_propagation"] = lp_sc[0]
    sc_dit["leiden"] = ld_sc[0]
    sc_dit["louvain"] = ml_sc[0]
    sc_dit["no_partition"] = nop_sc[0]
    sc_dit = dict(sorted(sc_dit.items(), key=lambda x: x[1], reverse=True))  # highest score first
    md = next(iter(sc_dit))
    score = sc_dit[md]
    print(f"Selected Algorithm : {md} ({score})")
    return sc_dit, score, md
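
# NOTE (assumption, not shown in this file): fg_rf, im_rf, lp_rf, ld_rf, ml_rf and
# nop_rf are expected to be pre-trained regressors (e.g. scikit-learn
# RandomForestRegressor instances) bound at module level before predict_acc is
# called. A plausible loading step, with hypothetical file names, would be:
#     import joblib
#     fg_rf = joblib.load("models/fastgreedy_rf.joblib")
#     im_rf = joblib.load("models/infomap_rf.joblib")
#     # ... and so on for the remaining four models.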

def partitioning(g_nx, md):
    # Partition the graph with the selected community-detection algorithm (md)
    # and return (list of igraph subgraphs, elapsed seconds, status flag).
    status = 1
    if md != "no_partition":
        g_ig = ig.Graph.from_networkx(g_nx)
        g_ig = g_ig.as_undirected()
        if md == "fastgreedy":
            time1 = time.time()
            partitions = g_ig.community_fastgreedy()
            time2 = time.time() - time1
            partitions = partitions.as_clustering()
        elif md == "label_propagation":
            time1 = time.time()
            partitions = g_ig.community_label_propagation()
            time2 = time.time() - time1
        elif md == "infomap":
            time1 = time.time()
            partitions = g_ig.community_infomap()
            time2 = time.time() - time1
        elif md == "leiden":
            time1 = time.time()
            partitions = g_ig.community_leiden(objective_function="modularity")
            time2 = time.time() - time1
        elif md == "louvain":
            time1 = time.time()
            partitions = g_ig.community_multilevel()
            time2 = time.time() - time1
        subsub = partitions.subgraphs()
    else:  # no partitioning selected: return the whole graph as a single partition
        time2 = 0.0
        g_ig = ig.Graph.from_networkx(g_nx)
        subsub = [g_ig]
        status = 0
    print(f"{md}'s time : {time2} secs")
    return subsub, time2, status
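
# Example (hypothetical usage): order the returned partitions from largest to
# smallest with the bubble_sort helper defined above before further processing.
#     subgraphs, secs, status = partitioning(g_nx, "fastgreedy")
#     subgraphs = bubble_sort(subgraphs)  # largest subgraph first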

def set_attr_minor_super(ver_seq, clust_major, major_attr):
    # Assign node features "x" to the vertices of the coarsened (super-)graph:
    # major-cluster vertices keep their precomputed attributes from major_attr,
    # while each remaining (minor) cluster is represented by the mean of its
    # members' feature vectors.
    res_li = []
    for i in clust_major.subgraphs()[max(major_attr) + 1:]:
        vs11 = np.array(i.vs["x"])
        res = np.mean(vs11, axis=0)  # mean feature vector of the minor cluster
        res_li.append(res)
    for i in range(len(major_attr)):
        ver_seq.vs[i]["x"] = major_attr[i].flatten()
    for i in range(len(clust_major.subgraphs()[max(major_attr) + 1:])):
        ver_seq.vs[max(major_attr) + 1 + i]["x"] = res_li[i]
    return ver_seq
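
# Minimal usage sketch (hypothetical driver, not part of the original module).
# predict_acc() additionally requires the pre-trained regressors noted above,
# so it is only shown commented out here.
if __name__ == "__main__":
    # Build a small attributed graph: every node needs a feature vector "x".
    g = nx.karate_club_graph()
    nx.set_node_attributes(g, {n: np.random.rand(8) for n in g.nodes()}, "x")
    feats = get_features(g)  # shape (1, 13)
    # scores, best_score, best_algo = predict_acc(feats)  # needs loaded models
    subgraphs, secs, status = partitioning(g, "louvain")
    subgraphs = bubble_sort(subgraphs)  # largest community first
    print(f"{len(subgraphs)} partitions, status={status}")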