#!/usr/bin/env python3
"""Detect events in a document collection based on semantic and temporal similarity."""
import argparse
import json
from dataclasses import asdict
from typing import List, Iterable, Tuple
import numpy as np
from chunkdot import cosine_similarity_top_k
from sklearn.cluster import AffinityPropagation
from sklearn.metrics.pairwise import cosine_similarity
from tqdm.auto import tqdm
from util import (
get_doc_date_days,
get_embeddings,
get_topics,
get_doc_topics,
NumpyArrayEncoder,
Event,
EVENTS_PATH,
)

parser = argparse.ArgumentParser(
prog="Event Detection",
description="Detect events in a document collection based on semantic and temporal similarity.",
)
parser.add_argument(
"-e", "--eval", action="store_true", help="Run grid search for evaluation."
)
parser.add_argument(
"-g", "--gamma", type=float, default=0.05, help="Temporal similarity decay factor."
)
parser.add_argument(
"-a",
"--alpha",
type=float,
default=0.4,
help="Weight of temporal similarity in the affinity matrix.",
)
parser.add_argument(
"-z",
"--zeta",
type=float,
default=0.4,
help="Weight of salience in the event weight calculation.",
)
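
# Example invocation (a usage sketch; assumes the script is run directly,
# using the flags defined above):
#   python 3_event_detection.py --gamma 0.05 --alpha 0.4 --zeta 0.4
#   python 3_event_detection.py --eval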
# load precomputed document embeddings, dates, and topic assignments
doc_embeddings = get_embeddings()
# number of days elapsed since 2000-01-01 for each document
doc_date_days = get_doc_date_days()
doc_topics = get_doc_topics()
topics = get_topics()


class MTLSEventGenerator:
"""Clustering of documents into events based on semantic and temporal similarity.
Based on: Multi-timeline summarization (mtls): Improving timeline summarization
by generating multiple summaries. (Yu, Yi, et al. 2021)
Steps:
1. Calculate semantic similarity matrix for document embeddings.
2. Calculate temporal similarity matrix for document dates.
3. Calculate affinity matrix as a linear combination of semantic and temporal similarity matrices.
4. Apply affinity propagation clustering to generate candidate events.
5. Calculate salience and consistency for each event.
6. Calculate event weight as a linear combination of salience and consistency.
    7. Filter events by weight.

    Parameters
----------
gamma : float, default=0.05
Temporal similarity decay factor.
alpha : float, default=0.4
Weight of temporal similarity in the affinity matrix.
zeta : float, default=0.4
Weight of salience in the event weight calculation.
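
    Examples
    --------
    A minimal usage sketch (``doc_embeddings`` being an (n_docs, dim) float
    array and ``doc_date_days`` an (n_docs,) integer array of document dates
    in days; the names follow the module-level variables loaded above):

    >>> generator = MTLSEventGenerator(gamma=0.05, alpha=0.4, zeta=0.4)
    >>> events = generator.generate_events(doc_embeddings, doc_date_days)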
"""
def __init__(self, gamma: float = 0.05, alpha: float = 0.4, zeta: float = 0.4):
self.gamma = gamma
self.alpha = alpha
        self.zeta = zeta

    def _semantic_similarity(
self,
doc_embeddings: np.ndarray,
chunk_size: int = 2000,
use_chunkdot: bool = True,
) -> np.ndarray:
"""Calculate cosine similarity matrix for document embeddings."""
if use_chunkdot:
return cosine_similarity_top_k(
doc_embeddings, top_k=doc_embeddings.shape[0] - 1, max_memory=20e9
)
if chunk_size and chunk_size < doc_embeddings.shape[0]:
# Compute cosine similarity in chunks
cos_sim_matrices = []
for i in range(0, len(doc_embeddings), chunk_size):
chunk = doc_embeddings[i : i + chunk_size]
cos_sim_matrix = cosine_similarity(chunk, doc_embeddings)
cos_sim_matrices.append(cos_sim_matrix)
            # stack the row chunks into the full similarity matrix
            similarity_matrix = np.vstack(cos_sim_matrices)
else:
similarity_matrix = cosine_similarity(doc_embeddings)
        return similarity_matrix

    def _temporal_similarity(
        self, doc_date_days: np.ndarray, chunk_size: int = 2000
    ) -> np.ndarray:
        """Calculate the temporal similarity matrix for document dates.

        Similarity decays exponentially with the absolute date difference:
        sim(i, j) = exp(-gamma * (|d_i - d_j| + 1)).
        """
        if chunk_size and chunk_size < len(doc_date_days):
            # compute the matrix in row chunks to bound memory usage
            temporal_sim_matrices = []
            for i in range(0, len(doc_date_days), chunk_size):
                chunk = doc_date_days[i : i + chunk_size]
                diff = np.subtract.outer(chunk, doc_date_days)
                # offset by 1 day so identical dates do not yield similarity 1
                diff = np.abs(diff) + 1
                temporal_sim_matrix = np.exp(-self.gamma * diff)
                temporal_sim_matrices.append(temporal_sim_matrix)
            # stack the row chunks into the full matrix
            date_similarity_matrix = np.vstack(temporal_sim_matrices)
        else:
            diff = np.subtract.outer(doc_date_days, doc_date_days)
            # offset by 1 day so identical dates do not yield similarity 1
            diff = np.abs(diff) + 1
            date_similarity_matrix = np.exp(-self.gamma * diff)
return date_similarity_matrix
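
    # For intuition (a worked example, not taken from the source): with the
    # default gamma = 0.05, two documents 30 days apart get a temporal
    # similarity of exp(-0.05 * 31) ≈ 0.21, while same-day documents get
    # exp(-0.05 * 1) ≈ 0.95.
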
def _get_affinity_matrix(
self, semantic_sim_matrix: np.ndarray, temporal_sim_matrix: np.ndarray
) -> np.ndarray:
"""Calculate affinity matrix as a linear combination of semantic and temporal similarity matrices."""
        return self.alpha * temporal_sim_matrix + (1 - self.alpha) * semantic_sim_matrix

    def _get_salience(
        self, affinity_matrix: np.ndarray, cluster_labels: np.ndarray, cluster_id: int
    ) -> float:
"""Salience: log ratio of the number of documents in the cluster to the total number of documents.
Describes the relative frequency of the event in the dataset."""
return np.log(affinity_matrix[cluster_labels == cluster_id].shape[0]) / np.log(
affinity_matrix.shape[0]
)
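
    # For intuition (a worked example, not taken from the source): a cluster
    # of 10 documents in a 1000-document topic has salience
    # log(10) / log(1000) ≈ 0.33.
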
    def _get_consistency(
        self,
        doc_embeddings: np.ndarray,
        cluster_labels: np.ndarray,
        cluster_centers_indices: np.ndarray,
        cluster_id: int,
    ) -> float:
"""Consistency: average cosine similarity between cluster center and its members.
Describes the quality / semantic coherence of the event.
"""
cluster_center = doc_embeddings[cluster_centers_indices[cluster_id]]
# get indices of documents that belong to the cluster and are not cluster centers
cluster_member_indices = np.where(cluster_labels == cluster_id)[0]
cluster_member_indices = cluster_member_indices[
cluster_member_indices != cluster_centers_indices[cluster_id]
]
cluster_members = doc_embeddings[cluster_member_indices]
        cosine_sims = cosine_similarity(cluster_center.reshape(1, -1), cluster_members)
        return float(np.mean(cosine_sims))

    def _get_event_weight(self, consistency: float, salience: float) -> float:
        """Event weight: linear combination of salience and consistency."""
        return self.zeta * salience + (1 - self.zeta) * consistency

    def generate_events(
        self, doc_embeddings: np.ndarray, doc_date_days: np.ndarray
    ) -> List[Event]:
        """Cluster documents into candidate events and filter them by weight."""
        # calculate semantic and temporal similarity matrices
        print("Computing semantic similarity matrix...")
        semantic_sim_matrix = self._semantic_similarity(doc_embeddings)
        print("Computing temporal similarity matrix...")
        temporal_sim_matrix = self._temporal_similarity(doc_date_days)
        # combine the two similarity matrices into the affinity matrix
        print("Computing affinity matrix...")
        affinity_matrix = self._get_affinity_matrix(
            semantic_sim_matrix, temporal_sim_matrix
        )
        # free the large intermediate matrices
        del semantic_sim_matrix, temporal_sim_matrix
# apply affinity propagation clustering
clustering = AffinityPropagation(
affinity="precomputed", verbose=False, random_state=42
).fit(np.asarray(affinity_matrix))
cluster_centers_indices = clustering.cluster_centers_indices_
cluster_labels = clustering.labels_
# create events from clusters, calculate salience and consistency
events = []
for event_id in set(cluster_labels):
if sum(cluster_labels == event_id) == 1:
# only one doc in the cluster, skip
continue
event_salience = self._get_salience(
affinity_matrix, cluster_labels, event_id
)
event_consistency = self._get_consistency(
doc_embeddings, cluster_labels, cluster_centers_indices, event_id
)
event_weight = self._get_event_weight(event_consistency, event_salience)
event_docs = np.where(cluster_labels == event_id)[0]
e = Event(
event_id,
event_docs,
cluster_centers_indices[event_id],
event_salience,
event_consistency,
event_weight,
)
events.append(e)
        # filter events by weight: keep only events whose weight exceeds
        # the mean minus one standard deviation (a lower bound)
        event_weights = [x.weight for x in events]
        event_weight_mean = np.mean(event_weights)
        event_weight_std = np.std(event_weights)
        top_scored_events = [
            x for x in events if x.weight > event_weight_mean - event_weight_std
        ]
        return top_scored_events


def gen_topic_events(
    gamma: float, alpha: float, zeta: float
) -> Iterable[Tuple[int, List[Event]]]:
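    """Run event detection per topic and yield (topic_id, events) pairs.

    Event document indices are mapped back to collection-level indices
    before yielding.
    """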
event_generator = MTLSEventGenerator(gamma, alpha, zeta)
    for topic_id in topics[1:]:  # skip the outlier topic "-1"
topic_doc_indices = np.where(doc_topics == topic_id)[0]
topic_doc_embeddings = doc_embeddings[topic_doc_indices]
topic_doc_date_days = doc_date_days[topic_doc_indices]
print(f"Topic {topic_id} with {len(topic_doc_embeddings)} documents.")
events = event_generator.generate_events(
topic_doc_embeddings, topic_doc_date_days
)
print(
f"Topic {topic_id} with {len(topic_doc_embeddings)} documents has {len(events)} events."
)
        # map topic-local document indices back to collection-level indices
for event in events:
event.doc_indices = topic_doc_indices[event.doc_indices].tolist()
event.center_index = topic_doc_indices[event.center_index]
        yield topic_id, events


if __name__ == "__main__":
    args = parser.parse_args()
if args.eval:
        # run a grid search over alpha and zeta for evaluation
from sklearn.model_selection import ParameterGrid
from util import get_users
        from concurrent.futures import ProcessPoolExecutor

        users = get_users()
out_file = EVENTS_PATH.with_suffix(".eval.jsonl")
out_file.unlink(missing_ok=True)
param_grid = {
"alpha": np.linspace(0.1, 0.9, 9),
"zeta": np.linspace(0.1, 0.9, 9),
}
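
        # the grid spans 9 x 9 = 81 (alpha, zeta) combinations; each one is
        # evaluated in its own worker process below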
        def process_params(params):
            """Run event detection for one (alpha, zeta) setting and collect per-event statistics."""
            results = []
for topic_id, events in gen_topic_events(
args.gamma, params["alpha"], params["zeta"]
):
for e in events:
num_users = len(set(users[e.doc_indices]))
num_docs = len(e.doc_indices)
results.append(
{
"topic_id": topic_id,
"event_id": e.id,
"alpha": params["alpha"],
"zeta": params["zeta"],
"num_docs": num_docs,
"num_users": num_users,
"salience": e.salience,
"consistency": e.consistency,
"weight": e.weight,
"duration": e.duration,
}
)
return results
        # note: submitting a function defined under "if __name__" to
        # ProcessPoolExecutor relies on a fork-based process start method
        with ProcessPoolExecutor() as executor:
futures = [
executor.submit(process_params, params)
for params in ParameterGrid(param_grid)
]
for future in tqdm(futures, desc="Param grid search"):
results = future.result()
with open(out_file, "a") as f:
for result in results:
f.write(json.dumps(result, cls=NumpyArrayEncoder) + "\n")
else:
        # generate events for all topics, writing one JSON line per topic
EVENTS_PATH.unlink(missing_ok=True)
for topic_id, events in gen_topic_events(args.gamma, args.alpha, args.zeta):
with open(EVENTS_PATH, "a") as f:
event_dicts = [asdict(event) for event in events]
f.write(
json.dumps(
{"topic_id": topic_id, "events": event_dicts},
cls=NumpyArrayEncoder,
)
+ "\n"
)