
Commit 53b90cb

run ruff format
1 parent a581396 commit 53b90cb
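(Context: per the commit message, these hunks come from running ruff's formatter over the tree, e.g. "ruff format src/"; the exact path is an assumption, and "ruff format --diff" previews such changes without writing them. Every hunk below is mechanical: long lines wrapped, single quotes normalized to double quotes, trailing commas added, and doubled blank lines collapsed.)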

5 files changed (+86 -47 lines)

src/xdist/dsession.py (+21 -15)

@@ -180,7 +180,6 @@ def loop_once(self) -> None:
         if self.sched.tests_finished:
             self.triggershutdown()

-
     def is_node_finishing(self, node: WorkerController) -> bool:
         """Check if a test worker is considered to be finishing.

@@ -191,32 +190,33 @@ def is_node_finishing(self, node: WorkerController) -> bool:
         pending = self.sched.node2pending.get(node)
         return pending is not None and len(pending) < 2

-
     def are_all_nodes_finishing(self) -> bool:
         """Check if all workers are finishing (See 'is_node_finishing' above)."""
         assert self.sched is not None
         return all(self.is_node_finishing(node) for node in self.sched.nodes)

-
     def are_all_nodes_done(self) -> bool:
         """Check if all nodes have reported to finish."""
         return all(s == "finished" for s in self.worker_status.values())

-
     def are_all_active_nodes_collected(self) -> bool:
         """Check if all nodes have reported collection to be complete."""
         if not all(n.gateway.id in self.worker_status for n in self._active_nodes):
             return False
-        return all(self.worker_status[n.gateway.id] == "collected" for n in self._active_nodes)
-
+        return all(
+            self.worker_status[n.gateway.id] == "collected" for n in self._active_nodes
+        )

     def reset_nodes_if_needed(self) -> None:
         assert self.sched is not None
         assert type(self.sched) is CustomGroup
-        if self.are_all_nodes_finishing() and self.ready_to_run_tests and not self.sched.do_resched:
+        if (
+            self.are_all_nodes_finishing()
+            and self.ready_to_run_tests
+            and not self.sched.do_resched
+        ):
             self.reset_nodes()

-
     def reset_nodes(self) -> None:
         """Issue shutdown notices to workers for rescheduling purposes."""
         assert self.sched is not None
@@ -227,21 +227,21 @@ def reset_nodes(self) -> None:
             if self.is_node_finishing(node):
                 node.shutdown()

-
     def reschedule(self) -> None:
         """Reschedule tests."""
         assert self.sched is not None
         assert type(self.sched) is CustomGroup
         self.sched.do_resched = False
         self.sched.check_schedule(self.sched.nodes[0], 1.0, True)

-
     def prepare_for_reschedule(self) -> None:
         """Update test workers and their status tracking so rescheduling is ready."""
         assert type(self.sched) is CustomGroup
         assert self.sched is not None
         self.remake_nodes = False
-        num_workers = self.sched.dist_groups[self.sched.pending_groups[0]]['group_workers']
+        num_workers = self.sched.dist_groups[self.sched.pending_groups[0]][
+            "group_workers"
+        ]
         self.trdist._status = {}
         assert self.nodemanager is not None
         new_nodes = self.nodemanager.setup_nodes(self.saved_put, num_workers)
@@ -295,8 +295,10 @@ def worker_workerfinished(self, node: WorkerController) -> None:
             try:
                 self.prepare_for_reschedule()
             except Exception as e:
-                msg = ("Exception caught during preparation for rescheduling. Giving up."
-                       f"\n{''.join(traceback.format_exception(e))}")
+                msg = (
+                    "Exception caught during preparation for rescheduling. Giving up."
+                    f"\n{''.join(traceback.format_exception(e))}"
+                )
                 self.shouldstop = msg
                 return
         self.config.hook.pytest_testnodedown(node=node, error=None)
@@ -392,7 +394,9 @@ def worker_collectionfinish(
         scheduling the first time it logs which scheduler is in use.
         """
         if self.shuttingdown:
-            self.report_line(f"[-] [dse] collectionfinish while closing {node.gateway.id}")
+            self.report_line(
+                f"[-] [dse] collectionfinish while closing {node.gateway.id}"
+            )
             return
         self.update_worker_status(node, "collected")

@@ -412,7 +416,9 @@ def worker_collectionfinish(
             self.trdist.ensure_show_status()
             self.terminal.write_line("")
             if self.config.option.verbose > 0:
-                self.report_line(f"[-] [dse] scheduling tests via {self.sched.__class__.__name__}")
+                self.report_line(
+                    f"[-] [dse] scheduling tests via {self.sched.__class__.__name__}"
+                )
             if isinstance(self.sched, CustomGroup):
                 if self.ready_to_run_tests and self.are_all_active_nodes_collected():
                     # we're coming back here after finishing a batch of tests - so start the next batch
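The scheduling logic in these hunks leans on the "finishing" heuristic that is_node_finishing implements: a worker counts as finishing once fewer than two tests remain in its pending queue. A minimal standalone sketch of that predicate, with illustrative data rather than xdist's real node objects:

    # Hypothetical pending-test queues keyed by worker id.
    node2pending = {"gw0": [12], "gw1": [], "gw2": [7, 8, 9]}

    def is_node_finishing(worker_id: str) -> bool:
        # Same predicate as in the diff: tracked, and fewer than 2 tests pending.
        pending = node2pending.get(worker_id)
        return pending is not None and len(pending) < 2

    # gw2 still has three tests queued, so not every worker is finishing yet.
    print(all(is_node_finishing(w) for w in node2pending))  # False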

src/xdist/remote.py (+3 -1)

@@ -209,7 +209,9 @@ def pytest_collection_modifyitems(
 ) -> None:
     # add the group name to nodeid as suffix if --dist=loadgroup
     if config.getvalue("loadgroup") or config.getvalue("customgroup"):
-        functional_mark = "xdist_group" if config.getvalue("loadgroup") else "xdist_custom"
+        functional_mark = (
+            "xdist_group" if config.getvalue("loadgroup") else "xdist_custom"
+        )
         for item in items:
             mark = item.get_closest_marker(functional_mark)
             if not mark:
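This hunk only picks which marker to look for; per the comment above it, the marker's name is then appended to each item's nodeid as an "@" suffix, which the customgroup scheduler later splits on. A conceptual sketch of that convention (not the exact implementation):

    nodeid = "xdist-testing-ntop/test.py::test_2"
    mark_name = "low_4"  # from @pytest.mark.xdist_custom(name="low_4")
    print(f"{nodeid}@{mark_name}")  # xdist-testing-ntop/test.py::test_2@low_4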

src/xdist/scheduler/customgroup.py (+47 -30)

@@ -189,7 +189,6 @@ def mark_test_complete(
         self.check_schedule(node, duration=duration)

     def mark_test_pending(self, item: str) -> None:
-
         assert self.collection is not None
         self.pending.insert(
             0,
@@ -205,7 +204,9 @@ def remove_pending_tests_from_node(
     ) -> None:
         raise NotImplementedError()

-    def check_schedule(self, node: WorkerController, duration: float = 0, from_dsession: bool = False) -> None:
+    def check_schedule(
+        self, node: WorkerController, duration: float = 0, from_dsession: bool = False
+    ) -> None:
         """Maybe schedule new items on the node.

         If there are any globally pending nodes left then this will
@@ -214,7 +215,9 @@ def check_schedule(self, node: WorkerController, duration: float = 0, from_dsess
         heuristic to influence how many tests the node is assigned.
         """
         if node.shutting_down:
-            self.report_line(f"[-] [csg] {node.workerinput['workerid']} is already shutting down")
+            self.report_line(
+                f"[-] [csg] {node.workerinput['workerid']} is already shutting down"
+            )
             return

         if self.pending:
@@ -227,18 +230,25 @@
             if self.pending_groups:
                 dist_group_key = self.pending_groups.pop(0)
                 dist_group = self.dist_groups[dist_group_key]
-                nodes = cycle(self.nodes[0:dist_group['group_workers']])
-                schedule_log: dict[str, Any] = {n.gateway.id:[] for n in self.nodes[0:dist_group['group_workers']]}
-                for _ in range(len(dist_group['test_indices'])):
+                nodes = cycle(self.nodes[0 : dist_group["group_workers"]])
+                schedule_log: dict[str, Any] = {
+                    n.gateway.id: []
+                    for n in self.nodes[0 : dist_group["group_workers"]]
+                }
+                for _ in range(len(dist_group["test_indices"])):
                     n = next(nodes)
-                    #needs cleaner way to be identified
-                    tests_per_node = self.dist_groups[dist_group_key]['pending_indices'][:1]
+                    # needs cleaner way to be identified
+                    tests_per_node = self.dist_groups[dist_group_key][
+                        "pending_indices"
+                    ][:1]
                     schedule_log[n.gateway.id].extend(tests_per_node)

                     self._send_tests_group(n, 1, dist_group_key)
                 del self.dist_groups[dist_group_key]
-                message = (f"\n[-] [csg] check_schedule: processed scheduling for {dist_group_key}:"
-                           f" {' '.join([f'{nid} ({len(nt)})' for nid,nt in schedule_log.items()])}")
+                message = (
+                    f"\n[-] [csg] check_schedule: processed scheduling for {dist_group_key}:"
+                    f" {' '.join([f'{nid} ({len(nt)})' for nid,nt in schedule_log.items()])}"
+                )
                 self.report_line(message)

             else:
@@ -310,26 +320,28 @@ def schedule(self) -> None:

         if self.is_first_time:
             for i, test in enumerate(self.collection):
-                if '@' in test:
-                    group_mark = test.split('@')[-1]
-                    group_workers = int(group_mark.split('_')[-1])
+                if "@" in test:
+                    group_mark = test.split("@")[-1]
+                    group_workers = int(group_mark.split("_")[-1])
                     if group_workers > len(self.nodes):
                         # We can only distribute across as many nodes as we have available
                         # If a group requests more, we fallback to our actual max
                         group_workers = len(self.nodes)
                 else:
-                    group_mark = 'default'
+                    group_mark = "default"
                     group_workers = len(self.nodes)
-                existing_tests = dist_groups.get(group_mark, {}).get('tests', [])
+                existing_tests = dist_groups.get(group_mark, {}).get("tests", [])
                 existing_tests.append(test)
-                existing_indices = dist_groups.get(group_mark, {}).get('test_indices', [])
+                existing_indices = dist_groups.get(group_mark, {}).get(
+                    "test_indices", []
+                )
                 existing_indices.append(i)

                 dist_groups[group_mark] = {
-                    'tests': existing_tests,
-                    'group_workers': group_workers,
-                    'test_indices': existing_indices,
-                    'pending_indices': existing_indices
+                    "tests": existing_tests,
+                    "group_workers": group_workers,
+                    "test_indices": existing_indices,
+                    "pending_indices": existing_indices,
                 }
             self.dist_groups = dist_groups
             self.pending_groups = list(dist_groups.keys())
@@ -342,17 +354,21 @@ def schedule(self) -> None:
             return
         dist_group_key = self.pending_groups.pop(0)
         dist_group = self.dist_groups[dist_group_key]
-        nodes = cycle(self.nodes[0:dist_group['group_workers']])
-        schedule_log: dict[str, Any] = {n.gateway.id: [] for n in self.nodes[0:dist_group['group_workers']]}
-        for _ in range(len(dist_group['test_indices'])):
+        nodes = cycle(self.nodes[0 : dist_group["group_workers"]])
+        schedule_log: dict[str, Any] = {
+            n.gateway.id: [] for n in self.nodes[0 : dist_group["group_workers"]]
+        }
+        for _ in range(len(dist_group["test_indices"])):
             n = next(nodes)
             # needs cleaner way to be identified
-            tests_per_node = self.dist_groups[dist_group_key]['pending_indices'][:1]
+            tests_per_node = self.dist_groups[dist_group_key]["pending_indices"][:1]
             schedule_log[n.gateway.id].extend(tests_per_node)
             self._send_tests_group(n, 1, dist_group_key)
         del self.dist_groups[dist_group_key]
-        message = ("\n[-] [csg] schedule: processed scheduling for "
-                   f"{dist_group_key}: {' '.join([f'{nid} ({len(nt)})' for nid, nt in schedule_log.items()])}")
+        message = (
+            "\n[-] [csg] schedule: processed scheduling for "
+            f"{dist_group_key}: {' '.join([f'{nid} ({len(nt)})' for nid, nt in schedule_log.items()])}"
+        )
         self.report_line(message)

     def _send_tests(self, node: WorkerController, num: int) -> None:
@@ -362,16 +378,17 @@ def _send_tests(self, node: WorkerController, num: int) -> None:
             self.node2pending[node].extend(tests_per_node)
             node.send_runtest_some(tests_per_node)

-    def _send_tests_group(self, node: WorkerController, num: int, dist_group_key: str) -> None:
-        tests_per_node = self.dist_groups[dist_group_key]['pending_indices'][:num]
+    def _send_tests_group(
+        self, node: WorkerController, num: int, dist_group_key: str
+    ) -> None:
+        tests_per_node = self.dist_groups[dist_group_key]["pending_indices"][:num]
         if tests_per_node:
-            del self.dist_groups[dist_group_key]['pending_indices'][:num]
+            del self.dist_groups[dist_group_key]["pending_indices"][:num]
             for test_index in tests_per_node:
                 self.pending.remove(test_index)
             self.node2pending[node].extend(tests_per_node)
             node.send_runtest_some(tests_per_node)

-
     def _check_nodes_have_same_collection(self) -> bool:
         """Return True if all nodes have collected the same items.
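As the schedule() hunk shows, the suffix encodes a worker budget: the text after the last underscore in a mark like "low_4" is parsed as the number of workers the group may use, capped at the workers actually available, and unsuffixed tests fall into a "default" group spread across all workers. A self-contained sketch of that parsing rule:

    def parse_group(test_nodeid: str, available_workers: int) -> tuple[str, int]:
        # Mirrors schedule(): "...::test_2@low_4" -> ("low_4", 4), capped at
        # the number of workers actually available.
        if "@" in test_nodeid:
            group_mark = test_nodeid.split("@")[-1]
            group_workers = int(group_mark.split("_")[-1])
            return group_mark, min(group_workers, available_workers)
        return "default", available_workers

    print(parse_group("test.py::test_2@low_4", 2))  # ('low_4', 2), capped
    print(parse_group("test.py::test_11", 8))       # ('default', 8)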

src/xdist/workermanage.py (+1 -1)

@@ -82,7 +82,7 @@ def rsync_roots(self, gateway: execnet.Gateway) -> None:
     def setup_nodes(
         self,
         putevent: Callable[[tuple[str, dict[str, Any]]], None],
-        max_nodes: int | None = None
+        max_nodes: int | None = None,
     ) -> list[WorkerController]:
         self.config.hook.pytest_xdist_setupnodes(config=self.config, specs=self.specs)
         self.trace("setting up nodes")

xdist-testing-ntop/test.py (+14 -0)

@@ -8,21 +8,25 @@ def test_1():
     time.sleep(2)
     assert True

+
 @pytest.mark.xdist_custom(name="low_4")
 def test_2():
     time.sleep(2)
     assert True

+
 @pytest.mark.xdist_custom(name="low_4")
 def test_3():
     time.sleep(2)
     assert True

+
 @pytest.mark.xdist_custom(name="low_4")
 def test_4():
     time.sleep(2)
     assert True

+
 # @pytest.mark.xdist_custom(name="low_4")
 # def test_4a():
 # time.sleep(2)
@@ -48,48 +52,58 @@ def test_4():
 # time.sleep(2)
 # assert True

+
 @pytest.mark.xdist_custom(name="med_2")
 def test_5():
     time.sleep(3)
     assert True

+
 @pytest.mark.xdist_custom(name="med_2")
 def test_6():
     time.sleep(3)
     assert True

+
 @pytest.mark.xdist_custom(name="med_2")
 def test_7():
     time.sleep(3)
     assert True

+
 @pytest.mark.xdist_custom(name="med_2")
 def test_8():
     time.sleep(3)
     assert True

+
 @pytest.mark.xdist_custom(name="high_1")
 def test_9():
     time.sleep(5)
     assert True

+
 @pytest.mark.xdist_custom(name="high_1")
 def test_10():
     time.sleep(5)
     assert True

+
 def test_11():
     time.sleep(1)
     assert True

+
 def test_12():
     time.sleep(1)
     assert True

+
 def test_13():
     time.sleep(1)
     assert True

+
 def test_14():
     time.sleep(1)
     assert True
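Taken together with the marks, this test file exercises the grouping end to end: the low_4 tests may fan out across up to four workers, med_2 across two, high_1 is pinned to a single worker, and the unmarked test_11 through test_14 land in the default group spread across all available workers.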
