forked from networkx/nx-parallel
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtest_should_run.py
More file actions
104 lines (78 loc) · 3.02 KB
/
test_should_run.py
File metadata and controls
104 lines (78 loc) · 3.02 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
import nx_parallel as nxp
from nx_parallel.interface import ALGORITHMS
import networkx as nx
import inspect
import pytest
import os
def get_functions_with_should_run():
    """Yield names of ``nxp.algorithms`` functions that expose a callable ``should_run``.

    Iterates every function in the ``nx_parallel.algorithms`` namespace and
    yields the name of each one whose ``should_run`` attribute exists and is
    callable.

    Yields
    ------
    str
        The function's attribute name within ``nxp.algorithms``.
    """
    for name, obj in inspect.getmembers(nxp.algorithms, inspect.isfunction):
        # Use getattr with a default so functions that lack a `should_run`
        # attribute are simply skipped instead of raising AttributeError.
        if callable(getattr(obj, "should_run", None)):
            yield name
def test_get_functions_with_should_run():
    """Discovery matches the backend's registered algorithm list exactly."""
    discovered = set(get_functions_with_should_run())
    registered = set(ALGORITHMS)
    assert discovered == registered
def test_default_should_run():
    """Default should_run refuses when n_jobs == 1 and allows otherwise."""

    @nxp._configure_if_nx_active()
    def probe():
        pass

    with pytest.MonkeyPatch().context() as mp:
        # Drop the pytest marker so the decorator behaves as in production.
        mp.delitem(os.environ, "PYTEST_CURRENT_TEST", raising=False)
        with nx.config.backends.parallel(n_jobs=1):
            refusal = probe.should_run()
            assert refusal == "Parallel backend requires `n_jobs` > 1 to run"
        assert probe.should_run()
def test_skip_parallel_backend():
    """`should_skip_parallel` always reports that parallel execution is skipped."""

    @nxp._configure_if_nx_active(should_run=nxp.should_skip_parallel)
    def probe():
        pass

    verdict = probe.should_run()
    assert verdict == "Fast algorithm; skip parallel execution"
def test_should_run_if_large():
    """`should_run_if_large` rejects small graphs and accepts large ones."""

    @nxp._configure_if_nx_active(should_run=nxp.should_run_if_large)
    def probe(G):
        pass

    small_graph = nx.fast_gnp_random_graph(20, 0.6, seed=42)
    large_graph = nx.fast_gnp_random_graph(250, 0.6, seed=42)
    refusal = probe.should_run(small_graph)
    assert refusal == "Graph too small for parallel execution"
    assert probe.should_run(large_graph)
def test_should_run_if_nodes_none():
    """`should_run_if_nodes_none` only allows parallel runs when `nodes` is None."""

    @nxp._configure_if_nx_active(should_run=nxp.should_run_if_nodes_none)
    def probe(G, nodes=None):
        pass

    graph = nx.fast_gnp_random_graph(20, 0.6, seed=42)
    refusal = probe.should_run(graph, nodes=[1, 3])
    assert refusal == "`nodes` should be None for parallel execution"
    assert probe.should_run(graph)
def test_should_run_if_sparse():
    """`should_run_if_sparse` accepts sparse graphs and rejects dense ones."""

    @nxp._configure_if_nx_active(should_run=nxp.should_run_if_sparse)
    def probe(G):
        pass

    refusal = "Graph too dense to benefit from parallel execution"

    dense_graph = nx.fast_gnp_random_graph(20, 0.6, seed=42)
    assert probe.should_run(dense_graph) == refusal

    sparse_graph = nx.fast_gnp_random_graph(20, 0.3, seed=42)
    assert probe.should_run(sparse_graph)
    # Tightening the threshold reclassifies the same graph as too dense.
    assert probe.should_run(sparse_graph, threshold=0.2) == refusal
@pytest.mark.parametrize("func_name", get_functions_with_should_run())
def test_should_run(func_name):
    """Each algorithm's ``should_run`` returns a bool or an explanatory string."""
    tournament_only = {
        "tournament_is_strongly_connected",
    }
    # Tournament algorithms require a tournament graph; everything else gets
    # a generic random graph.
    if func_name in tournament_only:
        graph = nx.tournament.random_tournament(15, seed=42)
    else:
        graph = nx.fast_gnp_random_graph(40, 0.6, seed=42)
    wrapped = nxp.ParallelGraph(graph)
    func = getattr(nxp, func_name)
    verdict = func.should_run(wrapped)
    # Raise explicitly (rather than a bare assert) so the check survives -O.
    if not isinstance(verdict, (bool, str)):
        raise AssertionError(
            f"{func.__name__}.should_run has an invalid return type; {type(verdict)}"
        )