Mirror of https://github.com/ElementsProject/lightning.git, synced 2025-03-03 18:57:06 +01:00
pytest: Consolidate node teardown checks a bit
These checks follow the same pattern over and over again, so I just added a tiny wrapper to reduce the clutter.
Parent: ce2bdeec70
Commit: b90b4b4bb1
1 changed file with 17 additions and 32 deletions
@@ -161,38 +161,19 @@ def node_factory(request, directory, test_name, bitcoind, executor, teardown_che
     for e in errs:
         teardown_checks.add_error(e)
 
-    if VALGRIND:
-        for node in nf.nodes:
-            if printValgrindErrors(node):
-                teardown_checks.add_node_error(node, "reported valgrind errors")
-
-    for node in nf.nodes:
-        if printCrashLog(node):
-            teardown_checks.add_node_error(node, "had crash.log files")
-
-    for node in [n for n in nf.nodes if not n.allow_broken_log]:
-        if checkBroken(node):
-            teardown_checks.add_node_error(node, "had BROKEN messages")
-
-    for node in nf.nodes:
-        if checkReconnect(node):
-            teardown_checks.add_node_error(node, "had unexpected reconnections")
-
-    for node in [n for n in nf.nodes if not n.allow_bad_gossip]:
-        if checkBadGossip(node):
-            teardown_checks.add_node_error(node, "had bad gossip messages")
-
-    for node in nf.nodes:
-        if checkBadReestablish(node):
-            teardown_checks.add_node_error(node, "had bad reestablish")
-
-    for node in nf.nodes:
-        if checkBadHSMRequest(node):
-            teardown_checks.add_node_error(node, "had bad hsm requests")
-
-    for node in nf.nodes:
-        if checkMemleak(node):
-            teardown_checks.add_node_error(node, "had memleak messages")
+    def map_node_error(nodes, f, msg):
+        for n in nodes:
+            if n and f(n):
+                teardown_checks.add_node_error(n, msg)
+
+    map_node_error(nf.nodes, printValgrindErrors, "reported valgrind errors")
+    map_node_error(nf.nodes, printCrashLog, "had crash.log files")
+    map_node_error(nf.nodes, lambda n: not n.allow_broken_log and n.daemon.is_in_log(r'\*\*BROKEN\*\*'), "had BROKEN messages")
+    map_node_error(nf.nodes, checkReconnect, "had unexpected reconnections")
+    map_node_error(nf.nodes, checkBadGossip, "had bad gossip messages")
+    map_node_error(nf.nodes, lambda n: n.daemon.is_in_log('Bad reestablish'), "had bad reestablish")
+    map_node_error(nf.nodes, lambda n: n.daemon.is_in_log('bad hsm request'), "had bad hsm requests")
+    map_node_error(nf.nodes, checkMemleak, "had memleak messages")
 
     if not ok:
         teardown_checks.add_error("At least one lightning exited with unexpected non-zero return code")
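
For context, here is a minimal, self-contained sketch of the pattern this hunk introduces, using made-up FakeNode, FakeDaemon and TeardownErrors stand-ins rather than the repo's real fixture objects: the eight near-identical for/if/add_node_error blocks collapse into a single map_node_error helper that runs a predicate over every node.

# A minimal, self-contained sketch (stand-in classes only; the real node and
# teardown_checks objects come from this repo's pytest fixtures).
import re


class FakeDaemon:
    def __init__(self, log):
        self.log = log

    def is_in_log(self, regex):
        return re.search(regex, self.log) is not None


class FakeNode:
    def __init__(self, log, allow_broken_log=False):
        self.daemon = FakeDaemon(log)
        self.allow_broken_log = allow_broken_log


class TeardownErrors:
    def __init__(self):
        self.node_errors = []

    def add_node_error(self, node, msg):
        self.node_errors.append((node, msg))


teardown_checks = TeardownErrors()


def map_node_error(nodes, f, msg):
    # One loop replaces the eight near-identical for/if blocks removed above:
    # run predicate f on every (non-None) node and record msg for each failure.
    # In the real fixture this helper is a closure over teardown_checks.
    for n in nodes:
        if n and f(n):
            teardown_checks.add_node_error(n, msg)


nodes = [FakeNode("lightningd started"), FakeNode("**BROKEN** bad things")]
map_node_error(nodes,
               lambda n: not n.allow_broken_log and n.daemon.is_in_log(r'\*\*BROKEN\*\*'),
               "had BROKEN messages")
assert len(teardown_checks.node_errors) == 1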
@@ -249,6 +230,8 @@ def checkReconnect(node):
 
 
 def checkBadGossip(node):
+    if node.allow_bad_gossip:
+        return 0
     # We can get bad gossip order from inside error msgs.
     if node.daemon.is_in_log('Bad gossip order from (?!error)'):
         # This can happen if a node sees a node_announce after a channel
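
The two added lines move the allow_bad_gossip opt-out from the call site (the old list comprehension in the first hunk) into the check itself, which is what lets map_node_error run it over every node. A hedged sketch of the resulting shape of the function follows; everything past the context lines shown in the hunk is an assumption about the unchanged remainder, not part of the diff.

def checkBadGossip(node):
    if node.allow_bad_gossip:
        return 0
    # We can get bad gossip order from inside error msgs.
    if node.daemon.is_in_log('Bad gossip order from (?!error)'):
        # This can happen if a node sees a node_announce after a channel
        # (the comment and handling continue in the real source; this sketch
        # simply assumes the function reports a problem by returning 1 here)
        return 1
    return 0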
@@ -264,6 +247,8 @@ def checkBadGossip(node):
 
 
 def checkBroken(node):
+    if node.allow_broken_log:
+        return 0
     # We can get bad gossip order from inside error msgs.
     if node.daemon.is_in_log(r'\*\*BROKEN\*\*'):
         return 1
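
checkBroken gets the same treatment: the allow_broken_log opt-out now lives inside the check. Note that the consolidated call sites in the first hunk express this check as an inline lambda rather than calling checkBroken, but after this change both forms behave the same. Here is a tiny, hedged check of the guard semantics, reusing the FakeNode stand-in from the sketch after the first hunk; the trailing return 0 is assumed, since the hunk ends at return 1.

def checkBroken(node):
    if node.allow_broken_log:
        return 0
    if node.daemon.is_in_log(r'\*\*BROKEN\*\*'):
        return 1
    return 0


# A node that opted out of the check is no longer reported...
assert checkBroken(FakeNode("**BROKEN** bad thing", allow_broken_log=True)) == 0
# ...while an ordinary node with a BROKEN line still is.
assert checkBroken(FakeNode("**BROKEN** bad thing")) == 1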
@@ -289,7 +274,7 @@ def checkMemleak(node):
 
 
 @pytest.fixture
-def executor():
+def executor(teardown_checks):
     ex = futures.ThreadPoolExecutor(max_workers=20)
     yield ex
     ex.shutdown(wait=False)
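
The executor fixture now declares teardown_checks as a parameter. pytest sets up a fixture's dependencies before the fixture itself and finalizes fixtures in reverse order, so this makes sure the thread pool is shut down before teardown_checks runs its end-of-test reporting. A hedged sketch of that ordering; the teardown_checks body below is illustrative only, not the repo's real fixture.

# Hedged sketch of the ordering effect (the teardown_checks fixture body here
# is made up; only the executor body matches the diff above).
from concurrent import futures

import pytest


@pytest.fixture
def teardown_checks():
    errors = []
    yield errors
    # Finalizes last: by this point the dependent executor fixture below
    # has already been torn down.
    assert not errors, "\n".join(errors)


@pytest.fixture
def executor(teardown_checks):
    ex = futures.ThreadPoolExecutor(max_workers=20)
    yield ex
    # Finalizes before teardown_checks, because executor depends on it.
    ex.shutdown(wait=False)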