Mirror of https://github.com/ElementsProject/lightning.git (synced 2025-03-03 18:57:06 +01:00)
pytest: Stopping daemon cleanly

We used to simply kill the daemon, which in some cases could result in half-written crashlogs and similar artifacts such as half-completed RPC calls. Now we ask lightningd to stop nicely, give it some time, and only then kill it. We also return the returncode of the daemon.

Signed-off-by: Christian Decker <decker.christian@gmail.com>
parent 3f9ec6c2fa
commit 11eaabdbe6

2 changed files with 48 additions and 15 deletions
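The sequence the commit message describes — ask the process to exit, give it a grace period, and only force-kill it if it does not react — boils down to the following pattern. This is a minimal sketch using Python's standard subprocess module, not the project's own test helpers; the name stop_gracefully is made up for illustration.

    import subprocess

    def stop_gracefully(proc, timeout=10):
        """Sketch: politely stop a child process, escalating to SIGKILL only if needed."""
        proc.terminate()                # SIGTERM first, so it can flush logs and finish RPC calls
        try:
            proc.wait(timeout=timeout)  # grace period
        except subprocess.TimeoutExpired:
            proc.kill()                 # it did not exit in time, force it
            proc.wait()
        return proc.returncode          # callers can check for a clean (0) exit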
@@ -126,7 +126,7 @@ class NodeFactory(object):
     def killall(self):
         for n in self.nodes:
-            n.daemon.stop()
+            n.stop()


 class BaseLightningDTests(unittest.TestCase):
@@ -1541,9 +1541,8 @@ class LightningDTests(BaseLightningDTests):
         time.sleep(1)
         assert l1.rpc.getpeers()['peers'][0]['msatoshi_to_us'] == 99990000
         assert l2.rpc.getpeers()['peers'][0]['msatoshi_to_us'] == 10000

         # Stop l2, l1 will reattempt to connect
-        l2.daemon.stop()
+        l2.stop()

         # Wait for l1 to notice
         wait_for(lambda: not l1.rpc.getpeers()['peers'][0]['connected'])
@@ -1562,7 +1561,7 @@ class LightningDTests(BaseLightningDTests):
         assert l2.rpc.getpeers()['peers'][0]['msatoshi_to_us'] == 20000

         # Finally restart l1, and make sure it remembers
-        l1.daemon.stop()
+        l1.stop()
         l1.daemon.start()
         assert l1.rpc.getpeers()['peers'][0]['msatoshi_to_us'] == 99980000
@@ -58,17 +58,28 @@ class TailableProc(object):
         self.thread.start()
         self.running = True

-    def stop(self):
+    def stop(self, timeout=10):
         if self.outputDir:
             logpath = os.path.join(self.outputDir, 'log')
             with open(logpath, 'w') as f:
                 for l in self.logs:
                     f.write(l + '\n')
         self.proc.terminate()
-        self.proc.kill()
+
+        # Now give it some time to react to the signal
+        rc = self.proc.wait(timeout)
+
+        if rc is None:
+            self.proc.kill()
+
+        self.proc.wait()
         self.thread.join()
-
-        if failed:
-            raise(ValueError("Process '{}' did not cleanly shutdown".format(self.proc.pid)))
+
+        return self.proc.returncode

     def tail(self):
         """Tail the stdout of the process and remember it.
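Note that the new stop() above (and the wait() helper in the next hunk) treat proc.wait(timeout) as returning None when the timeout expires. With the standard library's subprocess.Popen.wait(), a timeout instead raises subprocess.TimeoutExpired, so an equivalent helper written directly against the stdlib would look roughly like this (a sketch under that assumption; wait_or_none is a hypothetical name, not part of the diff):

    import subprocess

    def wait_or_none(proc, timeout):
        """Return the process returncode, or None if it is still running after timeout seconds."""
        try:
            return proc.wait(timeout=timeout)
        except subprocess.TimeoutExpired:
            return None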
@@ -231,12 +242,14 @@ class LightningD(TailableProc):
         self.wait_for_log("Creating IPv6 listener on port")
         logging.info("LightningD started")

-    def stop(self):
-        # If it's already crashing, wait a bit for log dump.
-        if os.path.isfile(os.path.join(self.lightning_dir, 'crash.log')):
-            time.sleep(2)
-        TailableProc.stop(self)
-        logging.info("LightningD stopped")
+    def wait(self, timeout=10):
+        """Wait for the daemon to stop for up to timeout seconds
+
+        Returns the returncode of the process, None if the process did
+        not return before the timeout triggers.
+        """
+        self.proc.wait(timeout)
+        return self.proc.returncode

 class LightningNode(object):
     def __init__(self, daemon, rpc, btc, executor):
@@ -316,3 +329,24 @@ class LightningNode(object):
         on cleanup"""
         self.known_fail = True

+    def stop(self, timeout=10):
+        """ Attempt to do a clean shutdown, but kill if it hangs
+        """
+
+        # Tell the daemon to stop
+        try:
+            # May fail if the process already died
+            self.rpc.stop()
+        except:
+            pass
+
+        rc = self.daemon.wait(timeout)
+
+        # If it did not stop be more insistent
+        if rc is None:
+            rc = self.daemon.stop()
+
+        if rc != 0:
+            raise ValueError("Node did not exit cleanly, rc={}".format(rc))
+        else:
+            return rc
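From a test's perspective, n.stop() now asks lightningd to shut down over RPC, waits out the grace period, and only then falls back to killing the process, surfacing the daemon's returncode either way. A hypothetical teardown built on these helpers might look like this (illustration only; shutdown_all is not part of the diff):

    def shutdown_all(nodes, timeout=10):
        """Sketch: stop every test node, collecting any that did not exit cleanly."""
        unclean = []
        for n in nodes:
            try:
                n.stop(timeout)           # RPC 'stop', then wait, then kill as a last resort
            except ValueError as err:     # raised when the daemon's returncode is non-zero
                unclean.append((n, err))
        return unclean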