pytest: Add py.test fixtures and migrate first example test
This is the first example of the py.test style fixtures which should allow us to write much cleaner and nicer tests.

Signed-off-by: Christian Decker <decker.christian@gmail.com>

parent b307df0002
commit 727d115296
4 changed files with 207 additions and 54 deletions
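For orientation: the fixtures added below are injected by py.test purely by argument name, so a migrated test needs no unittest.TestCase scaffolding. A minimal sketch of the resulting style, reusing only names that appear in this diff (node_factory, bitcoind, get_nodes, generate_block); the test body itself is illustrative and not part of the commit:

# Illustrative sketch only. py.test matches the parameter names against the
# fixtures in tests/fixtures.py and builds them (plus their dependencies,
# e.g. directory and executor) before the test body runs; fixture teardown
# then handles node shutdown and the valgrind/crash-log checks.
def test_example_sketch(node_factory, bitcoind):
    l1, l2 = node_factory.get_nodes(2, {})
    l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
    bitcoind.generate_block(1)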

Makefile | 5

@@ -203,9 +203,10 @@ check:
 pytest: $(ALL_PROGRAMS)
 ifndef PYTEST
-	PYTHONPATH=contrib/pylightning:$$PYTHONPATH DEVELOPER=$(DEVELOPER) python3 tests/test_lightningd.py -f
+	@echo "py.test is required to run the integration tests, please install using 'pip3 install -r tests/requirements.txt'"
+	exit 1
 else
-	PYTHONPATH=contrib/pylightning:$$PYTHONPATH TEST_DEBUG=1 DEVELOPER=$(DEVELOPER) $(PYTEST) -vx tests/test_lightningd.py --test-group=$(TEST_GROUP) --test-group-count=$(TEST_GROUP_COUNT) $(PYTEST_OPTS)
+	PYTHONPATH=contrib/pylightning:$$PYTHONPATH TEST_DEBUG=1 DEVELOPER=$(DEVELOPER) $(PYTEST) -vx tests/ --test-group=$(TEST_GROUP) --test-group-count=$(TEST_GROUP_COUNT) $(PYTEST_OPTS)
 endif

 # Keep includes in alpha order.

tests/fixtures.py | 144 (new file)

@@ -0,0 +1,144 @@
from concurrent import futures
from test_lightningd import NodeFactory

import logging
import os
import pytest
import re
import tempfile
import utils


TEST_DIR = tempfile.mkdtemp(prefix='ltests-')
VALGRIND = os.getenv("NO_VALGRIND", "0") == "0"
DEVELOPER = os.getenv("DEVELOPER", "0") == "1"
TEST_DEBUG = os.getenv("TEST_DEBUG", "0") == "1"


@pytest.fixture
def directory(test_name):
    """Return a per-test specific directory
    """
    global TEST_DIR
    yield os.path.join(TEST_DIR, test_name)


@pytest.fixture
def test_name(request):
    yield request.function.__name__


@pytest.fixture
def bitcoind(directory):
    bitcoind = utils.BitcoinD(bitcoin_dir=directory, rpcport=28332)
    try:
        bitcoind.start()
    except Exception:
        bitcoind.stop()
        raise

    info = bitcoind.rpc.getnetworkinfo()

    if info['version'] < 160000:
        bitcoind.rpc.stop()
raise ValueError("bitcoind is too old. At least version 16000 (v0.16.0)"
|
||||
" is needed, current version is {}".format(info['version']))
|
||||
|
||||
    info = bitcoind.rpc.getblockchaininfo()
    # Make sure we have some spendable funds
    if info['blocks'] < 101:
        bitcoind.generate_block(101 - info['blocks'])
    elif bitcoind.rpc.getwalletinfo()['balance'] < 1:
        logging.debug("Insufficient balance, generating 1 block")
        bitcoind.generate_block(1)

    yield bitcoind

    try:
        bitcoind.rpc.stop()
    except Exception:
        bitcoind.proc.kill()
    bitcoind.proc.wait()


@pytest.fixture
def node_factory(directory, test_name, bitcoind, executor):
    nf = NodeFactory(test_name, bitcoind, executor, directory=directory)
    yield nf
    err_count = 0
    ok = nf.killall([not n.may_fail for n in nf.nodes])
    if VALGRIND:
        for node in nf.nodes:
            err_count += printValgrindErrors(node)
        if err_count:
            raise ValueError("{} nodes reported valgrind errors".format(err_count))

    for node in nf.nodes:
        err_count += printCrashLog(node)
    if err_count:
        raise ValueError("{} nodes had crash.log files".format(err_count))
    for node in nf.nodes:
        err_count += checkReconnect(node)
    if err_count:
        raise ValueError("{} nodes had unexpected reconnections".format(err_count))

    if not ok:
        raise Exception("At least one lightning exited with unexpected non-zero return code")


def getValgrindErrors(node):
    for error_file in os.listdir(node.daemon.lightning_dir):
        if not re.fullmatch(r"valgrind-errors.\d+", error_file):
            continue
        with open(os.path.join(node.daemon.lightning_dir, error_file), 'r') as f:
            errors = f.read().strip()
            if errors:
                return errors, error_file
    return None, None


def printValgrindErrors(node):
    errors, fname = getValgrindErrors(node)
    if errors:
        print("-" * 31, "Valgrind errors", "-" * 32)
        print("Valgrind error file:", fname)
        print(errors)
        print("-" * 80)
    return 1 if errors else 0


def getCrashLog(node):
    if node.may_fail:
        return None, None
    try:
        crashlog = os.path.join(node.daemon.lightning_dir, 'crash.log')
        with open(crashlog, 'r') as f:
            return f.readlines(), crashlog
    except Exception:
        return None, None


def printCrashLog(node):
    errors, fname = getCrashLog(node)
    if errors:
        print("-" * 10, "{} (last 50 lines)".format(fname), "-" * 10)
        for l in errors[-50:]:
            print(l, end='')
        print("-" * 80)
    return 1 if errors else 0


def checkReconnect(node):
    # Without DEVELOPER, we can't suppress reconnection.
    if node.may_reconnect or not DEVELOPER:
        return 0
    if node.daemon.is_in_log('Peer has reconnected'):
        return 1
    return 0


@pytest.fixture
def executor():
    ex = futures.ThreadPoolExecutor(max_workers=20)
    yield ex
    ex.shutdown(wait=False)
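The fixtures above compose purely by parameter name (node_factory asks for directory, test_name, bitcoind and executor), and everything after a yield is teardown. A self-contained toy example of the same mechanism, with made-up names, in case the pattern is unfamiliar:

import os
import shutil
import tempfile

import pytest


@pytest.fixture
def workdir():
    # Set-up runs before the test; everything after `yield` is teardown.
    d = tempfile.mkdtemp(prefix='toy-')
    yield d
    shutil.rmtree(d)


@pytest.fixture
def toy_node(workdir):
    # Depends on `workdir` simply by naming it as a parameter, just as
    # node_factory above depends on directory, bitcoind and executor.
    yield {'dir': workdir}


def test_toy(toy_node):
    assert os.path.isdir(toy_node['dir'])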

tests/test_gossip.py | 60 (new file)

@@ -0,0 +1,60 @@
from fixtures import *  # noqa: F401,F403
from test_lightningd import wait_for

import os
import time
import unittest


DEVELOPER = os.getenv("DEVELOPER", "0") == "1"


@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for --dev-broadcast-interval")
def test_gossip_pruning(node_factory, bitcoind):
    """ Create channel and see it being updated in time before pruning
    """
    opts = {'channel-update-interval': 5}
    l1, l2, l3 = node_factory.get_nodes(3, opts)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
    l2.rpc.connect(l3.info['id'], 'localhost', l3.info['port'])

    scid1 = l1.fund_channel(l2, 10**6)
    scid2 = l2.fund_channel(l3, 10**6)

    bitcoind.rpc.generate(6)

    # Channels should be activated locally
    wait_for(lambda: [c['active'] for c in l1.rpc.listchannels()['channels']] == [True] * 4)
    wait_for(lambda: [c['active'] for c in l2.rpc.listchannels()['channels']] == [True] * 4)
    wait_for(lambda: [c['active'] for c in l3.rpc.listchannels()['channels']] == [True] * 4)

    # All of them should send a keepalive message
    l1.daemon.wait_for_logs([
        'Sending keepalive channel_update for {}'.format(scid1),
    ])
    l2.daemon.wait_for_logs([
        'Sending keepalive channel_update for {}'.format(scid1),
        'Sending keepalive channel_update for {}'.format(scid2),
    ])
    l3.daemon.wait_for_logs([
        'Sending keepalive channel_update for {}'.format(scid2),
    ])

    # Now kill l3, so that l2 and l1 can prune it from their view after 10 seconds

    # FIXME: This sleep() masks a real bug: that channeld sends a
    # channel_update message (to disable the channel) with same
    # timestamp as the last keepalive, and thus is ignored. The minimal
    # fix is to backdate the keepalives 1 second, but maybe we should
    # simply have gossipd generate all updates?
    time.sleep(1)
    l3.stop()

    l1.daemon.wait_for_log("Pruning channel {} from network view".format(scid2))
    l2.daemon.wait_for_log("Pruning channel {} from network view".format(scid2))

    assert scid2 not in [c['short_channel_id'] for c in l1.rpc.listchannels()['channels']]
    assert scid2 not in [c['short_channel_id'] for c in l2.rpc.listchannels()['channels']]
    assert l3.info['id'] not in [n['nodeid'] for n in l1.rpc.listnodes()['nodes']]
    assert l3.info['id'] not in [n['nodeid'] for n in l2.rpc.listnodes()['nodes']]
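test_gossip.py imports wait_for() from test_lightningd.py, whose definition is outside this diff. For readers without the rest of the suite at hand, a generic polling helper in the same spirit looks roughly like this (the name, timeout and poll interval are assumptions, not the real implementation):

import time


def wait_for_sketch(success, timeout=30, interval=0.25):
    # Poll `success()` until it returns a truthy value or the timeout expires.
    start = time.time()
    while not success():
        if time.time() - start > timeout:
            raise TimeoutError('timed out waiting for condition')
        time.sleep(interval)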

tests/test_lightningd.py

@@ -2444,58 +2444,6 @@ class LightningDTests(BaseLightningDTests):
         node = l2.rpc.listnodes(l1.info['id'])['nodes'][0]
         assert node['alias'] == weird_name

-    @unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for --dev-broadcast-interval")
-    def test_gossip_pruning(self):
-        """ Create channel and see it being updated in time before pruning
-        """
-        opts = {'channel-update-interval': 5}
-        l1 = self.node_factory.get_node(options=opts)
-        l2 = self.node_factory.get_node(options=opts)
-        l3 = self.node_factory.get_node(options=opts)
-
-        l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port'])
-        l2.rpc.connect(l3.info['id'], 'localhost', l3.info['port'])
-
-        scid1 = self.fund_channel(l1, l2, 10**6)
-        scid2 = self.fund_channel(l2, l3, 10**6)
-
-        l1.bitcoin.rpc.generate(6)
-
-        # Channels should be activated locally
-        wait_for(lambda: [c['active'] for c in l1.rpc.listchannels()['channels']] == [True] * 4)
-        wait_for(lambda: [c['active'] for c in l2.rpc.listchannels()['channels']] == [True] * 4)
-        wait_for(lambda: [c['active'] for c in l3.rpc.listchannels()['channels']] == [True] * 4)
-
-        # All of them should send a keepalive message
-        l1.daemon.wait_for_logs([
-            'Sending keepalive channel_update for {}'.format(scid1),
-        ])
-        l2.daemon.wait_for_logs([
-            'Sending keepalive channel_update for {}'.format(scid1),
-            'Sending keepalive channel_update for {}'.format(scid2),
-        ])
-        l3.daemon.wait_for_logs([
-            'Sending keepalive channel_update for {}'.format(scid2),
-        ])
-
-        # Now kill l3, so that l2 and l1 can prune it from their view after 10 seconds
-
-        # FIXME: This sleep() masks a real bug: that channeld sends a
-        # channel_update message (to disable the channel) with same
-        # timestamp as the last keepalive, and thus is ignored. The minimal
-        # fix is to backdate the keepalives 1 second, but maybe we should
-        # simply have gossipd generate all updates?
-        time.sleep(1)
-        l3.stop()
-
-        l1.daemon.wait_for_log("Pruning channel {} from network view".format(scid2))
-        l2.daemon.wait_for_log("Pruning channel {} from network view".format(scid2))
-
-        assert scid2 not in [c['short_channel_id'] for c in l1.rpc.listchannels()['channels']]
-        assert scid2 not in [c['short_channel_id'] for c in l2.rpc.listchannels()['channels']]
-        assert l3.info['id'] not in [n['nodeid'] for n in l1.rpc.listnodes()['nodes']]
-        assert l3.info['id'] not in [n['nodeid'] for n in l2.rpc.listnodes()['nodes']]
-
     @unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for --dev-no-reconnect")
     def test_gossip_persistence(self):
         """Gossip for a while, restart and it should remember.
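The executor fixture (a 20-worker ThreadPoolExecutor) is not exercised by the migrated test yet. A hypothetical sketch of how a later migrated test could use it for concurrent blocking RPC calls; the test name, the getinfo call and the 10-second result timeout are assumptions, not part of this commit:

def test_concurrent_rpc_sketch(node_factory, executor):
    # Hypothetical usage: run blocking RPC calls on the executor's worker
    # threads and collect the results with a timeout.
    l1, l2 = node_factory.get_nodes(2, {})
    futs = [executor.submit(n.rpc.getinfo) for n in (l1, l2)]
    assert [f.result(10)['id'] for f in futs] == [l1.info['id'], l2.info['id']]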