tests/test_lightningd.py: move port into node.

We're about to change the JSONRPC, so let's put an explicit 'port' into
our node class.

We initialize it at startup time: in future I hope to use ephemeral ports
to make our tests more easily parallelizable.

Suggested-by: @cdecker
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
This commit is contained in:
Rusty Russell 2018-05-07 13:14:39 +09:30 committed by Christian Decker
parent 7d27501c94
commit 6ba9a7cb57
2 changed files with 109 additions and 109 deletions

View File

@@ -208,7 +208,7 @@ class NodeFactory(object):
] ]
try: try:
node.daemon.start() node.start()
except Exception: except Exception:
node.daemon.stop() node.daemon.stop()
raise raise
@@ -329,7 +329,7 @@ class BaseLightningDTests(unittest.TestCase):
class LightningDTests(BaseLightningDTests): class LightningDTests(BaseLightningDTests):
def connect(self, may_reconnect=False): def connect(self, may_reconnect=False):
l1, l2 = self.node_factory.get_nodes(2, opts={'may_reconnect': may_reconnect}) l1, l2 = self.node_factory.get_nodes(2, opts={'may_reconnect': may_reconnect})
ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
assert ret['id'] == l2.info['id'] assert ret['id'] == l2.info['id']
@@ -639,10 +639,10 @@ class LightningDTests(BaseLightningDTests):
l2.daemon.wait_for_log('hand_back_peer {}: now local again'.format(l1.info['id'])) l2.daemon.wait_for_log('hand_back_peer {}: now local again'.format(l1.info['id']))
# Reconnect should be a noop # Reconnect should be a noop
ret = l1.rpc.connect(l2.info['id'], 'localhost', port=l2.info['port']) ret = l1.rpc.connect(l2.info['id'], 'localhost', port=l2.port)
assert ret['id'] == l2.info['id'] assert ret['id'] == l2.info['id']
ret = l2.rpc.connect(l1.info['id'], host='localhost', port=l1.info['port']) ret = l2.rpc.connect(l1.info['id'], host='localhost', port=l1.port)
assert ret['id'] == l1.info['id'] assert ret['id'] == l1.info['id']
# Should still only have one peer! # Should still only have one peer!
@@ -662,7 +662,7 @@ class LightningDTests(BaseLightningDTests):
# Should get reasonable error if wrong key for peer. # Should get reasonable error if wrong key for peer.
self.assertRaisesRegex(ValueError, self.assertRaisesRegex(ValueError,
"Cryptographic handshake: ", "Cryptographic handshake: ",
l1.rpc.connect, '032cf15d1ad9c4a08d26eab1918f732d8ef8fdc6abb9640bf3db174372c491304e', 'localhost', l2.info['port']) l1.rpc.connect, '032cf15d1ad9c4a08d26eab1918f732d8ef8fdc6abb9640bf3db174372c491304e', 'localhost', l2.port)
def test_connect_by_gossip(self): def test_connect_by_gossip(self):
"""Test connecting to an unknown peer using node gossip """Test connecting to an unknown peer using node gossip
@@ -671,7 +671,7 @@ class LightningDTests(BaseLightningDTests):
l2 = self.node_factory.get_node() l2 = self.node_factory.get_node()
l3 = self.node_factory.get_node(options={"ipaddr": "127.0.0.1"}) l3 = self.node_factory.get_node(options={"ipaddr": "127.0.0.1"})
l2.rpc.connect(l3.info['id'], 'localhost', l3.info['port']) l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
# Nodes are gossiped only if they have channels # Nodes are gossiped only if they have channels
chanid = self.fund_channel(l2, l3, 10**6) chanid = self.fund_channel(l2, l3, 10**6)
@@ -681,7 +681,7 @@ class LightningDTests(BaseLightningDTests):
l2.daemon.wait_for_logs(['Received node_announcement for node {}'.format(l3.info['id'])]) l2.daemon.wait_for_logs(['Received node_announcement for node {}'.format(l3.info['id'])])
# Let l1 learn of l3 by node gossip # Let l1 learn of l3 by node gossip
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.daemon.wait_for_logs(['Received node_announcement for node {}'.format(l3.info['id'])]) l1.daemon.wait_for_logs(['Received node_announcement for node {}'.format(l3.info['id'])])
# Have l1 connect to l3 without explicit host and port. # Have l1 connect to l3 without explicit host and port.
@@ -695,31 +695,30 @@ class LightningDTests(BaseLightningDTests):
l3 = self.node_factory.get_node() l3 = self.node_factory.get_node()
# node@host # node@host
ret = l1.rpc.connect("{}@{}".format(l2.info['id'], 'localhost'), port=l2.info['port']) ret = l1.rpc.connect("{}@{}".format(l2.info['id'], 'localhost'), port=l2.port)
assert ret['id'] == l2.info['id'] assert ret['id'] == l2.info['id']
# node@host:port # node@host:port
ret = l1.rpc.connect("{}@localhost:{}".format(l3.info['id'], l3.info['port'])) ret = l1.rpc.connect("{}@localhost:{}".format(l3.info['id'], l3.port))
assert ret['id'] == l3.info['id'] assert ret['id'] == l3.info['id']
# node@[ipv6]:port --- not supported by our CI # node@[ipv6]:port --- not supported by our CI
# ret = l1.rpc.connect("{}@[::1]:{}".format(l3.info['id'], l3.info['port'])) # ret = l1.rpc.connect("{}@[::1]:{}".format(l3.info['id'], l3.port))
# assert ret['id'] == l3.info['id'] # assert ret['id'] == l3.info['id']
def test_reconnect_channel_peers(self): def test_reconnect_channel_peers(self):
l1 = self.node_factory.get_node(may_reconnect=True) l1 = self.node_factory.get_node(may_reconnect=True)
l2 = self.node_factory.get_node(may_reconnect=True) l2 = self.node_factory.get_node(may_reconnect=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
self.fund_channel(l1, l2, 10**6) self.fund_channel(l1, l2, 10**6)
l2.stop() l2.restart()
l2.daemon.start()
# Should reconnect. # Should reconnect.
wait_for(lambda: l1.rpc.listpeers(l2.info['id'])['peers'][0]['connected']) wait_for(lambda: l1.rpc.listpeers(l2.info['id'])['peers'][0]['connected'])
wait_for(lambda: l2.rpc.listpeers(l1.info['id'])['peers'][0]['connected']) wait_for(lambda: l2.rpc.listpeers(l1.info['id'])['peers'][0]['connected'])
# Connect command should succeed. # Connect command should succeed.
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# Stop l2 and wait for l1 to notice. # Stop l2 and wait for l1 to notice.
l2.stop() l2.stop()
@@ -728,18 +727,18 @@ class LightningDTests(BaseLightningDTests):
# Now should fail. # Now should fail.
self.assertRaisesRegex(ValueError, self.assertRaisesRegex(ValueError,
"Connection refused", "Connection refused",
l1.rpc.connect, l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect, l2.info['id'], 'localhost', l2.port)
# Wait for exponential backoff to give us a 2 second window. # Wait for exponential backoff to give us a 2 second window.
l1.daemon.wait_for_log('...will try again in 2 seconds') l1.daemon.wait_for_log('...will try again in 2 seconds')
# It should now succeed when it restarts. # It should now succeed when it restarts.
l2.daemon.start() l2.start()
# Multiples should be fine! # Multiples should be fine!
fut1 = self.executor.submit(l1.rpc.connect, l2.info['id'], 'localhost', l2.info['port']) fut1 = self.executor.submit(l1.rpc.connect, l2.info['id'], 'localhost', l2.port)
fut2 = self.executor.submit(l1.rpc.connect, l2.info['id'], 'localhost', l2.info['port']) fut2 = self.executor.submit(l1.rpc.connect, l2.info['id'], 'localhost', l2.port)
fut3 = self.executor.submit(l1.rpc.connect, l2.info['id'], 'localhost', l2.info['port']) fut3 = self.executor.submit(l1.rpc.connect, l2.info['id'], 'localhost', l2.port)
fut1.result(10) fut1.result(10)
fut2.result(10) fut2.result(10)
fut3.result(10) fut3.result(10)
@@ -1246,7 +1245,7 @@ class LightningDTests(BaseLightningDTests):
# l1-l2-l3-l4. # l1-l2-l3-l4.
def fund_from_to_payer(lsrc, ldst, lpayer): def fund_from_to_payer(lsrc, ldst, lpayer):
lsrc.rpc.connect(ldst.info['id'], 'localhost', ldst.info['port']) lsrc.rpc.connect(ldst.info['id'], 'localhost', ldst.port)
c = self.fund_channel(lsrc, ldst, 10000000) c = self.fund_channel(lsrc, ldst, 10000000)
self.wait_for_routes(lpayer, [c]) self.wait_for_routes(lpayer, [c])
@@ -1276,7 +1275,7 @@ class LightningDTests(BaseLightningDTests):
# l1 asks for a too-long locktime # l1 asks for a too-long locktime
l1 = self.node_factory.get_node(options={'locktime-blocks': 100}) l1 = self.node_factory.get_node(options={'locktime-blocks': 100})
l2 = self.node_factory.get_node(options={'max-locktime-blocks': 99}) l2 = self.node_factory.get_node(options={'max-locktime-blocks': 99})
ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
assert ret['id'] == l2.info['id'] assert ret['id'] == l2.info['id']
@@ -1387,7 +1386,7 @@ class LightningDTests(BaseLightningDTests):
l1.rpc.close, chan, False, 0) l1.rpc.close, chan, False, 0)
l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN') l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l2.daemon.start() l2.start()
l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE') l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE') l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
@@ -1416,7 +1415,7 @@ class LightningDTests(BaseLightningDTests):
l1.db_manip("CREATE TABLE version (version INTEGER);") l1.db_manip("CREATE TABLE version (version INTEGER);")
l1.db_manip("INSERT INTO version VALUES (1);") l1.db_manip("INSERT INTO version VALUES (1);")
l1.daemon.start() l1.start()
upgrades = l1.db_query("SELECT * from db_upgrades;") upgrades = l1.db_query("SELECT * from db_upgrades;")
assert len(upgrades) == 1 assert len(upgrades) == 1
assert(upgrades[0]['upgrade_from'] == 1) assert(upgrades[0]['upgrade_from'] == 1)
@@ -1465,7 +1464,7 @@ class LightningDTests(BaseLightningDTests):
}) })
p.feerate = feerate p.feerate = feerate
p.amount = amount p.amount = amount
l1.rpc.connect(p.info['id'], 'localhost', p.info['port']) l1.rpc.connect(p.info['id'], 'localhost', p.port)
peers.append(p) peers.append(p)
for p in peers: for p in peers:
@@ -1584,7 +1583,7 @@ class LightningDTests(BaseLightningDTests):
# Make locktime different, as we once had them reversed! # Make locktime different, as we once had them reversed!
l2 = self.node_factory.get_node(options={'locktime-blocks': 10}) l2 = self.node_factory.get_node(options={'locktime-blocks': 10})
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
self.give_funds(l1, 10**6 + 1000000) self.give_funds(l1, 10**6 + 1000000)
@@ -1671,7 +1670,7 @@ class LightningDTests(BaseLightningDTests):
l2 = self.node_factory.get_node(options=options) l2 = self.node_factory.get_node(options=options)
btc = l1.bitcoin btc = l1.bitcoin
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
self.fund_channel(l1, l2, 10**6) self.fund_channel(l1, l2, 10**6)
rhash = l2.rpc.invoice(10**8, 'onchaind_replay', 'desc')['payment_hash'] rhash = l2.rpc.invoice(10**8, 'onchaind_replay', 'desc')['payment_hash']
@@ -1722,7 +1721,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(disconnect=disconnects) l1 = self.node_factory.get_node(disconnect=disconnects)
l2 = self.node_factory.get_node() l2 = self.node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
self.fund_channel(l1, l2, 10**6) self.fund_channel(l1, l2, 10**6)
# Must be dust! # Must be dust!
@@ -1767,8 +1766,7 @@ class LightningDTests(BaseLightningDTests):
l2.daemon.wait_for_log('onchaind complete, forgetting peer') l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Restart l1, it should not crash! # Restart l1, it should not crash!
l1.stop() l1.restart()
l1.daemon.start()
# Now, 100 blocks and l1 should be done. # Now, 100 blocks and l1 should be done.
bitcoind.generate_block(6) bitcoind.generate_block(6)
@@ -1785,10 +1783,10 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(disconnect=disconnects) l1 = self.node_factory.get_node(disconnect=disconnects)
l2 = self.node_factory.get_node() l2 = self.node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l2 = self.node_factory.get_node() l2 = self.node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
self.fund_channel(l1, l2, 10**6) self.fund_channel(l1, l2, 10**6)
rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash'] rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash']
@@ -1856,8 +1854,8 @@ class LightningDTests(BaseLightningDTests):
l3 = self.node_factory.get_node() l3 = self.node_factory.get_node()
# l2 connects to both, so l1 can't reconnect and thus l2 drops to chain # l2 connects to both, so l1 can't reconnect and thus l2 drops to chain
l2.rpc.connect(l1.info['id'], 'localhost', l1.info['port']) l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l2.rpc.connect(l3.info['id'], 'localhost', l3.info['port']) l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
self.fund_channel(l2, l1, 10**6) self.fund_channel(l2, l1, 10**6)
c23 = self.fund_channel(l2, l3, 10**6) c23 = self.fund_channel(l2, l3, 10**6)
@@ -1929,7 +1927,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'], may_fail=True) l1 = self.node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'], may_fail=True)
l2 = self.node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit']) l2 = self.node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'])
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
self.fund_channel(l1, l2, 10**6) self.fund_channel(l1, l2, 10**6)
# Now, this will get stuck due to l1 commit being disabled.. # Now, this will get stuck due to l1 commit being disabled..
@@ -1992,7 +1990,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED*3-nocommit'], may_fail=True) l1 = self.node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED*3-nocommit'], may_fail=True)
l2 = self.node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED*3-nocommit']) l2 = self.node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED*3-nocommit'])
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
self.fund_channel(l1, l2, 10**6) self.fund_channel(l1, l2, 10**6)
# Move some across to l2. # Move some across to l2.
@@ -2059,7 +2057,7 @@ class LightningDTests(BaseLightningDTests):
l2 = self.node_factory.get_node(disconnect=disconnects, l2 = self.node_factory.get_node(disconnect=disconnects,
may_reconnect=True) may_reconnect=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
self.fund_channel(l1, l2, 10**6) self.fund_channel(l1, l2, 10**6)
rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash'] rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash']
@@ -2093,7 +2091,7 @@ class LightningDTests(BaseLightningDTests):
l1.stop() l1.stop()
l1.daemon.cmd_line.append('--override-fee-rates=20000/9000/2000') l1.daemon.cmd_line.append('--override-fee-rates=20000/9000/2000')
l1.daemon.start() l1.start()
# We recognize different proposal as ours. # We recognize different proposal as ours.
l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US') l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')
@@ -2133,7 +2131,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(options={'dev-no-reconnect': None}) l1 = self.node_factory.get_node(options={'dev-no-reconnect': None})
l2 = self.node_factory.get_node(disconnect=disconnects) l2 = self.node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
self.fund_channel(l1, l2, 10**6) self.fund_channel(l1, l2, 10**6)
rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash'] rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash']
@@ -2239,7 +2237,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(options={'dev-no-reconnect': None}) l1 = self.node_factory.get_node(options={'dev-no-reconnect': None})
l2 = self.node_factory.get_node(disconnect=disconnects) l2 = self.node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
self.fund_channel(l1, l2, 10**6) self.fund_channel(l1, l2, 10**6)
# This will fail at l2's end. # This will fail at l2's end.
@@ -2275,7 +2273,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(options={'dev-no-reconnect': None}) l1 = self.node_factory.get_node(options={'dev-no-reconnect': None})
l2 = self.node_factory.get_node(disconnect=disconnects) l2 = self.node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
self.fund_channel(l1, l2, 10**6) self.fund_channel(l1, l2, 10**6)
# This will fail at l2's end. # This will fail at l2's end.
@@ -2317,7 +2315,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(options={'dev-no-reconnect': None}) l1 = self.node_factory.get_node(options={'dev-no-reconnect': None})
l2 = self.node_factory.get_node(disconnect=disconnects) l2 = self.node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
self.fund_channel(l2, l1, 10**6) self.fund_channel(l2, l1, 10**6)
# This will fail at l2's end. # This will fail at l2's end.
@@ -2440,7 +2438,7 @@ class LightningDTests(BaseLightningDTests):
assert l2.daemon.is_in_log('Server started with public key .* alias {}' assert l2.daemon.is_in_log('Server started with public key .* alias {}'
.format(normal_name)) .format(normal_name))
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
self.fund_channel(l2, l1, 10**6) self.fund_channel(l2, l1, 10**6)
bitcoind.rpc.generate(6) bitcoind.rpc.generate(6)
@@ -2468,9 +2466,9 @@ class LightningDTests(BaseLightningDTests):
l3 = self.node_factory.get_node(options=opts) l3 = self.node_factory.get_node(options=opts)
l4 = self.node_factory.get_node(options=opts) l4 = self.node_factory.get_node(options=opts)
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l2.rpc.connect(l3.info['id'], 'localhost', l3.info['port']) l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
l3.rpc.connect(l4.info['id'], 'localhost', l4.info['port']) l3.rpc.connect(l4.info['id'], 'localhost', l4.port)
self.fund_channel(l1, l2, 10**6) self.fund_channel(l1, l2, 10**6)
self.fund_channel(l2, l3, 10**6) self.fund_channel(l2, l3, 10**6)
@@ -2581,11 +2579,11 @@ class LightningDTests(BaseLightningDTests):
may_reconnect=True) may_reconnect=True)
l2 = self.node_factory.get_node(may_reconnect=True) l2 = self.node_factory.get_node(may_reconnect=True)
l3 = self.node_factory.get_node() l3 = self.node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.openchannel(l2, 20000) l1.openchannel(l2, 20000)
# Now open new channels and everybody should sync # Now open new channels and everybody should sync
l2.rpc.connect(l3.info['id'], 'localhost', l3.info['port']) l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
l2.openchannel(l3, 20000) l2.openchannel(l3, 20000)
# Settle the gossip # Settle the gossip
@@ -2597,8 +2595,8 @@ class LightningDTests(BaseLightningDTests):
l2 = self.node_factory.get_node() l2 = self.node_factory.get_node()
l3 = self.node_factory.get_node() l3 = self.node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.connect(l3.info['id'], 'localhost', l3.info['port']) l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
self.fund_channel(l1, l2, 10**6) self.fund_channel(l1, l2, 10**6)
self.fund_channel(l1, l3, 10**6) self.fund_channel(l1, l3, 10**6)
@@ -2609,7 +2607,7 @@ class LightningDTests(BaseLightningDTests):
for i in range(len(nodes) - 1): for i in range(len(nodes) - 1):
src, dst = nodes[i], nodes[i + 1] src, dst = nodes[i], nodes[i + 1]
src.rpc.connect(dst.info['id'], 'localhost', dst.info['port']) src.rpc.connect(dst.info['id'], 'localhost', dst.port)
src.openchannel(dst, 20000) src.openchannel(dst, 20000)
# Allow announce messages. # Allow announce messages.
@@ -2648,7 +2646,7 @@ class LightningDTests(BaseLightningDTests):
# Connect 1 -> 2 -> 3. # Connect 1 -> 2 -> 3.
l1, l2 = self.connect() l1, l2 = self.connect()
l3 = self.node_factory.get_node() l3 = self.node_factory.get_node()
ret = l2.rpc.connect(l3.info['id'], 'localhost', l3.info['port']) ret = l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
assert ret['id'] == l3.info['id'] assert ret['id'] == l3.info['id']
@@ -2744,13 +2742,13 @@ class LightningDTests(BaseLightningDTests):
l2 = self.node_factory.get_node(options={'cltv-delta': 20, 'fee-base': 200, 'fee-per-satoshi': 2000}) l2 = self.node_factory.get_node(options={'cltv-delta': 20, 'fee-base': 200, 'fee-per-satoshi': 2000})
l3 = self.node_factory.get_node(options={'cltv-delta': 30, 'cltv-final': 9, 'fee-base': 300, 'fee-per-satoshi': 3000}) l3 = self.node_factory.get_node(options={'cltv-delta': 30, 'cltv-final': 9, 'fee-base': 300, 'fee-per-satoshi': 3000})
ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
assert ret['id'] == l2.info['id'] assert ret['id'] == l2.info['id']
l1.daemon.wait_for_log('Handing back peer .* to master') l1.daemon.wait_for_log('Handing back peer .* to master')
l2.daemon.wait_for_log('Handing back peer .* to master') l2.daemon.wait_for_log('Handing back peer .* to master')
ret = l2.rpc.connect(l3.info['id'], 'localhost', l3.info['port']) ret = l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
assert ret['id'] == l3.info['id'] assert ret['id'] == l3.info['id']
l2.daemon.wait_for_log('Handing back peer .* to master') l2.daemon.wait_for_log('Handing back peer .* to master')
@@ -2848,13 +2846,13 @@ class LightningDTests(BaseLightningDTests):
l2 = self.node_factory.get_node(options={'cltv-delta': 20, 'fee-base': 200, 'fee-per-satoshi': 2000}) l2 = self.node_factory.get_node(options={'cltv-delta': 20, 'fee-base': 200, 'fee-per-satoshi': 2000})
l3 = self.node_factory.get_node(options={'cltv-delta': 30, 'cltv-final': 9, 'fee-base': 300, 'fee-per-satoshi': 3000}) l3 = self.node_factory.get_node(options={'cltv-delta': 30, 'cltv-final': 9, 'fee-base': 300, 'fee-per-satoshi': 3000})
ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
assert ret['id'] == l2.info['id'] assert ret['id'] == l2.info['id']
l1.daemon.wait_for_log('Handing back peer .* to master') l1.daemon.wait_for_log('Handing back peer .* to master')
l2.daemon.wait_for_log('Handing back peer .* to master') l2.daemon.wait_for_log('Handing back peer .* to master')
ret = l2.rpc.connect(l3.info['id'], 'localhost', l3.info['port']) ret = l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
assert ret['id'] == l3.info['id'] assert ret['id'] == l3.info['id']
l2.daemon.wait_for_log('Handing back peer .* to master') l2.daemon.wait_for_log('Handing back peer .* to master')
@@ -2893,7 +2891,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(options={'dev-no-reconnect': None}) l1 = self.node_factory.get_node(options={'dev-no-reconnect': None})
l2 = self.node_factory.get_node(disconnect=['+WIRE_COMMITMENT_SIGNED']) l2 = self.node_factory.get_node(disconnect=['+WIRE_COMMITMENT_SIGNED'])
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
self.fund_channel(l1, l2, 10**6) self.fund_channel(l1, l2, 10**6)
f = self.executor.submit(self.pay, l1, l2, 31337000) f = self.executor.submit(self.pay, l1, l2, 31337000)
l1.daemon.wait_for_log(r'HTLC out 0 RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION') l1.daemon.wait_for_log(r'HTLC out 0 RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')
@@ -2909,7 +2907,7 @@ class LightningDTests(BaseLightningDTests):
l2.rpc.dev_fail(l1.info['id']) l2.rpc.dev_fail(l1.info['id'])
l2.stop() l2.stop()
l1.bitcoin.rpc.generate(1) l1.bitcoin.rpc.generate(1)
l1.daemon.start() l1.start()
assert l1.daemon.is_in_log(r'Loaded 1 HTLC signatures from DB') assert l1.daemon.is_in_log(r'Loaded 1 HTLC signatures from DB')
l1.daemon.wait_for_logs([ l1.daemon.wait_for_logs([
@@ -2938,7 +2936,7 @@ class LightningDTests(BaseLightningDTests):
options={'dev-no-reconnect': None}) options={'dev-no-reconnect': None})
l2 = self.node_factory.get_node() l2 = self.node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
chanid = self.fund_channel(l1, l2, 10**6) chanid = self.fund_channel(l1, l2, 10**6)
# Wait for route propagation. # Wait for route propagation.
@@ -2994,7 +2992,7 @@ class LightningDTests(BaseLightningDTests):
options={'dev-no-reconnect': None}) options={'dev-no-reconnect': None})
l2 = self.node_factory.get_node() l2 = self.node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
chanid = self.fund_channel(l1, l2, 10**6) chanid = self.fund_channel(l1, l2, 10**6)
self.wait_for_routes(l1, [chanid]) self.wait_for_routes(l1, [chanid])
@@ -3044,12 +3042,12 @@ class LightningDTests(BaseLightningDTests):
l2 = self.node_factory.get_node() l2 = self.node_factory.get_node()
self.assertRaises(ValueError, l1.rpc.connect, self.assertRaises(ValueError, l1.rpc.connect,
l2.info['id'], 'localhost', l2.info['port']) l2.info['id'], 'localhost', l2.port)
self.assertRaises(ValueError, l1.rpc.connect, self.assertRaises(ValueError, l1.rpc.connect,
l2.info['id'], 'localhost', l2.info['port']) l2.info['id'], 'localhost', l2.port)
self.assertRaises(ValueError, l1.rpc.connect, self.assertRaises(ValueError, l1.rpc.connect,
l2.info['id'], 'localhost', l2.info['port']) l2.info['id'], 'localhost', l2.port)
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# Should have 3 connect fails. # Should have 3 connect fails.
for d in disconnects: for d in disconnects:
@@ -3074,12 +3072,12 @@ class LightningDTests(BaseLightningDTests):
self.give_funds(l1, 2000000) self.give_funds(l1, 2000000)
for d in disconnects: for d in disconnects:
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
self.assertRaises(ValueError, l1.rpc.fundchannel, l2.info['id'], 20000) self.assertRaises(ValueError, l1.rpc.fundchannel, l2.info['id'], 20000)
assert l1.rpc.getpeer(l2.info['id']) is None assert l1.rpc.getpeer(l2.info['id']) is None
# This one will succeed. # This one will succeed.
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], 20000) l1.rpc.fundchannel(l2.info['id'], 20000)
# Should still only have one peer! # Should still only have one peer!
@@ -3098,12 +3096,12 @@ class LightningDTests(BaseLightningDTests):
self.give_funds(l1, 2000000) self.give_funds(l1, 2000000)
for d in disconnects: for d in disconnects:
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
self.assertRaises(ValueError, l1.rpc.fundchannel, l2.info['id'], 20000) self.assertRaises(ValueError, l1.rpc.fundchannel, l2.info['id'], 20000)
assert l1.rpc.getpeer(l2.info['id']) is None assert l1.rpc.getpeer(l2.info['id']) is None
# This one will succeed. # This one will succeed.
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], 20000) l1.rpc.fundchannel(l2.info['id'], 20000)
# Should still only have one peer! # Should still only have one peer!
@@ -3120,7 +3118,7 @@ class LightningDTests(BaseLightningDTests):
self.give_funds(l1, 2000000) self.give_funds(l1, 2000000)
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
self.assertRaises(ValueError, l1.rpc.fundchannel, l2.info['id'], 20000) self.assertRaises(ValueError, l1.rpc.fundchannel, l2.info['id'], 20000)
# Fundee remembers, funder doesn't. # Fundee remembers, funder doesn't.
@@ -3137,7 +3135,7 @@ class LightningDTests(BaseLightningDTests):
self.give_funds(l1, 2000000) self.give_funds(l1, 2000000)
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], 20000) l1.rpc.fundchannel(l2.info['id'], 20000)
# They haven't forgotten each other. # They haven't forgotten each other.
@@ -3160,7 +3158,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(may_reconnect=True) l1 = self.node_factory.get_node(may_reconnect=True)
l2 = self.node_factory.get_node(disconnect=disconnects, l2 = self.node_factory.get_node(disconnect=disconnects,
may_reconnect=True) may_reconnect=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
self.give_funds(l1, 2000000) self.give_funds(l1, 2000000)
@@ -3169,7 +3167,7 @@ class LightningDTests(BaseLightningDTests):
assert l1.rpc.getpeer(l2.info['id']) is None assert l1.rpc.getpeer(l2.info['id']) is None
# Reconnect. # Reconnect.
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# We should get a message about reconnecting, but order unsynced. # We should get a message about reconnecting, but order unsynced.
l2.daemon.wait_for_logs(['gossipd.*reconnect for active peer', l2.daemon.wait_for_logs(['gossipd.*reconnect for active peer',
@@ -3191,7 +3189,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(disconnect=disconnects, l1 = self.node_factory.get_node(disconnect=disconnects,
may_reconnect=True) may_reconnect=True)
l2 = self.node_factory.get_node(may_reconnect=True) l2 = self.node_factory.get_node(may_reconnect=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
self.fund_channel(l1, l2, 10**6) self.fund_channel(l1, l2, 10**6)
@@ -3205,7 +3203,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(disconnect=disconnects, l1 = self.node_factory.get_node(disconnect=disconnects,
may_reconnect=True) may_reconnect=True)
l2 = self.node_factory.get_node(may_reconnect=True) l2 = self.node_factory.get_node(may_reconnect=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
self.fund_channel(l1, l2, 10**6) self.fund_channel(l1, l2, 10**6)
@@ -3235,7 +3233,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(disconnect=disconnects, l1 = self.node_factory.get_node(disconnect=disconnects,
may_reconnect=True) may_reconnect=True)
l2 = self.node_factory.get_node(may_reconnect=True) l2 = self.node_factory.get_node(may_reconnect=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
self.fund_channel(l1, l2, 10**6) self.fund_channel(l1, l2, 10**6)
@ -3262,7 +3260,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(may_reconnect=True) l1 = self.node_factory.get_node(may_reconnect=True)
l2 = self.node_factory.get_node(disconnect=disconnects, l2 = self.node_factory.get_node(disconnect=disconnects,
may_reconnect=True) may_reconnect=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
self.fund_channel(l1, l2, 10**6) self.fund_channel(l1, l2, 10**6)
@ -3293,7 +3291,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(may_reconnect=True) l1 = self.node_factory.get_node(may_reconnect=True)
l2 = self.node_factory.get_node(disconnect=disconnects, l2 = self.node_factory.get_node(disconnect=disconnects,
may_reconnect=True) may_reconnect=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
self.fund_channel(l1, l2, 10**6) self.fund_channel(l1, l2, 10**6)
@ -3315,7 +3313,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(disconnect=disconnects, l1 = self.node_factory.get_node(disconnect=disconnects,
may_reconnect=True) may_reconnect=True)
l2 = self.node_factory.get_node(may_reconnect=True) l2 = self.node_factory.get_node(may_reconnect=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
chan = self.fund_channel(l1, l2, 10**6) chan = self.fund_channel(l1, l2, 10**6)
self.pay(l1, l2, 200000000) self.pay(l1, l2, 200000000)
@ -3342,7 +3340,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node() l1 = self.node_factory.get_node()
l2 = self.node_factory.get_node(options={'anchor-confirms': 3}) l2 = self.node_factory.get_node(options={'anchor-confirms': 3})
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
self.give_funds(l1, 10**6 + 1000000) self.give_funds(l1, 10**6 + 1000000)
chanid = l1.rpc.fundchannel(l2.info['id'], 10**6)['channel_id'] chanid = l1.rpc.fundchannel(l2.info['id'], 10**6)['channel_id']
@ -3381,7 +3379,7 @@ class LightningDTests(BaseLightningDTests):
'+WIRE_CLOSING_SIGNED'] '+WIRE_CLOSING_SIGNED']
l1 = self.node_factory.get_node(disconnect=disconnects, may_reconnect=True) l1 = self.node_factory.get_node(disconnect=disconnects, may_reconnect=True)
l2 = self.node_factory.get_node(may_reconnect=True) l2 = self.node_factory.get_node(may_reconnect=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
chan = self.fund_channel(l1, l2, 10**6) chan = self.fund_channel(l1, l2, 10**6)
self.pay(l1, l2, 200000000) self.pay(l1, l2, 200000000)
@ -3411,7 +3409,7 @@ class LightningDTests(BaseLightningDTests):
l2 = self.node_factory.get_node(random_hsm=True) l2 = self.node_factory.get_node(random_hsm=True)
# connect # connect
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# fund a bech32 address and then open a channel with it # fund a bech32 address and then open a channel with it
res = l1.openchannel(l2, 20000, 'bech32') res = l1.openchannel(l2, 20000, 'bech32')
@ -3592,7 +3590,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(random_hsm=True) l1 = self.node_factory.get_node(random_hsm=True)
max_locktime = 3 * 6 * 24 max_locktime = 3 * 6 * 24
l2 = self.node_factory.get_node(options={'locktime-blocks': max_locktime + 1}) l2 = self.node_factory.get_node(options={'locktime-blocks': max_locktime + 1})
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
funds = 1000000 funds = 1000000
@ -3612,7 +3610,7 @@ class LightningDTests(BaseLightningDTests):
# Restart l2 without ridiculous locktime. # Restart l2 without ridiculous locktime.
del l2.daemon.opts['locktime-blocks'] del l2.daemon.opts['locktime-blocks']
l2.restart() l2.restart()
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# We don't have enough left to cover fees if we try to spend it all. # We don't have enough left to cover fees if we try to spend it all.
self.assertRaisesRegex(ValueError, r'Cannot afford funding transaction', self.assertRaisesRegex(ValueError, r'Cannot afford funding transaction',
@ -3629,7 +3627,7 @@ class LightningDTests(BaseLightningDTests):
"""Try to create a giant channel""" """Try to create a giant channel"""
l1 = self.node_factory.get_node() l1 = self.node_factory.get_node()
l2 = self.node_factory.get_node() l2 = self.node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# Send funds. # Send funds.
amount = 2**24 amount = 2**24
@ -3654,7 +3652,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(may_reconnect=True) l1 = self.node_factory.get_node(may_reconnect=True)
l2 = self.node_factory.get_node(options={'anchor-confirms': 3}, l2 = self.node_factory.get_node(options={'anchor-confirms': 3},
may_reconnect=True) may_reconnect=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
self.give_funds(l1, 10**6 + 1000000) self.give_funds(l1, 10**6 + 1000000)
l1.rpc.fundchannel(l2.info['id'], 10**6)['tx'] l1.rpc.fundchannel(l2.info['id'], 10**6)['tx']
@ -3666,7 +3664,7 @@ class LightningDTests(BaseLightningDTests):
bitcoind.generate_block(120) bitcoind.generate_block(120)
# Restart # Restart
l1.daemon.start() l1.start()
# All should be good. # All should be good.
l1.daemon.wait_for_log(' to CHANNELD_NORMAL') l1.daemon.wait_for_log(' to CHANNELD_NORMAL')
@ -3687,7 +3685,7 @@ class LightningDTests(BaseLightningDTests):
bitcoind.generate_block(120) bitcoind.generate_block(120)
# Restart # Restart
l1.daemon.start() l1.start()
sync_blockheight([l1]) sync_blockheight([l1])
assert len(l1.rpc.listfunds()['outputs']) == 1 assert len(l1.rpc.listfunds()['outputs']) == 1
@ -3729,7 +3727,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(may_reconnect=True) l1 = self.node_factory.get_node(may_reconnect=True)
l2 = self.node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'], l2 = self.node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'],
may_reconnect=True) may_reconnect=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# Neither node should have a channel open, they are just connected # Neither node should have a channel open, they are just connected
for n in (l1, l2): for n in (l1, l2):
@ -3760,7 +3758,7 @@ class LightningDTests(BaseLightningDTests):
wait_for(lambda: 'connected' not in l1.rpc.listpeers()['peers'][0]['channels'][0]) wait_for(lambda: 'connected' not in l1.rpc.listpeers()['peers'][0]['channels'][0])
# Now restart l2 and it should reload peers/channels from the DB # Now restart l2 and it should reload peers/channels from the DB
l2.daemon.start() l2.start()
wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 1) wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 1)
# Wait for the restored HTLC to finish # Wait for the restored HTLC to finish
@ -3776,8 +3774,7 @@ class LightningDTests(BaseLightningDTests):
assert l2.rpc.listpeers()['peers'][0]['channels'][0]['msatoshi_to_us'] == 20000 assert l2.rpc.listpeers()['peers'][0]['channels'][0]['msatoshi_to_us'] == 20000
# Finally restart l1, and make sure it remembers # Finally restart l1, and make sure it remembers
l1.stop() l1.restart()
l1.daemon.start()
assert l1.rpc.listpeers()['peers'][0]['channels'][0]['msatoshi_to_us'] == 99980000 assert l1.rpc.listpeers()['peers'][0]['channels'][0]['msatoshi_to_us'] == 99980000
# Now make sure l1 is watching for unilateral closes # Now make sure l1 is watching for unilateral closes
@ -3796,7 +3793,7 @@ class LightningDTests(BaseLightningDTests):
options={'dev-no-reconnect': None}, options={'dev-no-reconnect': None},
may_reconnect=True) may_reconnect=True)
l2 = self.node_factory.get_node(may_reconnect=True) l2 = self.node_factory.get_node(may_reconnect=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
chanid = self.fund_channel(l1, l2, 100000) chanid = self.fund_channel(l1, l2, 100000)
@ -3815,7 +3812,7 @@ class LightningDTests(BaseLightningDTests):
del l1.daemon.opts['dev-disconnect'] del l1.daemon.opts['dev-disconnect']
# Should reconnect, and sort the payment out. # Should reconnect, and sort the payment out.
l1.daemon.start() l1.start()
wait_for(lambda: l1.rpc.listpayments()['payments'][0]['status'] != 'pending') wait_for(lambda: l1.rpc.listpayments()['payments'][0]['status'] != 'pending')
@ -3836,7 +3833,7 @@ class LightningDTests(BaseLightningDTests):
options={'dev-no-reconnect': None}, options={'dev-no-reconnect': None},
may_reconnect=True) may_reconnect=True)
l2 = self.node_factory.get_node(may_reconnect=True) l2 = self.node_factory.get_node(may_reconnect=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
self.fund_channel(l1, l2, 100000) self.fund_channel(l1, l2, 100000)
@ -3859,7 +3856,7 @@ class LightningDTests(BaseLightningDTests):
time.sleep(3) time.sleep(3)
# Should reconnect, and fail the payment # Should reconnect, and fail the payment
l1.daemon.start() l1.start()
wait_for(lambda: l1.rpc.listpayments()['payments'][0]['status'] != 'pending') wait_for(lambda: l1.rpc.listpayments()['payments'][0]['status'] != 'pending')
@ -3875,7 +3872,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(disconnect=['=WIRE_UPDATE_ADD_HTLC-nocommit']) l1 = self.node_factory.get_node(disconnect=['=WIRE_UPDATE_ADD_HTLC-nocommit'])
l2 = self.node_factory.get_node() l2 = self.node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
self.fund_channel(l1, l2, 100000) self.fund_channel(l1, l2, 100000)
@ -3908,8 +3905,8 @@ class LightningDTests(BaseLightningDTests):
l3 = self.node_factory.get_node() l3 = self.node_factory.get_node()
# l2 connects to both, so l1 can't reconnect and thus l2 drops to chain # l2 connects to both, so l1 can't reconnect and thus l2 drops to chain
l2.rpc.connect(l1.info['id'], 'localhost', l1.info['port']) l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l2.rpc.connect(l3.info['id'], 'localhost', l3.info['port']) l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
self.fund_channel(l2, l1, 10**6) self.fund_channel(l2, l1, 10**6)
self.fund_channel(l2, l3, 10**6) self.fund_channel(l2, l3, 10**6)
@ -4041,8 +4038,7 @@ class LightningDTests(BaseLightningDTests):
wait_for(lambda: [c['active'] for c in l1.rpc.listchannels()['channels']] == [True, True]) wait_for(lambda: [c['active'] for c in l1.rpc.listchannels()['channels']] == [True, True])
# Restart l2, will cause l1 to reconnect # Restart l2, will cause l1 to reconnect
l2.stop() l2.restart()
l2.daemon.start()
# Now they should sync and re-establish again # Now they should sync and re-establish again
l1.daemon.wait_for_logs(['Received channel_update for channel \\d+:1:1.1.', l1.daemon.wait_for_logs(['Received channel_update for channel \\d+:1:1.1.',
@ -4139,7 +4135,7 @@ class LightningDTests(BaseLightningDTests):
# Try with node which sets --ignore-fee-limits # Try with node which sets --ignore-fee-limits
l3 = self.node_factory.get_node(options={'ignore-fee-limits': 'true'}) l3 = self.node_factory.get_node(options={'ignore-fee-limits': 'true'})
l1.rpc.connect(l3.info['id'], 'localhost', l3.info['port']) l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
chan = self.fund_channel(l1, l3, 10**6) chan = self.fund_channel(l1, l3, 10**6)
@ -4163,7 +4159,7 @@ class LightningDTests(BaseLightningDTests):
disconnects = ['+WIRE_COMMITMENT_SIGNED'] disconnects = ['+WIRE_COMMITMENT_SIGNED']
l1 = self.node_factory.get_node(disconnect=disconnects, may_reconnect=True) l1 = self.node_factory.get_node(disconnect=disconnects, may_reconnect=True)
l2 = self.node_factory.get_node(may_reconnect=True) l2 = self.node_factory.get_node(may_reconnect=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
chan = self.fund_channel(l1, l2, 10**6) chan = self.fund_channel(l1, l2, 10**6)
@ -4202,7 +4198,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node(options={'debug-subdaemon-io': 'channeld', l1 = self.node_factory.get_node(options={'debug-subdaemon-io': 'channeld',
'log-level': 'io'}) 'log-level': 'io'})
l2 = self.node_factory.get_node() l2 = self.node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# Fundchannel manually so we get channeld pid. # Fundchannel manually so we get channeld pid.
self.give_funds(l1, 10**6 + 1000000) self.give_funds(l1, 10**6 + 1000000)
@ -4292,7 +4288,7 @@ class LightningDTests(BaseLightningDTests):
# See utils.py # See utils.py
assert configs['bitcoin-datadir'] == bitcoind.bitcoin_dir assert configs['bitcoin-datadir'] == bitcoind.bitcoin_dir
assert configs['lightning-dir'] == l1.daemon.lightning_dir assert configs['lightning-dir'] == l1.daemon.lightning_dir
assert configs['port'] == l1.info['port'] assert configs['port'] == l1.port
assert configs['allow-deprecated-apis'] is False assert configs['allow-deprecated-apis'] is False
assert configs['override-fee-rates'] == '15000/7500/1000' assert configs['override-fee-rates'] == '15000/7500/1000'
assert configs['network'] == 'regtest' assert configs['network'] == 'regtest'
@ -4311,7 +4307,7 @@ class LightningDTests(BaseLightningDTests):
for i in range(3): for i in range(3):
# FIXME: we shouldn't disconnect on close? # FIXME: we shouldn't disconnect on close?
ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
assert ret['id'] == l2.info['id'] assert ret['id'] == l2.info['id']
l1.daemon.wait_for_log('Handing back peer .* to master') l1.daemon.wait_for_log('Handing back peer .* to master')
@ -4424,7 +4420,7 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node() l1 = self.node_factory.get_node()
l2 = self.node_factory.get_node() l2 = self.node_factory.get_node()
self.give_funds(l1, 10**6) self.give_funds(l1, 10**6)
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], 10**5) l1.rpc.fundchannel(l2.info['id'], 10**5)
assert len(l1.rpc.listpeers()['peers']) == 1 assert len(l1.rpc.listpeers()['peers']) == 1
@ -4517,8 +4513,8 @@ class LightningDTests(BaseLightningDTests):
l1 = self.node_factory.get_node() l1 = self.node_factory.get_node()
l2 = self.node_factory.get_node() l2 = self.node_factory.get_node()
l3 = self.node_factory.get_node() l3 = self.node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.info['port']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.connect(l3.info['id'], 'localhost', l3.info['port']) l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
# Gossiping # Gossiping
assert l1.rpc.getpeer(l2.info['id'])['state'] == "GOSSIPING" assert l1.rpc.getpeer(l2.info['id'])['state'] == "GOSSIPING"
@ -4575,7 +4571,7 @@ class LightningDTests(BaseLightningDTests):
l1.daemon.opts['rescan'] = -500000 l1.daemon.opts['rescan'] = -500000
l1.stop() l1.stop()
btc.rpc.generate(4) btc.rpc.generate(4)
l1.daemon.start() l1.start()
l1.daemon.wait_for_log(r'Adding block 105') l1.daemon.wait_for_log(r'Adding block 105')
assert not l1.daemon.is_in_log(r'Adding block 102') assert not l1.daemon.is_in_log(r'Adding block 102')

View File

@ -367,6 +367,10 @@ class LightningNode(object):
c.close() c.close()
db.close() db.close()
def start(self):
self.daemon.start()
self.port = self.rpc.getinfo()['port']
def stop(self, timeout=10): def stop(self, timeout=10):
""" Attempt to do a clean shutdown, but kill if it hangs """ Attempt to do a clean shutdown, but kill if it hangs
""" """
@ -403,7 +407,7 @@ class LightningNode(object):
else: else:
self.daemon.stop() self.daemon.stop()
self.daemon.start() self.start()
def fund_channel(self, l2, amount): def fund_channel(self, l2, amount):