Mirror of https://github.com/ElementsProject/lightning.git, synced 2025-01-19 05:44:12 +01:00
pytest: rename lightning nodes to reduce confusion.
I was trying to debug test_zeroconf_open and getting very confused. The reason: l0 is lightning-1, l1 is lightning-2, etc! And there are two other tests where an l0 has been added at the front: fix them all to avoid future confusion!

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
parent 2a92ccf9f2
commit f3cedb9aa7
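The rename lines up the Python variable names with the directories and log prefixes that the test fixture numbers from 1. A minimal sketch of the idea (assuming the pyln-testing node_factory fixture these tests use, and that each node's daemon.lightning_dir contains its lightning-<N> directory; both details are assumptions of this illustration, not part of the commit):

def test_node_names_match_directories(node_factory):
    # Hypothetical check: node_factory numbers nodes from 1, so calling the
    # first node l1 (rather than l0) keeps the variable names in sync with
    # the lightning-1, lightning-2, ... directories seen while debugging.
    l1, l2, l3 = node_factory.get_nodes(3)
    for n, node in enumerate([l1, l2, l3], start=1):
        assert f"lightning-{n}" in str(node.daemon.lightning_dir)

With that convention, the hunks below simply shift every l0/l1/l2 reference up by one.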
@@ -155,11 +155,11 @@ def test_invoice_preimage(node_factory):
 def test_invoice_routeboost(node_factory, bitcoind):
     """Test routeboost 'r' hint in bolt11 invoice.
     """
-    l0, l1, l2 = node_factory.line_graph(3, fundamount=2 * (10**5), wait_for_announce=True)
+    l1, l2, l3 = node_factory.line_graph(3, fundamount=2 * (10**5), wait_for_announce=True)

     # Check routeboost.
     # Make invoice and pay it
-    inv = l2.rpc.invoice(amount_msat=123456, label="inv1", description="?")
+    inv = l3.rpc.invoice(amount_msat=123456, label="inv1", description="?")
     # Check routeboost.
     assert 'warning_private_unused' not in inv
     assert 'warning_capacity' not in inv
@@ -167,19 +167,19 @@ def test_invoice_routeboost(node_factory, bitcoind):
     assert 'warning_deadends' not in inv
     assert 'warning_mpp' not in inv
     # Route array has single route with single element.
-    r = only_one(only_one(l1.rpc.decodepay(inv['bolt11'])['routes']))
-    assert r['pubkey'] == l1.info['id']
-    assert r['short_channel_id'] == l2.rpc.listpeerchannels(l1.info['id'])['channels'][0]['short_channel_id']
+    r = only_one(only_one(l2.rpc.decodepay(inv['bolt11'])['routes']))
+    assert r['pubkey'] == l2.info['id']
+    assert r['short_channel_id'] == l3.rpc.listpeerchannels(l2.info['id'])['channels'][0]['short_channel_id']
     assert r['fee_base_msat'] == 1
     assert r['fee_proportional_millionths'] == 10
     assert r['cltv_expiry_delta'] == 6

-    # Pay it (and make sure it's fully resolved before we take l1 offline!)
-    l1.rpc.pay(inv['bolt11'])
-    wait_channel_quiescent(l1, l2)
+    # Pay it (and make sure it's fully resolved before we take l2 offline!)
+    l2.rpc.pay(inv['bolt11'])
+    wait_channel_quiescent(l2, l3)

-    # Due to reserve & fees, l1 doesn't have capacity to pay this.
-    inv = l2.rpc.invoice(amount_msat=2 * (10**8) - 123456, label="inv2", description="?")
+    # Due to reserve & fees, l2 doesn't have capacity to pay this.
+    inv = l3.rpc.invoice(amount_msat=2 * (10**8) - 123456, label="inv2", description="?")
     # Check warning
     assert 'warning_capacity' in inv
     assert 'warning_private_unused' not in inv
@@ -187,10 +187,10 @@ def test_invoice_routeboost(node_factory, bitcoind):
     assert 'warning_deadends' not in inv
     assert 'warning_mpp' not in inv

-    l1.rpc.disconnect(l2.info['id'], True)
-    wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
+    l2.rpc.disconnect(l3.info['id'], True)
+    wait_for(lambda: not only_one(l3.rpc.listpeers(l2.info['id'])['peers'])['connected'])

-    inv = l2.rpc.invoice(123456, label="inv3", description="?")
+    inv = l3.rpc.invoice(123456, label="inv3", description="?")
     # Check warning.
     assert 'warning_private_unused' not in inv
     assert 'warning_capacity' not in inv
@@ -198,14 +198,14 @@ def test_invoice_routeboost(node_factory, bitcoind):
     assert 'warning_offline' in inv
     assert 'warning_mpp' not in inv

-    # Close l0, l2 will not use l1 at all.
-    l0.rpc.close(l1.info['id'])
-    l0.wait_for_channel_onchain(l1.info['id'])
+    # Close l1, l3 will not use l2 at all.
+    l1.rpc.close(l2.info['id'])
+    l1.wait_for_channel_onchain(l2.info['id'])
     bitcoind.generate_block(100)

-    # l2 has to notice channel is gone.
-    wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 2)
-    inv = l2.rpc.invoice(123456, label="inv4", description="?")
+    # l3 has to notice channel is gone.
+    wait_for(lambda: len(l3.rpc.listchannels()['channels']) == 2)
+    inv = l3.rpc.invoice(123456, label="inv4", description="?")
     # Check warning.
     assert 'warning_deadends' in inv
     assert 'warning_private_unused' not in inv
@@ -1631,8 +1631,8 @@ def test_zeroconf_open(bitcoind, node_factory):
     """
     plugin_path = Path(__file__).parent / "plugins" / "zeroconf-selective.py"

-    # Without l0->l1, l2 doesn't add a routehint since l1 looks like a deadend
-    l0, l1, l2 = node_factory.get_nodes(3, opts=[
+    # Without l1->l2, l3 doesn't add a routehint since l2 looks like a deadend
+    l1, l2, l3 = node_factory.get_nodes(3, opts=[
         {},
         {},
         {
@@ -1641,62 +1641,62 @@ def test_zeroconf_open(bitcoind, node_factory):
         },
     ])

-    node_factory.join_nodes([l0, l1], wait_for_announce=True)
+    node_factory.join_nodes([l1, l2], wait_for_announce=True)

     # Try to open a mindepth=0 channel
-    l1.fundwallet(10**6)
+    l2.fundwallet(10**6)

-    l1.connect(l2)
-    assert (int(l1.rpc.listpeers()['peers'][0]['features'], 16) >> 50) & 0x02 != 0
+    l2.connect(l3)
+    assert (int(l2.rpc.listpeers()['peers'][0]['features'], 16) >> 50) & 0x02 != 0

-    # Now start the negotiation, l1 should have negotiated zeroconf,
-    # and use their own mindepth=6, while l2 uses mindepth=2 from the
+    # Now start the negotiation, l2 should have negotiated zeroconf,
+    # and use their own mindepth=6, while l3 uses mindepth=2 from the
     # plugin
-    ret = l1.rpc.fundchannel(l2.info['id'], 'all', mindepth=0)
+    ret = l2.rpc.fundchannel(l3.info['id'], 'all', mindepth=0)
     if TEST_NETWORK == 'regtest':
         channel_type = {'bits': [12, 22, 50], 'names': ['static_remotekey/even', 'anchors_zero_fee_htlc_tx/even', 'zeroconf/even']}
     else:
         channel_type = {'bits': [12, 50], 'names': ['static_remotekey/even', 'zeroconf/even']}
     assert ret['channel_type'] == channel_type
-    assert only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])['channel_type'] == channel_type
+    assert only_one(l2.rpc.listpeerchannels(l3.info['id'])['channels'])['channel_type'] == channel_type

-    assert l1.db.query('SELECT minimum_depth FROM channels WHERE minimum_depth != 1') == [{'minimum_depth': 0}]
-    assert l2.db.query('SELECT minimum_depth FROM channels') == [{'minimum_depth': 0}]
+    assert l2.db.query('SELECT minimum_depth FROM channels WHERE minimum_depth != 1') == [{'minimum_depth': 0}]
+    assert l3.db.query('SELECT minimum_depth FROM channels') == [{'minimum_depth': 0}]

-    l1.daemon.wait_for_logs([
-        r'peer_in WIRE_CHANNEL_READY',
-        r'Peer told us that they\'ll use alias=[0-9x]+ for this channel',
-    ])
     l2.daemon.wait_for_logs([
         r'peer_in WIRE_CHANNEL_READY',
         r'Peer told us that they\'ll use alias=[0-9x]+ for this channel',
     ])
+    l3.daemon.wait_for_logs([
+        r'peer_in WIRE_CHANNEL_READY',
+        r'Peer told us that they\'ll use alias=[0-9x]+ for this channel',
+    ])

-    wait_for(lambda: [c['state'] for c in l1.rpc.listpeerchannels()['channels']] == ['CHANNELD_NORMAL'] * 2)
-    wait_for(lambda: only_one(l2.rpc.listpeerchannels()['channels'])['state'] == 'CHANNELD_NORMAL')
-    wait_for(lambda: l2.rpc.listincoming()['incoming'] != [])
+    wait_for(lambda: [c['state'] for c in l2.rpc.listpeerchannels()['channels']] == ['CHANNELD_NORMAL'] * 2)
+    wait_for(lambda: only_one(l3.rpc.listpeerchannels()['channels'])['state'] == 'CHANNELD_NORMAL')
+    wait_for(lambda: l3.rpc.listincoming()['incoming'] != [])

-    # Make sure l2 sees l0->l1
-    wait_for(lambda: l2.rpc.listchannels() != {'channels': []})
+    # Make sure l3 sees l1->l2
+    wait_for(lambda: l3.rpc.listchannels() != {'channels': []})

-    inv = l2.rpc.invoice(10**8, 'lbl', 'desc')['bolt11']
-    details = l1.rpc.decodepay(inv)
+    inv = l3.rpc.invoice(10**8, 'lbl', 'desc')['bolt11']
+    details = l2.rpc.decodepay(inv)
     pprint(details)
     assert('routes' in details and len(details['routes']) == 1)
     hop = details['routes'][0][0]  # First (and only) hop of hint 0
-    l1alias = only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])['alias']['local']
-    assert(hop['pubkey'] == l1.info['id'])  # l1 is the entrypoint
-    assert(hop['short_channel_id'] == l1alias)  # Alias has to make sense to entrypoint
-    l1.rpc.pay(inv)
+    l2alias = only_one(l2.rpc.listpeerchannels(l3.info['id'])['channels'])['alias']['local']
+    assert(hop['pubkey'] == l2.info['id'])  # l2 is the entrypoint
+    assert(hop['short_channel_id'] == l2alias)  # Alias has to make sense to entrypoint
+    l2.rpc.pay(inv)

     # Ensure lightningd knows about the balance change before
     # attempting the other way around.
-    l2.daemon.wait_for_log(r'Balance [0-9]+msat -> [0-9]+msat')
+    l3.daemon.wait_for_log(r'Balance [0-9]+msat -> [0-9]+msat')

     # Inverse payments should work too
-    pprint(l2.rpc.listpeers())
-    inv = l1.rpc.invoice(10**5, 'lbl', 'desc')['bolt11']
-    l2.rpc.pay(inv)
+    pprint(l3.rpc.listpeers())
+    inv = l2.rpc.invoice(10**5, 'lbl', 'desc')['bolt11']
+    l3.rpc.pay(inv)


 def test_zeroconf_public(bitcoind, node_factory, chainparams):
@@ -2167,55 +2167,55 @@ def test_setchannel_usage(node_factory, bitcoind):
 def test_setchannel_state(node_factory, bitcoind):
     # TEST SETUP
     #
-    # [l0] --> [l1] --> [l2]
+    # [l1] --> [l2] --> [l3]
     #
-    # Initiate channel [l1,l2] and try to set feerates other states than
+    # Initiate channel [l2,l3] and try to set feerates other states than
     # CHANNELD_NORMAL or CHANNELD_AWAITING_LOCKIN. Should raise error.
-    # Use l0 to make a forward through l1/l2 for testing.
+    # Use l1 to make a forward through l2/l3 for testing.
     DEF_BASE = 0
     DEF_PPM = 0

-    l0, l1, l2 = node_factory.get_nodes(3, opts={
+    l1, l2, l3 = node_factory.get_nodes(3, opts={
         'fee-base': DEF_BASE,
         'fee-per-satoshi': DEF_PPM
     })

     # connection and funding
-    l0.rpc.connect(l1.info['id'], 'localhost', l1.port)
-    l0.fundchannel(l1, 1000000, wait_for_active=True)
     l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
-    scid, _ = l1.fundchannel(l2, 1000000, wait_for_active=False)
+    l1.fundchannel(l2, 1000000, wait_for_active=True)
+    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
+    scid, _ = l2.fundchannel(l3, 1000000, wait_for_active=False)

     # try setting the fee in state AWAITING_LOCKIN should be possible
-    # assert(l1.channel_state(l2) == "CHANNELD_AWAITING_LOCKIN")
-    result = l1.rpc.setchannel(l2.info['id'], 42, 0)
-    assert(result['channels'][0]['peer_id'] == l2.info['id'])
+    # assert(l2.channel_state(l3) == "CHANNELD_AWAITING_LOCKIN")
+    result = l2.rpc.setchannel(l3.info['id'], 42, 0)
+    assert(result['channels'][0]['peer_id'] == l3.info['id'])
     # cid = result['channels'][0]['channel_id']

     # test routing correct new fees once routing is established
-    mine_funding_to_announce(bitcoind, [l0, l1, l2])
+    mine_funding_to_announce(bitcoind, [l1, l2, l3])

-    l0.wait_for_route(l2)
-    inv = l2.rpc.invoice(100000, 'test_setchannel_state', 'desc')['bolt11']
-    result = l0.dev_pay(inv, dev_use_shadow=False)
+    l1.wait_for_route(l3)
+    inv = l3.rpc.invoice(100000, 'test_setchannel_state', 'desc')['bolt11']
+    result = l1.dev_pay(inv, dev_use_shadow=False)
     assert result['status'] == 'complete'
     assert result['amount_sent_msat'] == 100042

-    # Disconnect and unilaterally close from l2 to l1
-    l2.rpc.disconnect(l1.info['id'], force=True)
-    result = l2.rpc.close(scid, 1)
+    # Disconnect and unilaterally close from l3 to l2
+    l3.rpc.disconnect(l2.info['id'], force=True)
+    result = l3.rpc.close(scid, 1)
     assert result['type'] == 'unilateral'

-    # wait for l1 to see unilateral close via bitcoin network
-    while l1.channel_state(l2) == "CHANNELD_NORMAL":
+    # wait for l2 to see unilateral close via bitcoin network
+    while l2.channel_state(l3) == "CHANNELD_NORMAL":
         bitcoind.generate_block(1)
-    # assert l1.channel_state(l2) == "FUNDING_SPEND_SEEN"
+    # assert l2.channel_state(l3) == "FUNDING_SPEND_SEEN"

     # Try to setchannel in order to raise expected error.
     # To reduce false positive flakes, only test if state is not NORMAL anymore.
     with pytest.raises(RpcError, match=r'-1.*'):
-        # l1.rpc.setchannel(l2.info['id'], 10, 1)
-        l1.rpc.setchannel(l2.info['id'], 10, 1)
+        # l2.rpc.setchannel(l3.info['id'], 10, 1)
+        l2.rpc.setchannel(l3.info['id'], 10, 1)


 def test_setchannel_routing(node_factory, bitcoind):