Mirror of https://github.com/ElementsProject/lightning.git
routeboost: don't use channels to dead-end nodes.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
commit fe4a600bc7 (parent 547d6ab878)
@@ -2105,6 +2105,20 @@ out:
 	return daemon_conn_read_next(conn, daemon->master);
 }
 
+/*~ If a node has no public channels (other than the one to us), it's not
+ * a very useful route to tell anyone about. */
+static bool node_has_public_channels(const struct node *peer,
+				     const struct chan *exclude)
+{
+	for (size_t i = 0; i < tal_count(peer->chans); i++) {
+		if (peer->chans[i] == exclude)
+			continue;
+		if (is_chan_public(peer->chans[i]))
+			return true;
+	}
+	return false;
+}
+
 /*~ For routeboost, we offer payers a hint of what incoming channels might
  * have capacity for their payment. To do this, lightningd asks for the
  * information about all channels to this node; but gossipd doesn't know about
@@ -2116,6 +2130,7 @@ static struct io_plan *get_incoming_channels(struct io_conn *conn,
 	struct node *node;
 	struct route_info *public = tal_arr(tmpctx, struct route_info, 0);
 	struct route_info *private = tal_arr(tmpctx, struct route_info, 0);
+	bool has_public = false;
 
 	if (!fromwire_gossip_get_incoming_channels(msg))
 		master_badmsg(WIRE_GOSSIP_GET_INCOMING_CHANNELS, msg);
@@ -2138,6 +2153,13 @@ static struct io_plan *get_incoming_channels(struct io_conn *conn,
 		ri.fee_proportional_millionths = hc->proportional_fee;
 		ri.cltv_expiry_delta = hc->delay;
 
+		has_public |= is_chan_public(c);
+
+		/* If peer doesn't have other public channels,
+		 * no point giving route */
+		if (!node_has_public_channels(other_node(node, c), c))
+			continue;
+
 		if (is_chan_public(c))
 			tal_arr_expand(&public, ri);
 		else
@@ -2145,8 +2167,8 @@ static struct io_plan *get_incoming_channels(struct io_conn *conn,
 		}
 	}
 
-	/* If no public channels, share private ones. */
-	if (tal_count(public) == 0)
+	/* If no public channels (even deadend ones!), share private ones. */
+	if (!has_public)
 		msg = towire_gossip_get_incoming_channels_reply(NULL, private);
 	else
 		msg = towire_gossip_get_incoming_channels_reply(NULL, public);
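The gossipd hunks above amount to a simple selection rule for invoice route hints: skip any incoming channel whose far end has no other public channel (a dead-end node a payer could never reach anyway), and fall back to advertising private channels only when the node has no public channel at all, dead-end or not. Below is a minimal Python model of that rule, offered purely as illustration; the Channel type and select_route_hints() helper are invented names, not part of gossipd.

# Illustrative sketch only -- not gossipd code.  It mirrors the logic of
# node_has_public_channels() and the new has_public fallback above.
from dataclasses import dataclass
from typing import List


@dataclass
class Channel:
    scid: str                  # short channel id of our incoming channel
    public: bool               # has this channel been announced?
    peer_channels: List[bool]  # 'public' flags of the peer's *other* channels


def peer_has_other_public_channel(chan: Channel) -> bool:
    """Counterpart of node_has_public_channels(other_node(node, c), c)."""
    return any(chan.peer_channels)


def select_route_hints(incoming: List[Channel]) -> List[str]:
    """Pick which incoming channels to advertise as bolt11 'r' hints."""
    public, private = [], []
    has_public = False
    for c in incoming:
        has_public |= c.public
        # A peer with no other public channel is a dead end: payers
        # cannot route to it, so hinting this channel is useless.
        if not peer_has_other_public_channel(c):
            continue
        (public if c.public else private).append(c.scid)
    # Only resort to exposing private channels if there is no public
    # channel at all -- even a dead-end public channel counts here.
    return public if has_public else private


# Example: our only channel is public but leads to a peer whose other
# channel was just closed, so the peer is now a dead end.  The hint is
# dropped, and private channels are not leaked either -- the
# "warning_capacity" situation the inv4 test case below checks.
assert select_route_hints([Channel("103x1x0", True, peer_channels=[])]) == []

The test changes that follow set up exactly these situations: an l0-l1-l2 line where closing the l0 side turns l1 into a dead end, and a private-channel setup where attaching a public l0-l1 channel keeps l1 from looking like one.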
@@ -120,7 +120,7 @@ def test_invoice_preimage(node_factory):
 def test_invoice_routeboost(node_factory, bitcoind):
     """Test routeboost 'r' hint in bolt11 invoice.
     """
-    l1, l2 = node_factory.line_graph(2, fundamount=2 * (10**4), wait_for_announce=True)
+    l0, l1, l2 = node_factory.line_graph(3, fundamount=2 * (10**4), wait_for_announce=True)
 
     # Check routeboost.
     # Make invoice and pay it
@@ -131,7 +131,7 @@ def test_invoice_routeboost(node_factory, bitcoind):
     # Route array has single route with single element.
     r = only_one(only_one(l1.rpc.decodepay(inv['bolt11'])['routes']))
     assert r['pubkey'] == l1.info['id']
-    assert r['short_channel_id'] == l1.rpc.listchannels()['channels'][0]['short_channel_id']
+    assert r['short_channel_id'] == l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'][0]['short_channel_id']
     assert r['fee_base_msat'] == 1
     assert r['fee_proportional_millionths'] == 10
     assert r['cltv_expiry_delta'] == 6
@@ -146,7 +146,7 @@ def test_invoice_routeboost(node_factory, bitcoind):
     assert 'warning_capacity' in inv
     assert 'warning_offline' not in inv
 
-    l1.stop()
+    l1.rpc.disconnect(l2.info['id'], True)
     wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
 
     inv = l2.rpc.invoice(123456, label="inv3", description="?")
@@ -154,12 +154,33 @@ def test_invoice_routeboost(node_factory, bitcoind):
     assert 'warning_capacity' not in inv
     assert 'warning_offline' in inv
 
+    # Close l0, l2 will not use l1 at all.
+    l0.rpc.close(l1.info['id'])
+    l0.wait_for_channel_onchain(l1.info['id'])
+    bitcoind.generate_block(100)
+
+    # l2 has to notice channel is gone.
+    wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 2)
+    inv = l2.rpc.invoice(123456, label="inv4", description="?")
+    # Check warning.
+    assert 'warning_capacity' in inv
+    assert 'warning_offline' not in inv
+
 
 def test_invoice_routeboost_private(node_factory, bitcoind):
     """Test routeboost 'r' hint in bolt11 invoice for private channels
     """
     l1, l2 = node_factory.line_graph(2, fundamount=10**6, announce_channels=False)
 
+    # Attach public channel to l1 so it doesn't look like a dead-end.
+    l0 = node_factory.get_node()
+    l0.rpc.connect(l1.info['id'], 'localhost', l1.port)
+    scid = l0.fund_channel(l1, 2 * (10**4))
+    bitcoind.generate_block(5)
+
+    # Make sure channel is totally public.
+    wait_for(lambda: [c['public'] for c in l2.rpc.listchannels(scid)['channels']] == [True, True])
+
     # Since there's only one route, it will reluctantly hint that even
     # though it's private
     inv = l2.rpc.invoice(msatoshi=123456, label="inv0", description="?")
@@ -177,13 +198,13 @@ def test_invoice_routeboost_private(node_factory, bitcoind):
     # the exposure of private channels.
     l3 = node_factory.get_node()
     l3.rpc.connect(l2.info['id'], 'localhost', l2.port)
-    scid = l3.fund_channel(l2, 2 * (10**4))
+    scid = l3.fund_channel(l2, (10**4))
     bitcoind.generate_block(5)
 
     # Make sure channel is totally public.
     wait_for(lambda: [c['public'] for c in l3.rpc.listchannels(scid)['channels']] == [True, True])
 
-    inv = l2.rpc.invoice(msatoshi=123456000, label="inv1", description="?")
+    inv = l2.rpc.invoice(msatoshi=10**7, label="inv1", description="?")
     assert 'warning_capacity' in inv
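For reference, the single route hint that test_invoice_routeboost decodes from the invoice has roughly the shape sketched below; the field names and numeric values come straight from the assertions above, while the pubkey and short_channel_id values are placeholders rather than real output.

# Sketch of only_one(only_one(l1.rpc.decodepay(inv['bolt11'])['routes']))
# as asserted in test_invoice_routeboost; identifiers are placeholders.
route_hint = {
    'pubkey': '<l1 node id>',               # the hint enters the graph at l1
    'short_channel_id': '<l1-l2 channel>',  # the scid l2 reports for peer l1
    'fee_base_msat': 1,
    'fee_proportional_millionths': 10,
    'cltv_expiry_delta': 6,
}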