#include <bitcoin/chainparams.h>
#include <ccan/array_size/array_size.h>
/*~ Welcome to the gossip daemon: keeper of maps!
 *
 * This is the last "global" daemon; it has three purposes.
 *
 * 1. To determine routes for payments when lightningd asks.
 * 2. To receive gossip from peers (via their per-peer daemons) and send
 *    it out to them.
 * 3. To talk to `connectd` to answer address queries for nodes.
 *
 * The gossip protocol itself is fairly simple, but has some twists which
 * add complexity to this daemon.
 */
#include <ccan/asort/asort.h>
#include <ccan/bitmap/bitmap.h>
#include <ccan/build_assert/build_assert.h>
#include <ccan/cast/cast.h>
#include <ccan/container_of/container_of.h>
#include <ccan/crypto/hkdf_sha256/hkdf_sha256.h>
#include <ccan/crypto/siphash24/siphash24.h>
#include <ccan/endian/endian.h>
#include <ccan/fdpass/fdpass.h>
#include <ccan/io/fdpass/fdpass.h>
#include <ccan/io/io.h>
#include <ccan/list/list.h>
#include <ccan/mem/mem.h>
#include <ccan/noerr/noerr.h>
#include <ccan/take/take.h>
#include <ccan/tal/str/str.h>
#include <ccan/timer/timer.h>
#include <common/bech32.h>
#include <common/bech32_util.h>
#include <common/cryptomsg.h>
#include <common/daemon_conn.h>
#include <common/decode_short_channel_ids.h>
#include <common/features.h>
#include <common/memleak.h>
#include <common/ping.h>
#include <common/pseudorand.h>
#include <common/status.h>
#include <common/subdaemon.h>
#include <common/timeout.h>
#include <common/type_to_string.h>
#include <common/utils.h>
#include <common/version.h>
#include <common/wire_error.h>
#include <common/wireaddr.h>
#include <connectd/gen_connect_gossip_wire.h>
#include <errno.h>
#include <fcntl.h>
#include <gossipd/broadcast.h>
#include <gossipd/gen_gossip_peerd_wire.h>
#include <gossipd/gen_gossip_wire.h>
#include <gossipd/routing.h>
#include <hsmd/gen_hsm_wire.h>
#include <inttypes.h>
#include <lightningd/gossip_msg.h>
#include <netdb.h>
#include <netinet/in.h>
#include <secp256k1_ecdh.h>
#include <sodium/randombytes.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/un.h>
#include <unistd.h>
#include <wire/gen_peer_wire.h>
#include <wire/wire_io.h>
#include <wire/wire_sync.h>
#include <zlib.h>

/* We talk to `hsmd` to sign our gossip messages with the node key */
#define HSM_FD 3
/* connectd asks us for help finding nodes, and gossip fds for new peers */
#define CONNECTD_FD 4

/* In developer mode we provide hooks for whitebox testing */
#if DEVELOPER
static u32 max_encoding_bytes = -1U;
static bool suppress_gossip = false;
#endif

/*~ The core daemon structure: */
struct daemon {
	/* Who am I?  Helps us find ourself in the routing map. */
	struct node_id id;

	/* Peers we are gossiping to: id is unique */
	struct list_head peers;

	/* Connection to lightningd. */
	struct daemon_conn *master;

	/* Connection to connect daemon. */
	struct daemon_conn *connectd;

	/* Routing information */
	struct routing_state *rstate;

	/* chainhash for checking/making gossip msgs */
	struct bitcoin_blkid chain_hash;

	/* Timers: we batch gossip, and also refresh announcements */
	struct timers timers;

	/* Global features to list in node_announcement. */
	u8 *globalfeatures;

	/* Alias (not NUL terminated) and favorite color for node_announcement */
	u8 alias[32];
	u8 rgb[3];

	/* What addresses we can actually announce. */
	struct wireaddr *announcable;

	/* Do we think we're missing gossip?  Contains timer to re-check */
	struct oneshot *gossip_missing;

	/* Channels we've heard about, but don't know. */
	struct short_channel_id *unknown_scids;
};

/*~ How gossipy do we ask a peer to be? */
enum gossip_level {
	/* Give us everything since epoch */
	GOSSIP_HIGH,
	/* Give us everything from 24 hours ago. */
	GOSSIP_MEDIUM,
	/* Give us everything from now. */
	GOSSIP_LOW,
	/* Give us nothing. */
	GOSSIP_NONE,
};

/* What are our targets for each gossip level? (including levels above)
 *
 * If we're missing gossip: 3 high.
 * Otherwise: 2 medium, 8 low, and no limit on the rest.
 */
static const size_t gossip_level_targets[] = { 3, 2, 8, SIZE_MAX };
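/*~ For example, gossip_level_targets[GOSSIP_MEDIUM] == 2 means that when we
 * don't think we're missing gossip, we aim for at most two peers at MEDIUM
 * verbosity and eight at LOW, with no cap on how many we leave at NONE. */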

/* This represents each peer we're gossiping with */
struct peer {
	/* daemon->peers */
	struct list_node list;

	/* parent pointer. */
	struct daemon *daemon;

	/* The ID of the peer (always unique) */
	struct node_id id;

	/* The two features gossip cares about (so far) */
	bool gossip_queries_feature, initial_routing_sync_feature;

	/* Are there outstanding responses for queries on short_channel_ids? */
	const struct short_channel_id *scid_queries;
	const bigsize_t *scid_query_flags;
	size_t scid_query_idx;

	/* Are there outstanding node_announcements from scid_queries? */
	struct node_id *scid_query_nodes;
	size_t scid_query_nodes_idx;

	/* Do we have an scid_query outstanding?  Was it internal? */
	bool scid_query_outstanding;
	bool scid_query_was_internal;

	/* How many pongs are we expecting? */
	size_t num_pings_outstanding;

	/* Map of outstanding channel_range requests. */
	bitmap *query_channel_blocks;
	/* What we're querying: [range_first_blocknum, range_end_blocknum) */
	u32 range_first_blocknum, range_end_blocknum;
	u32 range_blocks_remaining;
	struct short_channel_id *query_channel_scids;

	/* Are we asking this peer to give us a lot of gossip? */
	enum gossip_level gossip_level;

	/* The daemon_conn used to queue messages to/from the peer. */
	struct daemon_conn *dc;
};

/*~ A channel consists of a `struct half_chan` for each direction, each of
 * which has a `flags` word from the `channel_update`; bit 1 is
 * ROUTING_FLAGS_DISABLED in the `channel_update`.  But we also keep a local
 * whole-channel flag which indicates it's not available; we use this when a
 * peer disconnects, and generate a `channel_update` to tell the world lazily
 * when someone asks. */
static void peer_disable_channels(struct daemon *daemon, struct node *node)
{
	/* If this peer had a channel with us, mark it disabled. */
	struct chan_map_iter i;
	struct chan *c;

	for (c = first_chan(node, &i); c; c = next_chan(node, &i)) {
		if (node_id_eq(&other_node(node, c)->id, &daemon->id))
			local_disable_chan(daemon->rstate, c);
	}
}

/*~ Destroy a peer, usually because the per-peer daemon has exited.
 *
 * Were you wondering why we call this "destroy_peer" and not "peer_destroy"?
 * I thought not!  But while CCAN modules are required to keep to their own
 * prefix namespace, leading to unnatural word order, we couldn't stomach that
 * for our own internal use.  We use 'find_foo', 'destroy_foo' and 'new_foo'.
 */
static void destroy_peer(struct peer *peer)
{
	struct node *node;

	/* Remove it from the peers list */
	list_del_from(&peer->daemon->peers, &peer->list);

	/* If we have a channel with this peer, disable it. */
	node = get_node(peer->daemon->rstate, &peer->id);
	if (node)
		peer_disable_channels(peer->daemon, node);

	/* This is tricky: our lifetime is tied to the daemon_conn; it's our
	 * parent, so we are freed if it is, but we need to free it if we're
	 * freed manually.  tal_free() treats this as a noop if it's already
	 * being freed */
	tal_free(peer->dc);
}

/* Search for a peer. */
static struct peer *find_peer(struct daemon *daemon, const struct node_id *id)
{
	struct peer *peer;

	list_for_each(&daemon->peers, peer, list)
		if (node_id_eq(&peer->id, id))
			return peer;
	return NULL;
}
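/*~ Note: find_peer()'s linear scan is fine here, since gossipd only keeps a
 * `struct peer` for peers we're actually connected to, so the list stays
 * small. */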

/* Queue a gossip message for the peer: the subdaemon on the other end simply
 * forwards it to the peer. */
static void queue_peer_msg(struct peer *peer, const u8 *msg TAKES)
{
	daemon_conn_send(peer->dc, msg);
}

/*~ We have a helper for messages from the store. */
static void queue_peer_from_store(struct peer *peer,
				  const struct broadcastable *bcast)
{
	struct gossip_store *gs = peer->daemon->rstate->gs;
	queue_peer_msg(peer, take(gossip_store_get(NULL, gs, bcast->index)));
}

/* BOLT #7:
 *
 * There are several messages which contain a long array of
 * `short_channel_id`s (called `encoded_short_ids`) so we utilize a
 * simple compression scheme: the first byte indicates the encoding, the
 * rest contains the data.
 */
static u8 *encoding_start(const tal_t *ctx)
{
	return tal_arr(ctx, u8, 0);
}

/* Marshal a single short_channel_id */
static void encoding_add_short_channel_id(u8 **encoded,
					  const struct short_channel_id *scid)
{
	towire_short_channel_id(encoded, scid);
}

/* Greg Maxwell asked me privately about using zlib for communicating a set,
 * and suggested that we'd be better off using Golomb-Rice coding a-la BIP
 * 158.  However, naively using Rice encoding isn't a win: we have to get
 * more complex and use separate streams.  The upside is that it's between
 * 2 and 5 times smaller (assuming optimal Rice encoding + gzip).  We can add
 * that later. */
static u8 *zencode(const tal_t *ctx, const u8 *scids, size_t len)
{
	u8 *z;
	int err;
	unsigned long compressed_len = len;

	/* Prefer to fail if zlib makes it larger */
	z = tal_arr(ctx, u8, len);
	err = compress2(z, &compressed_len, scids, len, Z_BEST_COMPRESSION);
	if (err == Z_OK) {
		status_trace("compressed %zu into %lu",
			     len, compressed_len);
		tal_resize(&z, compressed_len);
		return z;
	}
	status_trace("compress %zu returned %i:"
		     " not compressing", len, err);
	return NULL;
}
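/*~ Note that compress2() above takes compressed_len as an in/out argument:
 * because we initialize it to the input length, zlib fails with Z_BUF_ERROR
 * rather than growing the output, which is exactly the "prefer to fail"
 * behaviour we want. */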

/* Try compressing *encoded: fails if result would be longer.
 * @off is offset to place result in *encoded.
 */
static bool encoding_end_zlib(u8 **encoded, size_t off)
{
	u8 *z;
	size_t len = tal_count(*encoded);

	z = zencode(tmpctx, *encoded, len);
	if (!z)
		return false;

	/* Successful: copy over and trim */
	tal_resize(encoded, off + tal_count(z));
	memcpy(*encoded + off, z, tal_count(z));
	return true;
}

static void encoding_end_no_compress(u8 **encoded, size_t off)
{
	size_t len = tal_count(*encoded);

	tal_resize(encoded, off + len);
	memmove(*encoded + off, *encoded, len);
}

/* Once we've assembled it, try compressing.
 * Prepends encoding type to @encoded. */
static bool encoding_end_prepend_type(u8 **encoded, size_t max_bytes)
{
	if (encoding_end_zlib(encoded, 1))
		**encoded = SHORTIDS_ZLIB;
	else {
		encoding_end_no_compress(encoded, 1);
		**encoded = SHORTIDS_UNCOMPRESSED;
	}

#if DEVELOPER
	if (tal_count(*encoded) > max_encoding_bytes)
		return false;
#endif
	return tal_count(*encoded) <= max_bytes;
}
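/*~ The resulting wire format is one encoding-type byte followed by the
 * payload: a sketch, using the encoding constants above:
 *   [SHORTIDS_UNCOMPRESSED][scid #1: 8 bytes][scid #2: 8 bytes]...
 * or, if zlib actually saved space:
 *   [SHORTIDS_ZLIB][deflated array of 8-byte scids]
 */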

/* Try compressing, leaving type external */
static UNNEEDED bool encoding_end_external_type(u8 **encoded, u8 *type, size_t max_bytes)
{
	if (encoding_end_zlib(encoded, 0))
		*type = SHORTIDS_ZLIB;
	else {
		encoding_end_no_compress(encoded, 0);
		*type = SHORTIDS_UNCOMPRESSED;
	}

	return tal_count(*encoded) <= max_bytes;
}

/*~ We have different levels of gossipiness, depending on our needs. */
static u32 gossip_start(enum gossip_level gossip_level)
{
	switch (gossip_level) {
	case GOSSIP_HIGH:
		return 0;
	case GOSSIP_MEDIUM:
		return time_now().ts.tv_sec - 24 * 3600;
	case GOSSIP_LOW:
		return time_now().ts.tv_sec;
	case GOSSIP_NONE:
		return UINT32_MAX;
	}
	abort();
}

/* BOLT #7:
 *
 * A node:
 *   - if the `gossip_queries` feature is negotiated:
 *     - MUST NOT relay any gossip messages unless explicitly requested.
 */
static void setup_gossip_range(struct peer *peer)
{
	u8 *msg;

	/*~ Without the `gossip_queries` feature, gossip flows automatically. */
	if (!peer->gossip_queries_feature) {
		/* This peer is gossipy whether we want it or not! */
		return;
	}

	status_trace("Setting peer %s to gossip level %s",
		     type_to_string(tmpctx, struct node_id, &peer->id),
		     peer->gossip_level == GOSSIP_HIGH ? "HIGH"
		     : peer->gossip_level == GOSSIP_MEDIUM ? "MEDIUM"
		     : peer->gossip_level == GOSSIP_LOW ? "LOW"
		     : peer->gossip_level == GOSSIP_NONE ? "NONE"
		     : "INVALID");
	/*~ We need to ask for something to start the gossip flowing. */
	msg = towire_gossip_timestamp_filter(peer,
					     &peer->daemon->chain_hash,
					     gossip_start(peer->gossip_level),
					     UINT32_MAX);
	queue_peer_msg(peer, take(msg));
}
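/*~ For example, a peer we set to GOSSIP_MEDIUM gets a gossip_timestamp_filter
 * with first_timestamp of roughly now minus 24 hours and timestamp_range of
 * UINT32_MAX, so it replays the last day of gossip and then keeps streaming
 * anything newer; a GOSSIP_NONE peer gets first_timestamp UINT32_MAX, which
 * effectively matches nothing. */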

/* Create a node_announcement with the given signature. It may be NULL in the
 * case we need to create a provisional announcement for the HSM to sign.
 * This is called twice: once with the dummy signature to get it signed and a
 * second time to build the full packet with the signature. The timestamp is
 * handed in rather than using time_now() internally, since that could change
 * between the dummy creation and the call with a signature. */
static u8 *create_node_announcement(const tal_t *ctx, struct daemon *daemon,
				    secp256k1_ecdsa_signature *sig,
				    u32 timestamp)
{
	u8 *addresses = tal_arr(tmpctx, u8, 0);
	u8 *announcement;
	size_t i;
	if (!sig) {
		sig = tal(tmpctx, secp256k1_ecdsa_signature);
		memset(sig, 0, sizeof(*sig));
	}
	for (i = 0; i < tal_count(daemon->announcable); i++)
		towire_wireaddr(&addresses, &daemon->announcable[i]);

	announcement =
	    towire_node_announcement(ctx, sig, daemon->globalfeatures, timestamp,
				     &daemon->id, daemon->rgb, daemon->alias,
				     addresses);
	return announcement;
}

/*~ This routine creates a `node_announcement` for our node, and hands it to
 * the routing.c code like any other `node_announcement`.  Such announcements
 * are only accepted if there is an announced channel associated with that node
 * (to prevent spam), so we only call this once we've announced a channel. */
static void send_node_announcement(struct daemon *daemon)
{
	u32 timestamp = time_now().ts.tv_sec;
	secp256k1_ecdsa_signature sig;
	u8 *msg, *nannounce, *err;
	struct node *self = get_node(daemon->rstate, &daemon->id);

	/* BOLT #7:
	 *
	 * The origin node:
	 *   - MUST set `timestamp` to be greater than that of any previous
	 *   `node_announcement` it has previously created.
	 */
	if (self && self->bcast.index && timestamp <= self->bcast.timestamp)
		timestamp = self->bcast.timestamp + 1;

	/* Get an unsigned one. */
	nannounce = create_node_announcement(tmpctx, daemon, NULL, timestamp);

	/* Ask hsmd to sign it (synchronous) */
	if (!wire_sync_write(HSM_FD, take(towire_hsm_node_announcement_sig_req(NULL, nannounce))))
		status_failed(STATUS_FAIL_MASTER_IO, "Could not write to HSM: %s", strerror(errno));

	msg = wire_sync_read(tmpctx, HSM_FD);
	if (!fromwire_hsm_node_announcement_sig_reply(msg, &sig))
		status_failed(STATUS_FAIL_MASTER_IO, "HSM returned an invalid node_announcement sig");

	/* We got the signature for our provisional node_announcement back
	 * from the HSM, so create the real announcement and hand it to the
	 * routing code to broadcast. */
	nannounce = create_node_announcement(NULL, daemon, &sig, timestamp);

	/* This injects it into the routing code in routing.c; it should not
	 * reject it! */
	err = handle_node_announcement(daemon->rstate, take(nannounce));
	if (err)
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "rejected own node announcement: %s",
			      tal_hex(tmpctx, err));
}
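/*~ Note that the exchange with hsmd above is fully synchronous on HSM_FD: we
 * block in wire_sync_read() until the signature reply arrives.  That's
 * acceptable here since node_announcements are rare and nothing useful can be
 * done with an unsigned one. */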

/*~ We don't actually keep node_announcements in memory; we keep them in
 * a file called `gossip_store`.  If we need some node details, we reload
 * and reparse.  It's slow, but generally rare. */
static bool get_node_announcement(const tal_t *ctx,
				  struct daemon *daemon,
				  const struct node *n,
				  u8 rgb_color[3],
				  u8 alias[32],
				  u8 **features,
				  struct wireaddr **wireaddrs)
{
	const u8 *msg;
	struct node_id id;
	secp256k1_ecdsa_signature signature;
	u32 timestamp;
	u8 *addresses;

	if (!n->bcast.index)
		return false;

	msg = gossip_store_get(tmpctx, daemon->rstate->gs, n->bcast.index);

	/* Note: validity of node_id is already checked. */
	if (!fromwire_node_announcement(ctx, msg,
					&signature, features,
					&timestamp,
					&id, rgb_color, alias,
					&addresses)) {
		status_broken("Bad local node_announcement @%u: %s",
			      n->bcast.index, tal_hex(tmpctx, msg));
		return false;
	}

	if (!node_id_eq(&id, &n->id) || timestamp != n->bcast.timestamp) {
		status_broken("Wrong node_announcement @%u:"
			      " expected %s timestamp %u "
			      " got %s timestamp %u",
			      n->bcast.index,
			      type_to_string(tmpctx, struct node_id, &n->id),
			      timestamp,
			      type_to_string(tmpctx, struct node_id, &id),
			      n->bcast.timestamp);
		return false;
	}

	*wireaddrs = read_addresses(ctx, addresses);
	tal_free(addresses);
	return true;
}

/* Version which also does nodeid lookup */
static bool get_node_announcement_by_id(const tal_t *ctx,
					struct daemon *daemon,
					const struct node_id *node_id,
					u8 rgb_color[3],
					u8 alias[32],
					u8 **features,
					struct wireaddr **wireaddrs)
{
	struct node *n = get_node(daemon->rstate, node_id);
	if (!n)
		return false;

	return get_node_announcement(ctx, daemon, n, rgb_color, alias,
				     features, wireaddrs);
}

/* Return true if the only change would be the timestamp. */
static bool node_announcement_redundant(struct daemon *daemon)
{
	u8 rgb_color[3];
	u8 alias[32];
	u8 *features;
	struct wireaddr *wireaddrs;

	if (!get_node_announcement_by_id(tmpctx, daemon, &daemon->id,
					 rgb_color, alias, &features,
					 &wireaddrs))
		return false;

	if (tal_count(wireaddrs) != tal_count(daemon->announcable))
		return false;

	for (size_t i = 0; i < tal_count(wireaddrs); i++)
		if (!wireaddr_eq(&wireaddrs[i], &daemon->announcable[i]))
			return false;

	BUILD_ASSERT(ARRAY_SIZE(daemon->alias) == ARRAY_SIZE(alias));
	if (!memeq(daemon->alias, ARRAY_SIZE(daemon->alias),
		   alias, ARRAY_SIZE(alias)))
		return false;

	BUILD_ASSERT(ARRAY_SIZE(daemon->rgb) == ARRAY_SIZE(rgb_color));
	if (!memeq(daemon->rgb, ARRAY_SIZE(daemon->rgb),
		   rgb_color, ARRAY_SIZE(rgb_color)))
		return false;

	if (!memeq(daemon->globalfeatures, tal_count(daemon->globalfeatures),
		   features, tal_count(features)))
		return false;

	return true;
}

/* Should we announce our own node?  Called at strategic places. */
static void maybe_send_own_node_announce(struct daemon *daemon)
{
	/* We keep an internal flag in the routing code to say we've announced
	 * a local channel.  The alternative would be to have it make a
	 * callback, but when we start up we don't want to make multiple
	 * announcements, so we use this approach for now. */
	if (!daemon->rstate->local_channel_announced)
		return;

	if (node_announcement_redundant(daemon))
		return;

	send_node_announcement(daemon);
	daemon->rstate->local_channel_announced = false;
}

/* Query this peer for these short-channel-ids. */
static bool query_short_channel_ids(struct daemon *daemon,
				    struct peer *peer,
				    const struct short_channel_id *scids,
				    bool internal)
{
	u8 *encoded, *msg;

	/* BOLT #7:
	 *
	 * 1. type: 261 (`query_short_channel_ids`) (`gossip_queries`)
	 * 2. data:
	 *     * [`chain_hash`:`chain_hash`]
	 *     * [`u16`:`len`]
	 *     * [`len*byte`:`encoded_short_ids`]
	 */
	const size_t reply_overhead = 32 + 2;
	const size_t max_encoded_bytes = 65535 - 2 - reply_overhead;
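	/*~ That is, the 65535-byte message limit, minus the 2-byte type and
	 * the 32 + 2 bytes of chain_hash and len fields, bounds how large
	 * encoded_short_ids can get. */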

	/* Can't query if they don't have gossip_queries_feature */
	if (!peer->gossip_queries_feature)
		return false;

	/* BOLT #7:
	 *
	 * The sender:
	 *  - MUST NOT send `query_short_channel_ids` if it has sent a previous
	 *    `query_short_channel_ids` to this peer and not received
	 *    `reply_short_channel_ids_end`.
	 */
	if (peer->scid_query_outstanding)
		return false;

	encoded = encoding_start(tmpctx);
	for (size_t i = 0; i < tal_count(scids); i++)
		encoding_add_short_channel_id(&encoded, &scids[i]);

	if (!encoding_end_prepend_type(&encoded, max_encoded_bytes)) {
		status_broken("query_short_channel_ids: %zu is too many",
			      tal_count(scids));
		return false;
	}

#if EXPERIMENTAL_FEATURES
	msg = towire_query_short_channel_ids(NULL, &daemon->chain_hash,
					     encoded, NULL);
#else
	msg = towire_query_short_channel_ids(NULL, &daemon->chain_hash,
					     encoded);
#endif
	queue_peer_msg(peer, take(msg));
	peer->scid_query_outstanding = true;
	peer->scid_query_was_internal = internal;

	status_trace("%s: sending query for %zu scids",
		     type_to_string(tmpctx, struct node_id, &peer->id),
		     tal_count(scids));
	return true;
}

/*~ This peer told us about an update to an unknown channel.  Ask it for
 * a channel_announcement. */
static void query_unknown_channel(struct daemon *daemon,
				  struct peer *peer,
				  const struct short_channel_id *id)
{
	/* Don't go overboard if we're already asking for a lot. */
	if (tal_count(daemon->unknown_scids) > 1000)
		return;

	/* Check we're not already getting this one. */
	for (size_t i = 0; i < tal_count(daemon->unknown_scids); i++)
		if (short_channel_id_eq(&daemon->unknown_scids[i], id))
			return;

	tal_arr_expand(&daemon->unknown_scids, *id);

	/* This is best effort: if peer is busy, we'll try next time. */
	query_short_channel_ids(daemon, peer, daemon->unknown_scids, true);
}

/*~ Routines to handle gossip messages from peers, forwarded by subdaemons.
 *-----------------------------------------------------------------------
 *
 * It's not the subdaemon's fault if they're malformed or invalid; so these
 * all return an error packet which gets sent back to the subdaemon in that
 * case.
 */

/* The routing code checks that it's basically valid, returning an
 * error message for the peer or NULL.  NULL means it's OK, but the
 * message might be redundant, in which case scid is also NULL.
 * Otherwise `scid` gives us the short_channel_id claimed by the
 * message, and it puts the announcement on an internal 'pending'
 * queue.  We'll send a request to lightningd to look it up, and continue
 * processing in `handle_txout_reply`. */
static const u8 *handle_channel_announcement_msg(struct peer *peer,
						 const u8 *msg)
{
	const struct short_channel_id *scid;
	const u8 *err;

	/* If it's OK, tells us the short_channel_id to lookup; it notes
	 * if this is the unknown channel the peer was looking for (in
	 * which case, it frees and NULLs that ptr) */
	err = handle_channel_announcement(peer->daemon->rstate, msg, &scid);
	if (err)
		return err;
	else if (scid)
		daemon_conn_send(peer->daemon->master,
				 take(towire_gossip_get_txout(NULL, scid)));
	return NULL;
}

static u8 *handle_channel_update_msg(struct peer *peer, const u8 *msg)
{
	struct short_channel_id unknown_scid;
	/* Hand the channel_update to the routing code */
	u8 *err;

	unknown_scid.u64 = 0;
	err = handle_channel_update(peer->daemon->rstate, msg, "subdaemon",
				    &unknown_scid);
	if (err) {
		if (unknown_scid.u64 != 0)
			query_unknown_channel(peer->daemon, peer, &unknown_scid);
		return err;
	}

	/*~ As a nasty compromise in the spec, we only forward channel_announce
	 * once we have a channel_update; the channel isn't *usable* for
	 * routing until you have both anyway.  For this reason, we might have
	 * just sent out our own channel_announce, so we check if it's time to
	 * send a node_announcement too. */
	maybe_send_own_node_announce(peer->daemon);
	return NULL;
}

/*~ The peer can ask about an array of short channel ids: we don't assemble the
 * reply immediately but process them one at a time in dump_gossip which is
 * called when there's nothing more important to send. */
static const u8 *handle_query_short_channel_ids(struct peer *peer, const u8 *msg)
{
	struct bitcoin_blkid chain;
	u8 *encoded;
	struct short_channel_id *scids;
	bigsize_t *flags;

#if EXPERIMENTAL_FEATURES
	struct tlv_query_short_channel_ids_tlvs *tlvs
		= tlv_query_short_channel_ids_tlvs_new(tmpctx);

	if (!fromwire_query_short_channel_ids(tmpctx, msg, &chain, &encoded,
					      tlvs)) {
		return towire_errorfmt(peer, NULL,
				       "Bad query_short_channel_ids w/tlvs %s",
				       tal_hex(tmpctx, msg));
	}
	if (tlvs->query_flags) {
		flags = decode_scid_query_flags(tmpctx, tlvs->query_flags);
		if (!flags) {
			return towire_errorfmt(peer, NULL,
					       "Bad query_short_channel_ids query_flags %s",
					       tal_hex(tmpctx, msg));
		}
	} else
		flags = NULL;
#else
	if (!fromwire_query_short_channel_ids(tmpctx, msg, &chain, &encoded)) {
		return towire_errorfmt(peer, NULL,
				       "Bad query_short_channel_ids %s",
				       tal_hex(tmpctx, msg));
	}
	flags = NULL;
#endif

	if (!bitcoin_blkid_eq(&peer->daemon->chain_hash, &chain)) {
		status_trace("%s sent query_short_channel_ids chainhash %s",
			     type_to_string(tmpctx, struct node_id, &peer->id),
			     type_to_string(tmpctx, struct bitcoin_blkid, &chain));
		return NULL;
	}

	/* BOLT #7:
	 *
	 * - if it has not sent `reply_short_channel_ids_end` to a
	 *   previously received `query_short_channel_ids` from this
	 *   sender:
	 *    - MAY fail the connection.
	 */
	if (peer->scid_queries || peer->scid_query_nodes) {
		return towire_errorfmt(peer, NULL,
				       "Bad concurrent query_short_channel_ids");
	}

	scids = decode_short_ids(tmpctx, encoded);
	if (!scids) {
		return towire_errorfmt(peer, NULL,
				       "Bad query_short_channel_ids encoding %s",
				       tal_hex(tmpctx, encoded));
	}

	/* BOLT-61a1365a45cc8b463ddbbe3429d350f8eac787dd #7:
	 *
	 * The receiver:
	 *...
	 *  - if the incoming message includes `query_short_channel_ids_tlvs`:
	 *    - if `encoded_query_flags` does not decode to exactly one flag per
	 *      `short_channel_id`:
	 *      - MAY fail the connection.
	 */
	if (!flags) {
		/* Pretend they asked for everything. */
		flags = tal_arr(tmpctx, bigsize_t, tal_count(scids));
		memset(flags, 0xFF, tal_bytelen(flags));
	} else {
		if (tal_count(flags) != tal_count(scids)) {
			return towire_errorfmt(peer, NULL,
					       "Bad query_short_channel_ids flags count %zu scids %zu",
					       tal_count(flags), tal_count(scids));
		}
	}
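	/*~ Setting every bit of each flag (as we just did for the no-flags
	 * case) is shorthand for "send the channel_announcement, both
	 * channel_updates and both node_announcements" for that scid, i.e.
	 * the same behaviour as a query without flags. */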

	/* BOLT #7:
	 *
	 * - MUST respond to each known `short_channel_id` with a `channel_announcement`
	 *   and the latest `channel_update` for each end
	 *   - SHOULD NOT wait for the next outgoing gossip flush to send
	 *     these.
	 */
	peer->scid_queries = tal_steal(peer, scids);
	peer->scid_query_flags = tal_steal(peer, flags);
	peer->scid_query_idx = 0;
	peer->scid_query_nodes = tal_arr(peer, struct node_id, 0);

	/* Notify the daemon_conn-write loop to invoke create_next_scid_reply */
	daemon_conn_wake(peer->dc);
	return NULL;
}

/*~ When we compact the gossip store, all the broadcast indexes move.
 * We simply offset everyone, which means in theory they could retransmit
 * some, but that's a lesser evil than skipping some. */
void update_peers_broadcast_index(struct list_head *peers, u32 offset)
{
	struct peer *peer, *next;

	list_for_each_safe(peers, peer, next, list) {
		int gs_fd;
		/*~ Since the store has been compacted, they need a new fd for
		 * the new store.  We also tell them how much it shrank, so
		 * they can (approximately) tell where to start in the new
		 * store. */
		gs_fd = gossip_store_readonly_fd(peer->daemon->rstate->gs);
		if (gs_fd < 0) {
			status_broken("Can't get read-only gossip store fd:"
				      " killing peer");
			tal_free(peer);
		} else {
			u8 *msg = towire_gossipd_new_store_fd(NULL, offset);
			daemon_conn_send(peer->dc, take(msg));
			daemon_conn_send_fd(peer->dc, gs_fd);
		}
	}
}

/*~ We can send multiple replies when the peer queries for all channels in
 * a given range of blocks; each one indicates the range of blocks it covers. */
static void reply_channel_range(struct peer *peer,
				u32 first_blocknum, u32 number_of_blocks,
				const u8 *encoded)
{
	/* BOLT #7:
	 *
	 * - For each `reply_channel_range`:
	 *   - MUST set with `chain_hash` equal to that of `query_channel_range`,
	 *   - MUST encode a `short_channel_id` for every open channel it
	 *     knows in blocks `first_blocknum` to `first_blocknum` plus
	 *     `number_of_blocks` minus one.
	 *   - MUST limit `number_of_blocks` to the maximum number of blocks
	 *     whose results could fit in `encoded_short_ids`
	 *   - if does not maintain up-to-date channel information for
	 *     `chain_hash`:
	 *     - MUST set `complete` to 0.
	 *   - otherwise:
	 *     - SHOULD set `complete` to 1.
	 */
#if EXPERIMENTAL_FEATURES
	u8 *msg = towire_reply_channel_range(NULL,
					     &peer->daemon->chain_hash,
					     first_blocknum,
					     number_of_blocks,
					     1, encoded, NULL);
#else
	u8 *msg = towire_reply_channel_range(NULL,
					     &peer->daemon->chain_hash,
					     first_blocknum,
					     number_of_blocks,
					     1, encoded);
#endif
	queue_peer_msg(peer, take(msg));
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ When we need to send an array of channels, it might go over our 64k packet
|
|
|
|
* size. If it doesn't, we recurse, splitting in two, etc. Each message
|
|
|
|
* indicates what blocks it contains, so the recipient knows when we're
|
gossipd: handle overflowing query properly (avoid slow 100% CPU reports)
Don't do this:
(gdb) bt
#0 0x00007f37ae667c40 in ?? () from /lib/x86_64-linux-gnu/libz.so.1
#1 0x00007f37ae668b38 in ?? () from /lib/x86_64-linux-gnu/libz.so.1
#2 0x00007f37ae669907 in deflate () from /lib/x86_64-linux-gnu/libz.so.1
#3 0x00007f37ae674c65 in compress2 () from /lib/x86_64-linux-gnu/libz.so.1
#4 0x000000000040cfe3 in zencode_scids (ctx=0xc1f118, scids=0x2599bc49 "\a\325{", len=176320) at gossipd/gossipd.c:218
#5 0x000000000040d0b3 in encode_short_channel_ids_end (encoded=0x7fff8f98d9f0, max_bytes=65490) at gossipd/gossipd.c:236
#6 0x000000000040dd28 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17290511, number_of_blocks=8) at gossipd/gossipd.c:576
#7 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17290511, number_of_blocks=16) at gossipd/gossipd.c:595
#8 0x000000000040ddee in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17290495, number_of_blocks=32) at gossipd/gossipd.c:596
#9 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17290495, number_of_blocks=64) at gossipd/gossipd.c:595
#10 0x000000000040ddee in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17290431, number_of_blocks=128) at gossipd/gossipd.c:596
#11 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17290431, number_of_blocks=256) at gossipd/gossipd.c:595
#12 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17290431, number_of_blocks=512) at gossipd/gossipd.c:595
#13 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17290431, number_of_blocks=1024) at gossipd/gossipd.c:595
#14 0x000000000040ddee in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=2047) at gossipd/gossipd.c:596
#15 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=4095) at gossipd/gossipd.c:595
#16 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=8191) at gossipd/gossipd.c:595
#17 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=16382) at gossipd/gossipd.c:595
#18 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=32764) at gossipd/gossipd.c:595
#19 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=65528) at gossipd/gossipd.c:595
#20 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=131056) at gossipd/gossipd.c:595
#21 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=262112) at gossipd/gossipd.c:595
#22 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=524225) at gossipd/gossipd.c:595
#23 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=1048450) at gossipd/gossipd.c:595
#24 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=2096900) at gossipd/gossipd.c:595
#25 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=4193801) at gossipd/gossipd.c:595
#26 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=8387603) at gossipd/gossipd.c:595
#27 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=17289408, number_of_blocks=16775207) at gossipd/gossipd.c:595
#28 0x000000000040ddee in queue_channel_ranges (peer=0x3868fc8, first_blocknum=514201, number_of_blocks=33550414) at gossipd/gossipd.c:596
#29 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=514201, number_of_blocks=67100829) at gossipd/gossipd.c:595
#30 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=514201, number_of_blocks=134201659) at gossipd/gossipd.c:595
#31 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=514201, number_of_blocks=268403318) at gossipd/gossipd.c:595
#32 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=514201, number_of_blocks=536806636) at gossipd/gossipd.c:595
#33 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=514201, number_of_blocks=1073613273) at gossipd/gossipd.c:595
#34 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=514201, number_of_blocks=2147226547) at gossipd/gossipd.c:595
#35 0x000000000040ddc6 in queue_channel_ranges (peer=0x3868fc8, first_blocknum=514201, number_of_blocks=4294453094) at gossipd/gossipd.c:595
#36 0x000000000040df26 in handle_query_channel_range (peer=0x3868fc8, msg=0x37e0678 "\001\ao\342\214\n\266\361\263r\301\246\242F\256c\367O\223\036\203e\341Z\b\234h\326\031") at gossipd/gossipd.c:625
The cause was that converting a block number to an scid truncates it
at 24 bits. When we look through the index from (truncated number) to
(real end number) we get every channel, which is too large to encode,
so we iterate again.
This fixes both that problem, and also the issue that we'd end up
dividing into many empty sections until we get to the highest block
number. Instead, we just tack the empty blocks onto the end of the
final query.
(My initial version requested 0xFFFFFFFE blocks, but the dev code
which records what blocks were returned can't make a bitmap that big
on 32-bit systems).
Reported-by: George Vaccaro
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2019-01-15 10:37:57 +01:00
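To see why the recursion above blows up, recall how a short_channel_id packs its block height: only the top 24 bits of the 64-bit id carry the block number, so any height past 0xFFFFFF silently wraps. A minimal standalone sketch of that truncation (the packing helper here is written purely for illustration; it is not gossipd's own API):

#include <stdint.h>
#include <stdio.h>

/* Illustration only: a short_channel_id keeps the block height in its top
 * 24 bits, the funding tx index in the next 24 and the output in the low 16. */
static uint64_t scid_from_block(uint32_t blocknum)
{
	return (uint64_t)(blocknum & 0xFFFFFF) << 40;
}

int main(void)
{
	uint32_t asked = 17290511;	/* > 0xFFFFFF, as in the trace above */
	uint64_t scid = scid_from_block(asked);
	uint32_t stored = (uint32_t)(scid >> 40);

	/* The height wraps to a much lower block, so a scan "from asked to
	 * the real end" starts way back here and sweeps up nearly every
	 * channel in the map. */
	printf("asked for block %u, scid encodes block %u\n", asked, stored);
	return 0;
}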
|
|
|
* finished.
|
|
|
|
*
|
|
|
|
* tail_blocks is the number of empty blocks at the end, in case they asked for all
|
|
|
|
* blocks to 4 billion.
|
|
|
|
*/
|
2019-01-21 01:57:43 +01:00
|
|
|
static bool queue_channel_ranges(struct peer *peer,
|
2019-01-15 10:37:57 +01:00
|
|
|
u32 first_blocknum, u32 number_of_blocks,
|
|
|
|
u32 tail_blocks)
|
2018-06-04 06:28:02 +02:00
|
|
|
{
|
|
|
|
struct routing_state *rstate = peer->daemon->rstate;
|
2019-08-09 18:08:01 +02:00
|
|
|
u8 *encoded_scids = encoding_start(tmpctx);
|
2018-06-04 06:28:02 +02:00
|
|
|
struct short_channel_id scid;
|
2019-01-21 01:57:43 +01:00
|
|
|
bool scid_ok;
|
2018-06-04 06:28:02 +02:00
|
|
|
|
|
|
|
/* BOLT #7:
|
|
|
|
*
|
|
|
|
* 1. type: 264 (`reply_channel_range`) (`gossip_queries`)
|
|
|
|
* 2. data:
|
2019-07-16 01:20:37 +02:00
|
|
|
* * [`chain_hash`:`chain_hash`]
|
|
|
|
* * [`u32`:`first_blocknum`]
|
|
|
|
* * [`u32`:`number_of_blocks`]
|
|
|
|
* * [`byte`:`complete`]
|
|
|
|
* * [`u16`:`len`]
|
|
|
|
* * [`len*byte`:`encoded_short_ids`]
|
2018-06-04 06:28:02 +02:00
|
|
|
*/
|
|
|
|
const size_t reply_overhead = 32 + 4 + 4 + 1 + 2;
|
|
|
|
const size_t max_encoded_bytes = 65535 - 2 - reply_overhead;
|
|
|
|
|
|
|
|
/* Avoid underflow: we don't use block 0 anyway */
|
|
|
|
if (first_blocknum == 0)
|
2019-01-21 01:57:43 +01:00
|
|
|
scid_ok = mk_short_channel_id(&scid, 1, 0, 0);
|
2018-06-04 06:28:02 +02:00
|
|
|
else
|
2019-01-21 01:57:43 +01:00
|
|
|
scid_ok = mk_short_channel_id(&scid, first_blocknum, 0, 0);
|
2018-06-04 06:28:02 +02:00
|
|
|
scid.u64--;
|
2019-01-21 01:57:43 +01:00
|
|
|
if (!scid_ok)
|
|
|
|
return false;
|
2018-06-04 06:28:02 +02:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* We keep a `uintmap` of `short_channel_id` to `struct chan *`.
|
|
|
|
* Unlike a htable, it's efficient to iterate through, but it only
|
|
|
|
* works because each short_channel_id is basically a 64-bit unsigned
|
|
|
|
* integer.
|
|
|
|
*
|
|
|
|
* First we iterate and gather all the short channel ids. */
|
2018-06-04 06:28:02 +02:00
|
|
|
while (uintmap_after(&rstate->chanmap, &scid.u64)) {
|
|
|
|
u32 blocknum = short_channel_id_blocknum(&scid);
|
|
|
|
if (blocknum >= first_blocknum + number_of_blocks)
|
|
|
|
break;
|
|
|
|
|
2019-08-09 18:08:01 +02:00
|
|
|
encoding_add_short_channel_id(&encoded_scids, &scid);
|
2018-06-04 06:28:02 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* If we can encode that, fine: send it */
|
2019-08-09 18:08:01 +02:00
|
|
|
if (encoding_end_prepend_type(&encoded_scids, max_encoded_bytes)) {
|
2019-01-15 10:37:57 +01:00
|
|
|
reply_channel_range(peer, first_blocknum,
|
|
|
|
number_of_blocks + tail_blocks,
|
2019-08-09 18:08:01 +02:00
|
|
|
encoded_scids);
|
2019-01-21 01:57:43 +01:00
|
|
|
return true;
|
2018-06-04 06:28:02 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* It wouldn't all fit: divide in half */
|
|
|
|
/* We assume we can always send one block! */
|
|
|
|
if (number_of_blocks <= 1) {
|
|
|
|
/* We always assume we can send one block's worth */
|
|
|
|
status_broken("Could not fit scids for single block %u",
|
|
|
|
first_blocknum);
|
2019-01-21 01:57:43 +01:00
|
|
|
return false;
|
2018-06-04 06:28:02 +02:00
|
|
|
}
|
2019-01-15 10:37:57 +01:00
|
|
|
status_debug("queue_channel_ranges full: splitting %u+%u and %u+%u(+%u)",
|
2018-06-04 06:28:02 +02:00
|
|
|
first_blocknum,
|
|
|
|
number_of_blocks / 2,
|
|
|
|
first_blocknum + number_of_blocks / 2,
|
2019-01-15 10:37:57 +01:00
|
|
|
number_of_blocks - number_of_blocks / 2,
|
|
|
|
tail_blocks);
|
2019-01-21 01:57:43 +01:00
|
|
|
return queue_channel_ranges(peer, first_blocknum, number_of_blocks / 2, 0)
|
|
|
|
&& queue_channel_ranges(peer, first_blocknum + number_of_blocks / 2,
|
|
|
|
number_of_blocks - number_of_blocks / 2,
|
|
|
|
tail_blocks);
|
2018-06-04 06:28:02 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ The peer can ask for all channels in a series of blocks. We reply with one
|
|
|
|
* or more messages containing the short_channel_ids. */
|
2018-11-05 02:21:51 +01:00
|
|
|
static u8 *handle_query_channel_range(struct peer *peer, const u8 *msg)
|
2018-06-04 06:28:02 +02:00
|
|
|
{
|
2019-01-15 10:37:57 +01:00
|
|
|
struct routing_state *rstate = peer->daemon->rstate;
|
2018-06-04 06:28:02 +02:00
|
|
|
struct bitcoin_blkid chain_hash;
|
2019-01-15 10:37:57 +01:00
|
|
|
u32 first_blocknum, number_of_blocks, tail_blocks;
|
|
|
|
struct short_channel_id last_scid;
|
2018-06-04 06:28:02 +02:00
|
|
|
|
2019-08-09 18:05:01 +02:00
|
|
|
#if EXPERIMENTAL_FEATURES
|
|
|
|
struct tlv_query_channel_range_tlvs *tlvs
|
|
|
|
= tlv_query_channel_range_tlvs_new(msg);
|
|
|
|
|
|
|
|
if (!fromwire_query_channel_range(msg, &chain_hash,
|
|
|
|
&first_blocknum, &number_of_blocks,
|
|
|
|
tlvs)) {
|
|
|
|
return towire_errorfmt(peer, NULL,
|
|
|
|
"Bad query_channel_range w/tlvs %s",
|
|
|
|
tal_hex(tmpctx, msg));
|
|
|
|
}
|
|
|
|
#else
|
2018-06-04 06:28:02 +02:00
|
|
|
if (!fromwire_query_channel_range(msg, &chain_hash,
|
|
|
|
&first_blocknum, &number_of_blocks)) {
|
2018-11-05 02:21:51 +01:00
|
|
|
return towire_errorfmt(peer, NULL,
|
|
|
|
"Bad query_channel_range %s",
|
|
|
|
tal_hex(tmpctx, msg));
|
2018-06-04 06:28:02 +02:00
|
|
|
}
|
2019-08-09 18:05:01 +02:00
|
|
|
#endif
|
2018-06-04 06:28:02 +02:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* FIXME: if they ask for the wrong chain, we should not ignore it,
|
|
|
|
* but give an empty response with the `complete` flag unset? */
|
2018-11-21 04:10:03 +01:00
|
|
|
if (!bitcoin_blkid_eq(&peer->daemon->chain_hash, &chain_hash)) {
|
2018-06-04 06:28:02 +02:00
|
|
|
status_trace("%s sent query_channel_range chainhash %s",
|
2019-04-08 11:58:32 +02:00
|
|
|
type_to_string(tmpctx, struct node_id, &peer->id),
|
2018-06-04 06:28:02 +02:00
|
|
|
type_to_string(tmpctx, struct bitcoin_blkid,
|
|
|
|
&chain_hash));
|
2018-11-05 02:21:51 +01:00
|
|
|
return NULL;
|
2018-06-04 06:28:02 +02:00
|
|
|
}
|
|
|
|
|
2019-01-15 10:37:57 +01:00
|
|
|
/* If they ask for a number_of_blocks of UINTMAX, and we have to divide
|
|
|
|
* and conquer, we'll do a lot of unnecessary work. Cap it at the
|
|
|
|
* last value we have, then send an empty reply. */
|
|
|
|
if (uintmap_last(&rstate->chanmap, &last_scid.u64)) {
|
|
|
|
u32 last_block = short_channel_id_blocknum(&last_scid);
|
|
|
|
|
|
|
|
/* u64 here avoids overflow when number_of_blocks is
|
|
|
|
UINTMAX, for example */
|
|
|
|
if ((u64)first_blocknum + number_of_blocks > last_block) {
|
|
|
|
tail_blocks = first_blocknum + number_of_blocks
|
|
|
|
- last_block - 1;
|
|
|
|
number_of_blocks -= tail_blocks;
|
|
|
|
} else
|
|
|
|
tail_blocks = 0;
|
|
|
|
} else
|
|
|
|
tail_blocks = 0;
|
|
|
|
|
2019-01-21 01:57:43 +01:00
|
|
|
if (!queue_channel_ranges(peer, first_blocknum, number_of_blocks,
|
|
|
|
tail_blocks))
|
|
|
|
return towire_errorfmt(peer, NULL,
|
|
|
|
"Invalid query_channel_range %u+%u",
|
|
|
|
first_blocknum, number_of_blocks + tail_blocks);
|
|
|
|
|
2018-11-05 02:21:51 +01:00
|
|
|
return NULL;
|
2018-06-04 06:28:02 +02:00
|
|
|
}
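To make the capping concrete, here is a standalone sketch of the same arithmetic with made-up numbers (last_block stands in for the block of the highest short_channel_id in our map, as uintmap_last() finds above): a peer asking for nearly 2^32 blocks only gets a real scan up to last_block, and everything beyond rides along as tail_blocks.

#include <stdint.h>
#include <stdio.h>

/* Simplified version of the tail_blocks calculation above. */
static void cap_query(uint32_t first_blocknum, uint32_t number_of_blocks,
		      uint32_t last_block,
		      uint32_t *capped_blocks, uint32_t *tail_blocks)
{
	/* The u64 cast avoids overflow when number_of_blocks is huge. */
	if ((uint64_t)first_blocknum + number_of_blocks > last_block) {
		*tail_blocks = first_blocknum + number_of_blocks
			- last_block - 1;
		*capped_blocks = number_of_blocks - *tail_blocks;
	} else {
		*tail_blocks = 0;
		*capped_blocks = number_of_blocks;
	}
}

int main(void)
{
	uint32_t capped, tail;

	cap_query(100000, 0xFFFFFFFE, 600000, &capped, &tail);
	/* The real scan now covers blocks 100000..600000 only; the other
	 * ~4.29 billion empty blocks are reported via tail_blocks in the
	 * final reply's number_of_blocks. */
	printf("scan %u blocks, tail %u blocks\n", capped, tail);
	return 0;
}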
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ This is the reply we get when we send query_channel_range; we keep
|
|
|
|
* expecting them until the entire range we asked for is covered. */
|
2018-11-05 02:21:51 +01:00
|
|
|
static const u8 *handle_reply_channel_range(struct peer *peer, const u8 *msg)
|
2018-06-04 06:28:02 +02:00
|
|
|
{
|
|
|
|
struct bitcoin_blkid chain;
|
|
|
|
u8 complete;
|
2018-11-13 05:03:49 +01:00
|
|
|
u32 first_blocknum, number_of_blocks, start, end;
|
|
|
|
u8 *encoded;
|
2018-06-04 06:28:02 +02:00
|
|
|
struct short_channel_id *scids;
|
|
|
|
size_t n;
|
2018-11-13 05:03:49 +01:00
|
|
|
unsigned long b;
|
2018-06-04 06:28:02 +02:00
|
|
|
|
2019-08-09 18:05:01 +02:00
|
|
|
#if EXPERIMENTAL_FEATURES
|
|
|
|
struct tlv_reply_channel_range_tlvs *tlvs
|
|
|
|
= tlv_reply_channel_range_tlvs_new(tmpctx);
|
|
|
|
if (!fromwire_reply_channel_range(tmpctx, msg, &chain, &first_blocknum,
|
|
|
|
&number_of_blocks, &complete,
|
|
|
|
&encoded, tlvs)) {
|
|
|
|
return towire_errorfmt(peer, NULL,
|
|
|
|
"Bad reply_channel_range w/tlvs %s",
|
|
|
|
tal_hex(tmpctx, msg));
|
|
|
|
}
|
|
|
|
#else
|
2018-06-04 06:28:02 +02:00
|
|
|
if (!fromwire_reply_channel_range(tmpctx, msg, &chain, &first_blocknum,
|
|
|
|
&number_of_blocks, &complete,
|
|
|
|
&encoded)) {
|
2018-11-05 02:21:51 +01:00
|
|
|
return towire_errorfmt(peer, NULL,
|
|
|
|
"Bad reply_channel_range %s",
|
|
|
|
tal_hex(tmpctx, msg));
|
2018-06-04 06:28:02 +02:00
|
|
|
}
|
2019-08-09 18:05:01 +02:00
|
|
|
#endif
|
2018-06-04 06:28:02 +02:00
|
|
|
|
2018-11-21 04:10:03 +01:00
|
|
|
if (!bitcoin_blkid_eq(&peer->daemon->chain_hash, &chain)) {
|
2018-11-05 02:21:51 +01:00
|
|
|
return towire_errorfmt(peer, NULL,
|
|
|
|
"reply_channel_range for bad chain: %s",
|
|
|
|
tal_hex(tmpctx, msg));
|
2018-06-04 06:28:02 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!peer->query_channel_blocks) {
|
2018-11-05 02:21:51 +01:00
|
|
|
return towire_errorfmt(peer, NULL,
|
|
|
|
"reply_channel_range without query: %s",
|
|
|
|
tal_hex(tmpctx, msg));
|
2018-06-04 06:28:02 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Beware overflow! */
|
2018-06-04 06:28:02 +02:00
|
|
|
if (first_blocknum + number_of_blocks < first_blocknum) {
|
2018-11-05 02:21:51 +01:00
|
|
|
return towire_errorfmt(peer, NULL,
|
|
|
|
"reply_channel_range invalid %u+%u",
|
|
|
|
first_blocknum, number_of_blocks);
|
2018-06-04 06:28:02 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
scids = decode_short_ids(tmpctx, encoded);
|
|
|
|
if (!scids) {
|
2018-11-05 02:21:51 +01:00
|
|
|
return towire_errorfmt(peer, NULL,
|
|
|
|
"Bad reply_channel_range encoding %s",
|
|
|
|
tal_hex(tmpctx, encoded));
|
2018-06-04 06:28:02 +02:00
|
|
|
}
|
|
|
|
|
2018-11-13 05:03:49 +01:00
|
|
|
status_debug("peer %s reply_channel_range %u+%u (of %u+%u) %zu scids",
|
2019-04-08 11:58:32 +02:00
|
|
|
type_to_string(tmpctx, struct node_id, &peer->id),
|
2018-11-13 05:03:49 +01:00
|
|
|
first_blocknum, number_of_blocks,
|
|
|
|
peer->range_first_blocknum,
|
|
|
|
peer->range_end_blocknum - peer->range_first_blocknum,
|
|
|
|
tal_count(scids));
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* BOLT #7:
|
|
|
|
*
|
|
|
|
* The receiver of `query_channel_range`:
|
|
|
|
*...
|
|
|
|
* - MUST respond with one or more `reply_channel_range` whose
|
|
|
|
* combined range cover the requested `first_blocknum` to
|
|
|
|
* `first_blocknum` plus `number_of_blocks` minus one.
|
|
|
|
*/
|
|
|
|
/* i.e. they can be outside the range we asked for, but they must overlap! */
|
2018-11-13 05:03:49 +01:00
|
|
|
if (first_blocknum + number_of_blocks <= peer->range_first_blocknum
|
|
|
|
|| first_blocknum >= peer->range_end_blocknum) {
|
2018-11-05 02:21:51 +01:00
|
|
|
return towire_errorfmt(peer, NULL,
|
2018-11-13 05:03:49 +01:00
|
|
|
"reply_channel_range invalid %u+%u for query %u+%u",
|
2018-11-05 02:21:51 +01:00
|
|
|
first_blocknum, number_of_blocks,
|
2018-11-13 05:03:49 +01:00
|
|
|
peer->range_first_blocknum,
|
|
|
|
peer->range_end_blocknum
|
|
|
|
- peer->range_first_blocknum);
|
2018-06-04 06:28:02 +02:00
|
|
|
}
|
|
|
|
|
2018-11-13 05:03:49 +01:00
|
|
|
start = first_blocknum;
|
|
|
|
end = first_blocknum + number_of_blocks;
|
|
|
|
/* Trim to make it a subset of what we want. */
|
|
|
|
if (start < peer->range_first_blocknum)
|
|
|
|
start = peer->range_first_blocknum;
|
|
|
|
if (end > peer->range_end_blocknum)
|
|
|
|
end = peer->range_end_blocknum;
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* We keep a bitmap of what blocks have been covered by replies: bit 0
|
|
|
|
* represents block peer->range_first_blocknum */
|
2018-11-13 05:03:49 +01:00
|
|
|
b = bitmap_ffs(peer->query_channel_blocks,
|
|
|
|
start - peer->range_first_blocknum,
|
|
|
|
end - peer->range_first_blocknum);
|
|
|
|
if (b != end - peer->range_first_blocknum) {
|
2018-11-05 02:21:51 +01:00
|
|
|
return towire_errorfmt(peer, NULL,
|
2018-11-13 05:03:49 +01:00
|
|
|
"reply_channel_range %u+%u already have block %lu",
|
|
|
|
first_blocknum, number_of_blocks,
|
|
|
|
peer->range_first_blocknum + b);
|
2018-06-04 06:28:02 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Mark that short_channel_ids for this block have been received */
|
2018-11-13 05:03:49 +01:00
|
|
|
bitmap_fill_range(peer->query_channel_blocks,
|
2018-11-21 01:36:08 +01:00
|
|
|
start - peer->range_first_blocknum,
|
|
|
|
end - peer->range_first_blocknum);
|
2018-11-13 05:03:49 +01:00
|
|
|
peer->range_blocks_remaining -= end - start;
|
2018-06-04 06:28:02 +02:00
|
|
|
|
|
|
|
/* Add scids */
|
|
|
|
n = tal_count(peer->query_channel_scids);
|
|
|
|
tal_resize(&peer->query_channel_scids, n + tal_count(scids));
|
2018-07-28 08:00:16 +02:00
|
|
|
memcpy(peer->query_channel_scids + n, scids, tal_bytelen(scids));
|
2018-06-04 06:28:02 +02:00
|
|
|
|
|
|
|
/* Still more to go? */
|
2018-11-13 05:03:49 +01:00
|
|
|
if (peer->range_blocks_remaining)
|
2018-11-05 02:21:51 +01:00
|
|
|
return NULL;
|
2018-06-04 06:28:02 +02:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* All done, send reply to lightningd: that's currently the only thing
|
|
|
|
* which triggers this (for testing). Eventually we might start probing
|
|
|
|
* for gossip information on our own. */
|
2018-06-04 06:28:02 +02:00
|
|
|
msg = towire_gossip_query_channel_range_reply(NULL,
|
|
|
|
first_blocknum,
|
|
|
|
number_of_blocks,
|
|
|
|
complete,
|
|
|
|
peer->query_channel_scids);
|
2018-10-25 01:43:05 +02:00
|
|
|
daemon_conn_send(peer->daemon->master, take(msg));
|
2018-06-04 06:28:02 +02:00
|
|
|
peer->query_channel_scids = tal_free(peer->query_channel_scids);
|
|
|
|
peer->query_channel_blocks = tal_free(peer->query_channel_blocks);
|
2018-11-05 02:21:51 +01:00
|
|
|
return NULL;
|
2018-06-04 06:28:02 +02:00
|
|
|
}
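The duplicate-coverage check above leans on ccan/bitmap; the underlying idea is easy to show with a plain bool array (a simplified sketch, not the ccan API): each reply marks the blocks it covered, and a reply touching an already-marked block is rejected.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RANGE_BLOCKS 8	/* number of blocks in the outstanding query */

/* covered[0] plays the role of bit 0 in peer->query_channel_blocks. */
static bool covered[RANGE_BLOCKS];

/* Returns false if any block in [start, end) was already covered. */
static bool mark_reply(uint32_t range_first, uint32_t start, uint32_t end)
{
	for (uint32_t b = start; b < end; b++)
		if (covered[b - range_first])
			return false;
	for (uint32_t b = start; b < end; b++)
		covered[b - range_first] = true;
	return true;
}

int main(void)
{
	uint32_t range_first = 500000;

	/* Two replies covering disjoint halves of a 500000..500007 query. */
	printf("%d\n", mark_reply(range_first, 500000, 500004));	/* 1 */
	printf("%d\n", mark_reply(range_first, 500004, 500008));	/* 1 */
	/* A reply re-covering block 500002 is a protocol error. */
	printf("%d\n", mark_reply(range_first, 500002, 500003));	/* 0 */
	return 0;
}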
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ For simplicity, all pings and pongs are forwarded to us here in gossipd. */
|
2018-11-05 02:21:51 +01:00
|
|
|
static u8 *handle_ping(struct peer *peer, const u8 *ping)
|
2018-11-05 02:16:48 +01:00
|
|
|
{
|
|
|
|
u8 *pong;
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* This checks the ping packet and makes a pong reply if needed; peer
|
|
|
|
* can specify it doesn't want a response, to simulate traffic. */
|
2018-11-05 02:21:51 +01:00
|
|
|
if (!check_ping_make_pong(NULL, ping, &pong))
|
|
|
|
return towire_errorfmt(peer, NULL, "Bad ping");
|
2018-11-05 02:16:48 +01:00
|
|
|
|
|
|
|
if (pong)
|
|
|
|
queue_peer_msg(peer, take(pong));
|
2018-11-05 02:21:51 +01:00
|
|
|
return NULL;
|
2018-11-05 02:16:48 +01:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ When we get a pong, we tell lightningd about it (it's probably a response
|
|
|
|
* to the `ping` JSON RPC command). */
|
2018-11-05 02:21:51 +01:00
|
|
|
static const u8 *handle_pong(struct peer *peer, const u8 *pong)
|
2018-11-05 02:16:48 +01:00
|
|
|
{
|
|
|
|
const char *err = got_pong(pong, &peer->num_pings_outstanding);
|
|
|
|
|
2018-11-05 02:21:51 +01:00
|
|
|
if (err)
|
|
|
|
return towire_errorfmt(peer, NULL, "%s", err);
|
2018-11-05 02:16:48 +01:00
|
|
|
|
|
|
|
daemon_conn_send(peer->daemon->master,
|
|
|
|
take(towire_gossip_ping_reply(NULL, &peer->id, true,
|
|
|
|
tal_count(pong))));
|
2018-11-05 02:21:51 +01:00
|
|
|
return NULL;
|
2018-11-05 02:16:48 +01:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ When we ask about an array of short_channel_ids, we get all channel &
|
|
|
|
* node announcements and channel updates which the peer knows. There's an
|
|
|
|
* explicit end packet; this is needed to differentiate between 'I'm slow'
|
|
|
|
* and 'I don't know those channels'. */
|
2018-11-05 02:21:51 +01:00
|
|
|
static u8 *handle_reply_short_channel_ids_end(struct peer *peer, const u8 *msg)
|
2018-11-05 02:16:48 +01:00
|
|
|
{
|
|
|
|
struct bitcoin_blkid chain;
|
|
|
|
u8 complete;
|
|
|
|
|
|
|
|
if (!fromwire_reply_short_channel_ids_end(msg, &chain, &complete)) {
|
2018-11-05 02:21:51 +01:00
|
|
|
return towire_errorfmt(peer, NULL,
|
|
|
|
"Bad reply_short_channel_ids_end %s",
|
|
|
|
tal_hex(tmpctx, msg));
|
2018-11-05 02:16:48 +01:00
|
|
|
}
|
|
|
|
|
2018-11-21 04:10:03 +01:00
|
|
|
if (!bitcoin_blkid_eq(&peer->daemon->chain_hash, &chain)) {
|
2018-11-05 02:21:51 +01:00
|
|
|
return towire_errorfmt(peer, NULL,
|
|
|
|
"reply_short_channel_ids_end for bad chain: %s",
|
|
|
|
tal_hex(tmpctx, msg));
|
2018-11-05 02:16:48 +01:00
|
|
|
}
|
|
|
|
|
2019-06-12 01:26:07 +02:00
|
|
|
if (!peer->scid_query_outstanding) {
|
2018-11-05 02:21:51 +01:00
|
|
|
return towire_errorfmt(peer, NULL,
|
|
|
|
"unexpected reply_short_channel_ids_end: %s",
|
|
|
|
tal_hex(tmpctx, msg));
|
2018-11-05 02:16:48 +01:00
|
|
|
}
|
|
|
|
|
2019-06-12 01:26:07 +02:00
|
|
|
peer->scid_query_outstanding = false;
|
|
|
|
|
|
|
|
/* If it wasn't generated by us, it's the dev interface from lightningd
|
|
|
|
*/
|
|
|
|
if (!peer->scid_query_was_internal) {
|
|
|
|
msg = towire_gossip_scids_reply(msg, true, complete);
|
|
|
|
daemon_conn_send(peer->daemon->master, take(msg));
|
|
|
|
}
|
|
|
|
|
|
|
|
/* All good, no error. */
|
2018-11-05 02:21:51 +01:00
|
|
|
return NULL;
|
2018-11-05 02:16:48 +01:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ Arbitrary ordering function of pubkeys.
|
2018-07-24 08:18:58 +02:00
|
|
|
*
|
|
|
|
* Note that we could use memcmp() here: even if they had somehow different
|
|
|
|
* bitwise representations for the same key, we copied them all from struct
|
|
|
|
* node which should make them unique. Even if not (say, a node vanished
|
|
|
|
 * and reappeared) we'd just end up sending two node_announcements for the
|
|
|
|
* same node.
|
|
|
|
*/
|
2019-04-08 11:58:32 +02:00
|
|
|
static int pubkey_order(const struct node_id *k1,
|
|
|
|
const struct node_id *k2,
|
2018-07-24 08:18:58 +02:00
|
|
|
void *unused UNUSED)
|
2017-10-25 11:18:05 +02:00
|
|
|
{
|
2019-04-08 11:58:32 +02:00
|
|
|
return node_id_cmp(k1, k2);
|
2017-10-25 11:18:05 +02:00
|
|
|
}
|
|
|
|
|
2019-04-08 11:58:32 +02:00
|
|
|
static void uniquify_node_ids(struct node_id **ids)
|
2017-01-10 06:08:33 +01:00
|
|
|
{
|
2018-07-24 08:18:58 +02:00
|
|
|
size_t dst, src;
|
2018-06-04 06:20:25 +02:00
|
|
|
|
|
|
|
/* BOLT #7:
|
|
|
|
*
|
|
|
|
* - MUST follow with any `node_announcement`s for each
|
|
|
|
* `channel_announcement`
|
|
|
|
*
|
|
|
|
* - SHOULD avoid sending duplicate `node_announcements` in
|
|
|
|
* response to a single `query_short_channel_ids`.
|
|
|
|
*/
|
2018-11-21 01:36:08 +01:00
|
|
|
/* ccan/asort is a typesafe qsort wrapper: like most ccan modules
|
|
|
|
* it eschews exposing 'void *' pointers and ensures that the
|
|
|
|
* callback function and its arguments match types correctly. */
|
2018-06-04 06:20:25 +02:00
|
|
|
asort(*ids, tal_count(*ids), pubkey_order, NULL);
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Compact the array */
|
2018-06-04 06:20:25 +02:00
|
|
|
for (dst = 0, src = 0; src < tal_count(*ids); src++) {
|
2019-04-08 11:58:32 +02:00
|
|
|
if (dst && node_id_eq(&(*ids)[dst-1], &(*ids)[src]))
|
2018-06-04 06:20:25 +02:00
|
|
|
continue;
|
|
|
|
(*ids)[dst++] = (*ids)[src];
|
|
|
|
}
|
2018-11-21 01:36:08 +01:00
|
|
|
|
|
|
|
/* And trim to length, so tal_count() gives correct answer. */
|
2018-06-04 06:20:25 +02:00
|
|
|
tal_resize(ids, dst);
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ We are fairly careful to avoid the peer DoSing us with channel queries:
|
|
|
|
* this routine sends information about a single short_channel_id, unless
|
|
|
|
* it's finished all of them. */
|
2018-11-13 05:03:50 +01:00
|
|
|
static void maybe_create_next_scid_reply(struct peer *peer)
|
2018-06-04 06:19:25 +02:00
|
|
|
{
|
|
|
|
struct routing_state *rstate = peer->daemon->rstate;
|
|
|
|
size_t i, num;
|
|
|
|
bool sent = false;
|
|
|
|
|
|
|
|
/* BOLT #7:
|
|
|
|
*
|
2018-06-17 12:13:44 +02:00
|
|
|
* - MUST respond to each known `short_channel_id` with a
|
2019-01-14 03:22:05 +01:00
|
|
|
* `channel_announcement` and the latest `channel_update` for each end
|
2018-06-17 12:13:44 +02:00
|
|
|
* - SHOULD NOT wait for the next outgoing gossip flush
|
|
|
|
* to send these.
|
2018-06-04 06:19:25 +02:00
|
|
|
*/
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Search for next short_channel_id we know about. */
|
2018-06-04 06:19:25 +02:00
|
|
|
num = tal_count(peer->scid_queries);
|
|
|
|
for (i = peer->scid_query_idx; !sent && i < num; i++) {
|
|
|
|
struct chan *chan;
|
|
|
|
|
|
|
|
chan = get_channel(rstate, &peer->scid_queries[i]);
|
2019-04-11 07:15:22 +02:00
|
|
|
if (!chan || !is_chan_public(chan))
|
2018-06-04 06:19:25 +02:00
|
|
|
continue;
|
|
|
|
|
2019-08-09 18:07:01 +02:00
|
|
|
/* BOLT-61a1365a45cc8b463ddbbe3429d350f8eac787dd #7:
|
|
|
|
* - if bit 0 of `query_flag` is set:
|
|
|
|
* - MUST reply with a `channel_announcement`
|
|
|
|
*/
|
|
|
|
if (peer->scid_query_flags[i] & SCID_QF_ANNOUNCE)
|
|
|
|
queue_peer_from_store(peer, &chan->bcast);
|
|
|
|
|
|
|
|
/* BOLT-61a1365a45cc8b463ddbbe3429d350f8eac787dd #7:
|
|
|
|
* - if bit 1 of `query_flag` is set and it has received a
|
|
|
|
* `channel_update` from `node_id_1`:
|
|
|
|
* - MUST reply with the latest `channel_update` for
|
|
|
|
* `node_id_1`
|
|
|
|
* - if bit 2 of `query_flag` is set and it has received a
|
|
|
|
* `channel_update` from `node_id_2`:
|
|
|
|
* - MUST reply with the latest `channel_update` for
|
|
|
|
* `node_id_2` */
|
|
|
|
if ((peer->scid_query_flags[i] & SCID_QF_UPDATE1)
|
|
|
|
&& is_halfchan_defined(&chan->half[0]))
|
2019-04-11 07:16:30 +02:00
|
|
|
queue_peer_from_store(peer, &chan->half[0].bcast);
|
2019-08-09 18:07:01 +02:00
|
|
|
if ((peer->scid_query_flags[i] & SCID_QF_UPDATE2)
|
|
|
|
&& is_halfchan_defined(&chan->half[1]))
|
2019-04-11 07:16:30 +02:00
|
|
|
queue_peer_from_store(peer, &chan->half[1].bcast);
|
2018-06-04 06:20:25 +02:00
|
|
|
|
2019-08-09 18:07:01 +02:00
|
|
|
/* BOLT-61a1365a45cc8b463ddbbe3429d350f8eac787dd #7:
|
|
|
|
* - if bit 3 of `query_flag` is set and it has received
|
|
|
|
* a `node_announcement` from `node_id_1`:
|
|
|
|
* - MUST reply with the latest `node_announcement` for
|
|
|
|
* `node_id_1`
|
|
|
|
* - if bit 4 of `query_flag` is set and it has received a
|
|
|
|
* `node_announcement` from `node_id_2`:
|
|
|
|
* - MUST reply with the latest `node_announcement` for
|
|
|
|
* `node_id_2` */
|
|
|
|
/* Save node ids for later transmission of node_announcement */
|
|
|
|
if (peer->scid_query_flags[i] & SCID_QF_NODE1)
|
|
|
|
tal_arr_expand(&peer->scid_query_nodes,
|
|
|
|
chan->nodes[0]->id);
|
|
|
|
if (peer->scid_query_flags[i] & SCID_QF_NODE2)
|
|
|
|
tal_arr_expand(&peer->scid_query_nodes,
|
|
|
|
chan->nodes[1]->id);
|
2018-06-04 06:19:25 +02:00
|
|
|
sent = true;
|
|
|
|
}
|
2018-06-04 06:20:25 +02:00
|
|
|
|
|
|
|
/* Just finished channels? Remove duplicate nodes. */
|
|
|
|
if (peer->scid_query_idx != num && i == num)
|
|
|
|
uniquify_node_ids(&peer->scid_query_nodes);
|
2018-11-21 01:36:08 +01:00
|
|
|
|
|
|
|
/* Update index for next time we're called. */
|
2018-06-04 06:19:25 +02:00
|
|
|
peer->scid_query_idx = i;
|
|
|
|
|
2018-06-04 06:20:25 +02:00
|
|
|
/* BOLT #7:
|
|
|
|
*
|
|
|
|
* - MUST follow with any `node_announcement`s for each
|
|
|
|
* `channel_announcement`
|
|
|
|
* - SHOULD avoid sending duplicate `node_announcements` in response
|
|
|
|
* to a single `query_short_channel_ids`.
|
|
|
|
*/
|
2018-11-21 01:36:08 +01:00
|
|
|
/* If we haven't sent anything above, we look for the next
|
|
|
|
* node_announcement to send. */
|
2018-06-04 06:20:25 +02:00
|
|
|
num = tal_count(peer->scid_query_nodes);
|
|
|
|
for (i = peer->scid_query_nodes_idx; !sent && i < num; i++) {
|
|
|
|
const struct node *n;
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Not every node announces itself (we know it exists because
|
|
|
|
* of a channel_announcement, however) */
|
2018-06-04 06:20:25 +02:00
|
|
|
n = get_node(rstate, &peer->scid_query_nodes[i]);
|
2019-04-10 09:31:29 +02:00
|
|
|
if (!n || !n->bcast.index)
|
2018-06-04 06:20:25 +02:00
|
|
|
continue;
|
|
|
|
|
2019-04-11 07:15:22 +02:00
|
|
|
queue_peer_from_store(peer, &n->bcast);
|
2018-06-04 06:20:25 +02:00
|
|
|
sent = true;
|
|
|
|
}
|
|
|
|
peer->scid_query_nodes_idx = i;
|
|
|
|
|
2018-06-04 06:19:25 +02:00
|
|
|
/* All finished? */
|
2018-06-04 06:20:25 +02:00
|
|
|
if (peer->scid_queries && peer->scid_query_nodes_idx == num) {
|
|
|
|
/* BOLT #7:
|
|
|
|
*
|
|
|
|
* - MUST follow these responses with
|
|
|
|
* `reply_short_channel_ids_end`.
|
|
|
|
* - if does not maintain up-to-date channel information for
|
|
|
|
* `chain_hash`:
|
|
|
|
* - MUST set `complete` to 0.
|
|
|
|
* - otherwise:
|
|
|
|
* - SHOULD set `complete` to 1.
|
|
|
|
*/
|
2018-11-21 01:36:08 +01:00
|
|
|
/* FIXME: We consider ourselves to have complete knowledge. */
|
2018-06-04 06:19:25 +02:00
|
|
|
u8 *end = towire_reply_short_channel_ids_end(peer,
|
2018-11-21 04:10:03 +01:00
|
|
|
&peer->daemon->chain_hash,
|
2018-06-04 06:19:25 +02:00
|
|
|
true);
|
|
|
|
queue_peer_msg(peer, take(end));
|
2018-11-21 01:36:08 +01:00
|
|
|
|
|
|
|
/* We're done! Clean up so we simply pass-through next time. */
|
2018-06-04 06:19:25 +02:00
|
|
|
peer->scid_queries = tal_free(peer->scid_queries);
|
2019-08-09 18:07:01 +02:00
|
|
|
peer->scid_query_flags = tal_free(peer->scid_query_flags);
|
2018-06-04 06:20:25 +02:00
|
|
|
peer->scid_query_idx = 0;
|
|
|
|
peer->scid_query_nodes = tal_free(peer->scid_query_nodes);
|
|
|
|
peer->scid_query_nodes_idx = 0;
|
2018-06-04 06:19:25 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ This is called when the outgoing queue is empty; gossip has lower priority
|
|
|
|
* than just about anything else. */
|
2018-11-13 05:03:50 +01:00
|
|
|
static void dump_gossip(struct peer *peer)
|
2018-11-05 02:16:48 +01:00
|
|
|
{
|
|
|
|
/* Do we have scid query replies to send? */
|
2018-11-13 05:03:50 +01:00
|
|
|
maybe_create_next_scid_reply(peer);
|
2018-11-05 02:16:48 +01:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ This generates a `channel_update` message for one of our channels. We do
|
|
|
|
* this here, rather than in `channeld` because we (may) need to do it
|
|
|
|
* ourselves anyway if channeld dies, or when we refresh it once a week. */
|
2018-09-25 07:43:56 +02:00
|
|
|
static void update_local_channel(struct daemon *daemon,
|
2018-05-21 06:35:40 +02:00
|
|
|
const struct chan *chan,
|
|
|
|
int direction,
|
|
|
|
bool disable,
|
|
|
|
u16 cltv_expiry_delta,
|
2019-02-21 04:45:55 +01:00
|
|
|
struct amount_msat htlc_minimum,
|
2018-05-21 06:35:40 +02:00
|
|
|
u32 fee_base_msat,
|
2018-09-25 07:43:56 +02:00
|
|
|
u32 fee_proportional_millionths,
|
2019-02-21 04:45:55 +01:00
|
|
|
struct amount_msat htlc_maximum,
|
2018-09-25 07:43:56 +02:00
|
|
|
const char *caller)
|
2018-05-21 06:35:40 +02:00
|
|
|
{
|
|
|
|
secp256k1_ecdsa_signature dummy_sig;
|
|
|
|
u8 *update, *msg;
|
|
|
|
u32 timestamp = time_now().ts.tv_sec;
|
2018-09-20 02:59:46 +02:00
|
|
|
u8 message_flags, channel_flags;
|
|
|
|
|
2018-05-21 06:35:40 +02:00
|
|
|
/* So valgrind doesn't complain */
|
|
|
|
memset(&dummy_sig, 0, sizeof(dummy_sig));
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* BOLT #7:
|
|
|
|
*
|
|
|
|
* The origin node:
|
|
|
|
*...
|
|
|
|
* - MUST set `timestamp` to greater than 0, AND to greater than any
|
|
|
|
* previously-sent `channel_update` for this `short_channel_id`.
|
|
|
|
* - SHOULD base `timestamp` on a UNIX timestamp.
|
|
|
|
*/
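	/* In practice we use the current UNIX time, and if that would collide
	 * with the timestamp of our previous update (two updates within the
	 * same second) we simply bump it by one to keep it strictly increasing. */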
|
2018-05-21 06:35:40 +02:00
|
|
|
if (is_halfchan_defined(&chan->half[direction])
|
2019-04-10 09:31:29 +02:00
|
|
|
&& timestamp == chan->half[direction].bcast.timestamp)
|
2018-05-21 06:35:40 +02:00
|
|
|
timestamp++;
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* BOLT #7:
|
|
|
|
*
|
|
|
|
* The `channel_flags` bitfield is used to indicate the direction of
|
|
|
|
* the channel: it identifies the node that this update originated
|
|
|
|
* from and signals various options concerning the channel. The
|
|
|
|
* following table specifies the meaning of its individual bits:
|
|
|
|
*
|
|
|
|
* | Bit Position | Name | Meaning |
|
|
|
|
* | ------------- | ----------- | -------------------------------- |
|
|
|
|
* | 0 | `direction` | Direction this update refers to. |
|
|
|
|
* | 1 | `disable` | Disable the channel. |
|
|
|
|
*/
|
2018-09-20 02:59:46 +02:00
|
|
|
channel_flags = direction;
|
2018-05-21 06:35:40 +02:00
|
|
|
if (disable)
|
2018-09-20 02:59:46 +02:00
|
|
|
channel_flags |= ROUTING_FLAGS_DISABLED;
|
2018-05-21 06:35:40 +02:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* BOLT #7:
|
|
|
|
*
|
|
|
|
* The `message_flags` bitfield is used to indicate the presence of
|
|
|
|
* optional fields in the `channel_update` message:
|
|
|
|
*
|
|
|
|
*| Bit Position | Name | Field |
|
|
|
|
*...
|
|
|
|
*| 0 | `option_channel_htlc_max` | `htlc_maximum_msat` |
|
|
|
|
*/
|
2018-10-16 02:35:08 +02:00
|
|
|
message_flags = 0 | ROUTING_OPT_HTLC_MAX_MSAT;
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* We create an update with a dummy signature, and hand to hsmd to get
|
|
|
|
* it signed. */
|
2018-10-16 02:35:08 +02:00
|
|
|
update = towire_channel_update_option_channel_htlc_max(tmpctx, &dummy_sig,
|
2018-11-21 04:10:03 +01:00
|
|
|
&daemon->chain_hash,
|
2018-05-21 06:35:40 +02:00
|
|
|
&chan->scid,
|
|
|
|
timestamp,
|
2018-09-20 02:59:46 +02:00
|
|
|
message_flags, channel_flags,
|
|
|
|
cltv_expiry_delta,
|
2019-02-21 04:45:55 +01:00
|
|
|
htlc_minimum,
|
2018-05-21 06:35:40 +02:00
|
|
|
fee_base_msat,
|
2018-10-16 02:35:08 +02:00
|
|
|
fee_proportional_millionths,
|
2019-02-21 04:45:55 +01:00
|
|
|
htlc_maximum);
|
2018-05-21 06:35:40 +02:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Note that we treat the hsmd as synchronous. This is simple (no
|
|
|
|
	 * callback hell!), but may need to change to async if we ever want
|
|
|
|
* remote HSMs */
|
2018-05-21 06:35:40 +02:00
|
|
|
if (!wire_sync_write(HSM_FD,
|
|
|
|
towire_hsm_cupdate_sig_req(tmpctx, update))) {
|
|
|
|
status_failed(STATUS_FAIL_HSM_IO, "Writing cupdate_sig_req: %s",
|
|
|
|
strerror(errno));
|
|
|
|
}
|
|
|
|
|
|
|
|
msg = wire_sync_read(tmpctx, HSM_FD);
|
2018-09-25 07:43:56 +02:00
|
|
|
if (!msg || !fromwire_hsm_cupdate_sig_reply(NULL, msg, &update)) {
|
2018-05-21 06:35:40 +02:00
|
|
|
status_failed(STATUS_FAIL_HSM_IO,
|
|
|
|
"Reading cupdate_sig_req: %s",
|
|
|
|
strerror(errno));
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* BOLT #7:
|
|
|
|
*
|
|
|
|
* The origin node:
|
2019-01-14 03:22:05 +01:00
|
|
|
* - MAY create a `channel_update` to communicate the channel parameters to the
|
|
|
|
* channel peer, even though the channel has not yet been announced (i.e. the
|
|
|
|
* `announce_channel` bit was not set).
|
2018-11-21 01:36:08 +01:00
|
|
|
*/
|
2018-09-25 07:43:56 +02:00
|
|
|
if (!is_chan_public(chan)) {
|
2018-11-21 01:36:08 +01:00
|
|
|
/* handle_channel_update will not put private updates in the
|
|
|
|
* broadcast list, but we send it direct to the peer (if we
|
|
|
|
* have one connected) now */
|
2018-09-25 07:43:56 +02:00
|
|
|
struct peer *peer = find_peer(daemon,
|
|
|
|
&chan->nodes[!direction]->id);
|
|
|
|
if (peer)
|
|
|
|
queue_peer_msg(peer, update);
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* We feed it into routing.c like any other channel_update; it may
|
|
|
|
* discard it (eg. non-public channel), but it should not complain
|
|
|
|
* about it being invalid! */
|
2019-06-12 01:27:07 +02:00
|
|
|
msg = handle_channel_update(daemon->rstate, take(update), caller, NULL);
|
2018-09-25 07:43:56 +02:00
|
|
|
if (msg)
|
|
|
|
status_failed(STATUS_FAIL_INTERNAL_ERROR,
|
|
|
|
"%s: rejected local channel update %s: %s",
|
|
|
|
caller,
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Normally we must not touch something taken()
|
|
|
|
* but we're in deep trouble anyway, and
|
|
|
|
* handle_channel_update only tal_steals onto
|
|
|
|
* tmpctx, so it's actually OK. */
|
2018-09-25 07:43:56 +02:00
|
|
|
tal_hex(tmpctx, update),
|
|
|
|
tal_hex(tmpctx, msg));
|
2018-05-21 06:35:40 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ We generate local channel updates lazily; most of the time we simply
|
|
|
|
* toggle the `local_disabled` flag so we don't use it to route. We never
|
|
|
|
* change anything else after startup (yet!) */
|
2018-09-25 07:43:56 +02:00
|
|
|
static void maybe_update_local_channel(struct daemon *daemon,
|
|
|
|
struct chan *chan, int direction)
|
2018-07-02 22:54:12 +02:00
|
|
|
{
|
2018-09-25 07:43:56 +02:00
|
|
|
const struct half_chan *hc = &chan->half[direction];
|
2019-04-11 07:15:22 +02:00
|
|
|
bool local_disabled;
|
2018-07-02 22:54:12 +02:00
|
|
|
|
2018-09-25 07:43:56 +02:00
|
|
|
/* Don't generate a channel_update for an uninitialized channel. */
|
2019-04-11 07:16:30 +02:00
|
|
|
if (!is_halfchan_defined(hc))
|
2018-09-25 07:43:56 +02:00
|
|
|
return;
|
|
|
|
|
|
|
|
/* Nothing to update? */
|
2019-04-11 07:15:22 +02:00
|
|
|
local_disabled = is_chan_local_disabled(daemon->rstate, chan);
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ Note the inversions here on both sides, which is cheap conversion to
|
|
|
|
* boolean for the RHS! */
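	/* In other words: if the DISABLED bit we last advertised already
	 * matches our current local view, there is nothing to regenerate. */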
|
2019-04-11 07:15:22 +02:00
|
|
|
if (!local_disabled == !(hc->channel_flags & ROUTING_FLAGS_DISABLED))
|
2018-09-25 07:43:56 +02:00
|
|
|
return;
|
|
|
|
|
|
|
|
update_local_channel(daemon, chan, direction,
|
2019-04-11 07:15:22 +02:00
|
|
|
local_disabled,
|
2018-09-25 07:43:56 +02:00
|
|
|
hc->delay,
|
2019-02-21 04:45:55 +01:00
|
|
|
hc->htlc_minimum,
|
2018-09-25 07:43:56 +02:00
|
|
|
hc->base_fee,
|
|
|
|
hc->proportional_fee,
|
2019-02-21 04:45:55 +01:00
|
|
|
hc->htlc_maximum,
|
2018-11-21 01:36:08 +01:00
|
|
|
			     /* Note __func__, the magic C identifier which expands to the
|
|
|
|
* function name, for debug messages */
|
2018-09-25 07:43:56 +02:00
|
|
|
__func__);
|
2018-07-02 22:54:12 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ This helper figures out which direction of the channel is from-us; if
|
|
|
|
* neither, it returns false. This meets Linus' rule "Always return the error",
|
|
|
|
* without doing some horrible 0/1/-1 return. */
|
2018-09-25 07:43:56 +02:00
|
|
|
static bool local_direction(struct daemon *daemon,
|
|
|
|
const struct chan *chan,
|
|
|
|
int *direction)
|
2018-05-21 06:35:40 +02:00
|
|
|
{
|
2018-09-25 07:43:56 +02:00
|
|
|
for (*direction = 0; *direction < 2; (*direction)++) {
|
2019-04-08 11:58:32 +02:00
|
|
|
if (node_id_eq(&chan->nodes[*direction]->id, &daemon->id))
|
2018-09-25 07:43:56 +02:00
|
|
|
return true;
|
2018-05-21 06:35:40 +02:00
|
|
|
}
|
2018-09-25 07:43:56 +02:00
|
|
|
return false;
|
2018-07-03 13:30:36 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ This is when channeld asks us for a channel_update for a local channel.
|
|
|
|
* It does that to fill in the error field when lightningd fails an HTLC and
|
|
|
|
* sets the UPDATE bit in the error type. lightningd is too important to
|
|
|
|
* fetch this itself, so channeld does it (channeld has to talk to us for
|
|
|
|
* other things anyway, so why not?). */
|
2018-11-05 02:21:51 +01:00
|
|
|
static bool handle_get_update(struct peer *peer, const u8 *msg)
|
2018-07-03 13:30:36 +02:00
|
|
|
{
|
2018-09-25 07:43:56 +02:00
|
|
|
struct short_channel_id scid;
|
2018-07-03 13:30:36 +02:00
|
|
|
struct chan *chan;
|
2018-09-25 07:43:56 +02:00
|
|
|
const u8 *update;
|
|
|
|
struct routing_state *rstate = peer->daemon->rstate;
|
|
|
|
int direction;
|
2018-05-21 06:35:40 +02:00
|
|
|
|
2018-11-13 05:03:51 +01:00
|
|
|
if (!fromwire_gossipd_get_update(msg, &scid)) {
|
2018-11-05 02:21:51 +01:00
|
|
|
status_broken("peer %s sent bad gossip_get_update %s",
|
2019-04-08 11:58:32 +02:00
|
|
|
type_to_string(tmpctx, struct node_id, &peer->id),
|
2018-11-05 02:21:51 +01:00
|
|
|
tal_hex(tmpctx, msg));
|
|
|
|
return false;
|
2018-05-21 06:35:40 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* It's possible that the channel has just closed (though v. unlikely) */
|
2018-09-25 07:43:56 +02:00
|
|
|
chan = get_channel(rstate, &scid);
|
|
|
|
if (!chan) {
|
|
|
|
status_unusual("peer %s scid %s: unknown channel",
|
2019-04-08 11:58:32 +02:00
|
|
|
type_to_string(tmpctx, struct node_id, &peer->id),
|
2018-09-25 07:43:56 +02:00
|
|
|
type_to_string(tmpctx, struct short_channel_id,
|
|
|
|
&scid));
|
|
|
|
update = NULL;
|
|
|
|
goto out;
|
2018-07-02 22:54:12 +02:00
|
|
|
}
|
|
|
|
|
2018-09-25 07:43:56 +02:00
|
|
|
/* We want the update that comes from our end. */
|
|
|
|
if (!local_direction(peer->daemon, chan, &direction)) {
|
|
|
|
status_unusual("peer %s scid %s: not our channel?",
|
2019-04-08 11:58:32 +02:00
|
|
|
type_to_string(tmpctx, struct node_id, &peer->id),
|
2018-09-25 07:43:56 +02:00
|
|
|
type_to_string(tmpctx,
|
|
|
|
struct short_channel_id,
|
|
|
|
&scid));
|
|
|
|
update = NULL;
|
|
|
|
goto out;
|
2018-07-03 13:30:36 +02:00
|
|
|
}
|
|
|
|
|
2018-09-25 07:43:56 +02:00
|
|
|
/* Since we're going to send it out, make sure it's up-to-date. */
|
|
|
|
maybe_update_local_channel(peer->daemon, chan, direction);
|
2018-06-04 06:15:25 +02:00
|
|
|
|
2019-04-11 07:16:30 +02:00
|
|
|
/* It's possible this is zero, if we've never sent a channel_update
|
2018-11-21 01:36:08 +01:00
|
|
|
* for that channel. */
|
2019-04-11 07:16:30 +02:00
|
|
|
if (!is_halfchan_defined(&chan->half[direction]))
|
|
|
|
update = NULL;
|
|
|
|
else
|
2019-06-03 20:22:25 +02:00
|
|
|
update = gossip_store_get(tmpctx, rstate->gs,
|
2019-04-11 07:16:30 +02:00
|
|
|
chan->half[direction].bcast.index);
|
2018-09-25 07:43:56 +02:00
|
|
|
out:
|
|
|
|
status_trace("peer %s schanid %s: %s update",
|
2019-04-08 11:58:32 +02:00
|
|
|
type_to_string(tmpctx, struct node_id, &peer->id),
|
2018-09-25 07:43:56 +02:00
|
|
|
type_to_string(tmpctx, struct short_channel_id, &scid),
|
|
|
|
update ? "got" : "no");
|
2018-07-03 13:30:36 +02:00
|
|
|
|
2018-11-13 05:03:51 +01:00
|
|
|
msg = towire_gossipd_get_update_reply(NULL, update);
|
2018-10-25 01:43:05 +02:00
|
|
|
daemon_conn_send(peer->dc, take(msg));
|
2018-11-05 02:21:51 +01:00
|
|
|
return true;
|
2018-07-03 13:30:36 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ Return true if the channel information has changed. This can only
|
|
|
|
* currently happen if the user restarts with different fee options, but we
|
|
|
|
* don't assume that. */
|
2018-09-25 07:43:56 +02:00
|
|
|
static bool halfchan_new_info(const struct half_chan *hc,
|
2019-02-21 04:45:55 +01:00
|
|
|
u16 cltv_delta, struct amount_msat htlc_minimum,
|
2018-10-16 02:35:08 +02:00
|
|
|
u32 fee_base_msat, u32 fee_proportional_millionths,
|
2019-02-21 04:45:55 +01:00
|
|
|
struct amount_msat htlc_maximum)
|
2018-07-03 13:43:45 +02:00
|
|
|
{
|
2018-09-25 07:43:56 +02:00
|
|
|
if (!is_halfchan_defined(hc))
|
|
|
|
return true;
|
2018-07-03 13:43:45 +02:00
|
|
|
|
2018-09-25 07:43:56 +02:00
|
|
|
return hc->delay != cltv_delta
|
2019-02-21 04:45:55 +01:00
|
|
|
|| !amount_msat_eq(hc->htlc_minimum, htlc_minimum)
|
2018-09-25 07:43:56 +02:00
|
|
|
|| hc->base_fee != fee_base_msat
|
2018-10-16 02:35:08 +02:00
|
|
|
|| hc->proportional_fee != fee_proportional_millionths
|
2019-02-21 04:45:55 +01:00
|
|
|
|| !amount_msat_eq(hc->htlc_maximum, htlc_maximum);
|
2018-07-03 13:43:45 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ channeld asks us to update the local channel. */
|
2018-11-05 02:21:51 +01:00
|
|
|
static bool handle_local_channel_update(struct peer *peer, const u8 *msg)
|
2018-07-03 13:30:36 +02:00
|
|
|
{
|
|
|
|
struct chan *chan;
|
2018-09-25 07:43:56 +02:00
|
|
|
struct short_channel_id scid;
|
|
|
|
bool disable;
|
|
|
|
u16 cltv_expiry_delta;
|
2019-02-21 04:45:55 +01:00
|
|
|
struct amount_msat htlc_minimum, htlc_maximum;
|
2018-09-25 07:43:56 +02:00
|
|
|
u32 fee_base_msat;
|
|
|
|
u32 fee_proportional_millionths;
|
|
|
|
int direction;
|
2018-07-03 13:30:36 +02:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* FIXME: We should get scid from lightningd when setting up the
|
|
|
|
* connection, so no per-peer daemon can mess with channels other than
|
|
|
|
* its own! */
|
2018-11-13 05:03:51 +01:00
|
|
|
if (!fromwire_gossipd_local_channel_update(msg,
|
|
|
|
&scid,
|
|
|
|
&disable,
|
|
|
|
&cltv_expiry_delta,
|
2019-02-21 04:45:55 +01:00
|
|
|
&htlc_minimum,
|
2018-11-13 05:03:51 +01:00
|
|
|
&fee_base_msat,
|
|
|
|
&fee_proportional_millionths,
|
2019-02-21 04:45:55 +01:00
|
|
|
&htlc_maximum)) {
|
2018-07-03 13:30:36 +02:00
|
|
|
status_broken("peer %s bad local_channel_update %s",
|
2019-04-08 11:58:32 +02:00
|
|
|
type_to_string(tmpctx, struct node_id, &peer->id),
|
2018-07-03 13:30:36 +02:00
|
|
|
tal_hex(tmpctx, msg));
|
2018-11-05 02:21:51 +01:00
|
|
|
return false;
|
2018-07-03 13:30:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Can theoretically happen if channel just closed. */
|
2018-09-25 07:43:56 +02:00
|
|
|
chan = get_channel(peer->daemon->rstate, &scid);
|
2018-07-03 13:30:36 +02:00
|
|
|
if (!chan) {
|
|
|
|
status_trace("peer %s local_channel_update for unknown %s",
|
2019-04-08 11:58:32 +02:00
|
|
|
type_to_string(tmpctx, struct node_id, &peer->id),
|
2018-07-03 13:30:36 +02:00
|
|
|
type_to_string(tmpctx, struct short_channel_id,
|
2018-09-25 07:43:56 +02:00
|
|
|
&scid));
|
2018-11-05 02:21:51 +01:00
|
|
|
return true;
|
2018-07-03 13:30:36 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* You shouldn't be asking for a non-local channel though. */
|
2018-09-25 07:43:56 +02:00
|
|
|
if (!local_direction(peer->daemon, chan, &direction)) {
|
2018-07-03 13:30:36 +02:00
|
|
|
status_broken("peer %s bad local_channel_update for non-local %s",
|
2019-04-08 11:58:32 +02:00
|
|
|
type_to_string(tmpctx, struct node_id, &peer->id),
|
2018-07-03 13:30:36 +02:00
|
|
|
type_to_string(tmpctx, struct short_channel_id,
|
2018-09-25 07:43:56 +02:00
|
|
|
&scid));
|
2018-11-05 02:21:51 +01:00
|
|
|
return false;
|
2018-07-03 13:30:36 +02:00
|
|
|
}
|
|
|
|
|
2018-09-25 07:43:56 +02:00
|
|
|
/* We could change configuration on restart; update immediately.
|
|
|
|
* Or, if we're *enabling* an announced-disabled channel.
|
|
|
|
* Or, if it's an unannounced channel (only sending to peer). */
|
|
|
|
if (halfchan_new_info(&chan->half[direction],
|
2019-02-21 04:45:55 +01:00
|
|
|
cltv_expiry_delta, htlc_minimum,
|
2018-10-16 02:35:08 +02:00
|
|
|
fee_base_msat, fee_proportional_millionths,
|
2019-02-21 04:45:55 +01:00
|
|
|
htlc_maximum)
|
2018-09-25 07:43:56 +02:00
|
|
|
|| ((chan->half[direction].channel_flags & ROUTING_FLAGS_DISABLED)
|
|
|
|
&& !disable)
|
|
|
|
|| !is_chan_public(chan)) {
|
|
|
|
update_local_channel(peer->daemon, chan, direction,
|
|
|
|
disable,
|
|
|
|
cltv_expiry_delta,
|
2019-02-21 04:45:55 +01:00
|
|
|
htlc_minimum,
|
2018-09-25 07:43:56 +02:00
|
|
|
fee_base_msat,
|
|
|
|
fee_proportional_millionths,
|
2019-02-21 04:45:55 +01:00
|
|
|
htlc_maximum,
|
2018-09-25 07:43:56 +02:00
|
|
|
__func__);
|
|
|
|
}
|
2018-08-14 01:02:04 +02:00
|
|
|
|
2018-09-25 07:43:56 +02:00
|
|
|
/* Normal case: just toggle local_disabled, and generate broadcast in
|
|
|
|
* maybe_update_local_channel when/if someone asks about it. */
|
2019-04-11 07:15:22 +02:00
|
|
|
if (disable)
|
|
|
|
local_disable_chan(peer->daemon->rstate, chan);
|
|
|
|
else
|
|
|
|
local_enable_chan(peer->daemon->rstate, chan);
|
|
|
|
|
2018-11-05 02:21:51 +01:00
|
|
|
return true;
|
2018-05-21 06:35:40 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ This is where the per-peer daemons send us messages. It's either forwarded
|
|
|
|
* gossip, or a request for information. We deliberately use non-overlapping
|
|
|
|
* message types so we can distinguish them. */
|
2018-11-05 02:21:51 +01:00
|
|
|
static struct io_plan *peer_msg_in(struct io_conn *conn,
|
2018-10-25 01:43:05 +02:00
|
|
|
const u8 *msg,
|
|
|
|
struct peer *peer)
|
2017-03-09 16:56:04 +01:00
|
|
|
{
|
2018-11-05 02:21:51 +01:00
|
|
|
const u8 *err;
|
|
|
|
bool ok;
|
|
|
|
|
|
|
|
/* These are messages relayed from peer */
|
|
|
|
switch ((enum wire_type)fromwire_peektype(msg)) {
|
|
|
|
case WIRE_CHANNEL_ANNOUNCEMENT:
|
|
|
|
err = handle_channel_announcement_msg(peer, msg);
|
|
|
|
goto handled_relay;
|
|
|
|
case WIRE_CHANNEL_UPDATE:
|
|
|
|
err = handle_channel_update_msg(peer, msg);
|
|
|
|
goto handled_relay;
|
|
|
|
case WIRE_NODE_ANNOUNCEMENT:
|
|
|
|
err = handle_node_announcement(peer->daemon->rstate, msg);
|
|
|
|
goto handled_relay;
|
|
|
|
case WIRE_QUERY_CHANNEL_RANGE:
|
|
|
|
err = handle_query_channel_range(peer, msg);
|
|
|
|
goto handled_relay;
|
|
|
|
case WIRE_REPLY_CHANNEL_RANGE:
|
|
|
|
err = handle_reply_channel_range(peer, msg);
|
|
|
|
goto handled_relay;
|
|
|
|
case WIRE_QUERY_SHORT_CHANNEL_IDS:
|
|
|
|
err = handle_query_short_channel_ids(peer, msg);
|
|
|
|
goto handled_relay;
|
|
|
|
case WIRE_REPLY_SHORT_CHANNEL_IDS_END:
|
|
|
|
err = handle_reply_short_channel_ids_end(peer, msg);
|
|
|
|
goto handled_relay;
|
|
|
|
case WIRE_PING:
|
|
|
|
err = handle_ping(peer, msg);
|
|
|
|
goto handled_relay;
|
|
|
|
case WIRE_PONG:
|
|
|
|
err = handle_pong(peer, msg);
|
|
|
|
goto handled_relay;
|
2018-11-21 01:36:08 +01:00
|
|
|
|
|
|
|
/* These are non-gossip messages (!is_msg_for_gossipd()) */
|
2018-11-05 02:21:51 +01:00
|
|
|
case WIRE_INIT:
|
|
|
|
case WIRE_ERROR:
|
|
|
|
case WIRE_OPEN_CHANNEL:
|
|
|
|
case WIRE_ACCEPT_CHANNEL:
|
|
|
|
case WIRE_FUNDING_CREATED:
|
|
|
|
case WIRE_FUNDING_SIGNED:
|
|
|
|
case WIRE_FUNDING_LOCKED:
|
|
|
|
case WIRE_SHUTDOWN:
|
|
|
|
case WIRE_CLOSING_SIGNED:
|
|
|
|
case WIRE_UPDATE_ADD_HTLC:
|
|
|
|
case WIRE_UPDATE_FULFILL_HTLC:
|
|
|
|
case WIRE_UPDATE_FAIL_HTLC:
|
|
|
|
case WIRE_UPDATE_FAIL_MALFORMED_HTLC:
|
|
|
|
case WIRE_COMMITMENT_SIGNED:
|
|
|
|
case WIRE_REVOKE_AND_ACK:
|
|
|
|
case WIRE_UPDATE_FEE:
|
|
|
|
case WIRE_CHANNEL_REESTABLISH:
|
|
|
|
case WIRE_ANNOUNCEMENT_SIGNATURES:
|
2019-06-03 20:19:25 +02:00
|
|
|
case WIRE_GOSSIP_TIMESTAMP_FILTER:
|
2018-11-05 02:21:51 +01:00
|
|
|
status_broken("peer %s: relayed unexpected msg of type %s",
|
2019-04-08 11:58:32 +02:00
|
|
|
type_to_string(tmpctx, struct node_id, &peer->id),
|
2018-11-05 02:21:51 +01:00
|
|
|
wire_type_name(fromwire_peektype(msg)));
|
2018-03-08 04:16:34 +01:00
|
|
|
return io_close(conn);
|
2017-03-11 15:31:17 +01:00
|
|
|
}
|
2017-12-15 23:20:13 +01:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Must be a gossip_peerd_wire_type asking us to do something. */
|
2018-11-13 05:03:51 +01:00
|
|
|
switch ((enum gossip_peerd_wire_type)fromwire_peektype(msg)) {
|
|
|
|
case WIRE_GOSSIPD_GET_UPDATE:
|
2018-11-05 02:21:51 +01:00
|
|
|
ok = handle_get_update(peer, msg);
|
|
|
|
goto handled_cmd;
|
2018-11-13 05:03:51 +01:00
|
|
|
case WIRE_GOSSIPD_LOCAL_ADD_CHANNEL:
|
2019-06-03 20:07:25 +02:00
|
|
|
ok = handle_local_add_channel(peer->daemon->rstate, msg, 0);
|
2018-11-05 02:21:51 +01:00
|
|
|
goto handled_cmd;
|
2018-11-13 05:03:51 +01:00
|
|
|
case WIRE_GOSSIPD_LOCAL_CHANNEL_UPDATE:
|
2018-11-05 02:21:51 +01:00
|
|
|
ok = handle_local_channel_update(peer, msg);
|
|
|
|
goto handled_cmd;
|
2018-11-21 01:36:08 +01:00
|
|
|
|
|
|
|
/* These are the ones we send, not them */
|
2018-11-13 05:03:51 +01:00
|
|
|
case WIRE_GOSSIPD_GET_UPDATE_REPLY:
|
2019-05-04 07:53:13 +02:00
|
|
|
case WIRE_GOSSIPD_NEW_STORE_FD:
|
2018-11-05 02:21:51 +01:00
|
|
|
break;
|
|
|
|
}
|
2018-11-21 01:36:08 +01:00
|
|
|
|
|
|
|
/* Anything else should not have been sent to us: close on it */
|
2018-11-13 05:03:51 +01:00
|
|
|
status_broken("peer %s: unexpected cmd of type %i %s",
|
2019-04-08 11:58:32 +02:00
|
|
|
type_to_string(tmpctx, struct node_id, &peer->id),
|
2018-11-13 05:03:51 +01:00
|
|
|
fromwire_peektype(msg),
|
|
|
|
gossip_peerd_wire_type_name(fromwire_peektype(msg)));
|
2018-11-05 02:21:51 +01:00
|
|
|
return io_close(conn);
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Commands should always be OK. */
|
2018-11-05 02:21:51 +01:00
|
|
|
handled_cmd:
|
|
|
|
if (!ok)
|
|
|
|
return io_close(conn);
|
|
|
|
goto done;
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Forwarded messages may be bad, so we have error which the per-peer
|
|
|
|
* daemon will forward to the peer. */
|
2018-11-05 02:21:51 +01:00
|
|
|
handled_relay:
|
|
|
|
if (err)
|
|
|
|
queue_peer_msg(peer, take(err));
|
|
|
|
done:
|
2018-10-25 01:43:05 +02:00
|
|
|
return daemon_conn_read_next(conn, peer->dc);
|
2017-03-09 16:56:04 +01:00
|
|
|
}
|
|
|
|
|
2019-06-12 01:28:40 +02:00
|
|
|
/* What gossip level do we set for this peer to meet our targets? */
|
|
|
|
static enum gossip_level peer_gossip_level(const struct daemon *daemon,
|
|
|
|
bool gossip_queries_feature)
|
|
|
|
{
|
|
|
|
struct peer *peer;
|
|
|
|
size_t gossip_levels[ARRAY_SIZE(gossip_level_targets)];
|
|
|
|
enum gossip_level glevel;
|
|
|
|
|
|
|
|
/* Old peers always give us a flood. */
|
|
|
|
if (!gossip_queries_feature)
|
|
|
|
return GOSSIP_HIGH;
|
|
|
|
|
|
|
|
/* Figure out how many we have at each level. */
|
|
|
|
memset(gossip_levels, 0, sizeof(gossip_levels));
|
|
|
|
list_for_each(&daemon->peers, peer, list)
|
|
|
|
gossip_levels[peer->gossip_level]++;
|
|
|
|
|
|
|
|
/* If we're missing gossip, try to fill GOSSIP_HIGH */
|
|
|
|
if (daemon->gossip_missing != NULL)
|
|
|
|
glevel = GOSSIP_HIGH;
|
|
|
|
else
|
|
|
|
glevel = GOSSIP_MEDIUM;
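	/* If we already have enough peers gossiping at that level, step to
	 * the next (lower-traffic) level until we find one with room. */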
|
|
|
|
|
|
|
|
while (gossip_levels[glevel] >= gossip_level_targets[glevel])
|
|
|
|
glevel++;
|
|
|
|
|
|
|
|
return glevel;
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ This is where connectd tells us about a new peer, and we hand back an fd for
|
|
|
|
* it to send us messages via peer_msg_in above */
|
2018-07-24 08:18:58 +02:00
|
|
|
static struct io_plan *connectd_new_peer(struct io_conn *conn,
|
|
|
|
struct daemon *daemon,
|
|
|
|
const u8 *msg)
|
|
|
|
{
|
|
|
|
struct peer *peer = tal(conn, struct peer);
|
|
|
|
int fds[2];
|
2019-05-04 07:53:13 +02:00
|
|
|
int gossip_store_fd;
|
2019-06-03 20:15:25 +02:00
|
|
|
struct gossip_state *gs;
|
2018-07-24 08:18:58 +02:00
|
|
|
|
|
|
|
if (!fromwire_gossip_new_peer(msg, &peer->id,
|
|
|
|
&peer->gossip_queries_feature,
|
|
|
|
&peer->initial_routing_sync_feature)) {
|
|
|
|
status_broken("Bad new_peer msg from connectd: %s",
|
|
|
|
tal_hex(tmpctx, msg));
|
|
|
|
return io_close(conn);
|
|
|
|
}
|
|
|
|
|
2019-06-03 20:22:25 +02:00
|
|
|
	gossip_store_fd = gossip_store_readonly_fd(daemon->rstate->gs);
|
2019-05-04 07:53:13 +02:00
|
|
|
if (gossip_store_fd < 0) {
|
|
|
|
status_broken("Failed to get readonly store fd: %s",
|
|
|
|
strerror(errno));
|
|
|
|
daemon_conn_send(daemon->connectd,
|
2019-06-03 20:15:25 +02:00
|
|
|
take(towire_gossip_new_peer_reply(NULL,
|
|
|
|
false,
|
|
|
|
NULL)));
|
2019-05-04 07:53:13 +02:00
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* This can happen: we handle it gracefully, returning a `failed` msg. */
|
2018-07-24 08:18:58 +02:00
|
|
|
if (socketpair(AF_LOCAL, SOCK_STREAM, 0, fds) != 0) {
|
|
|
|
status_broken("Failed to create socketpair: %s",
|
|
|
|
strerror(errno));
|
2019-05-04 07:53:13 +02:00
|
|
|
close(gossip_store_fd);
|
2018-10-25 01:43:05 +02:00
|
|
|
daemon_conn_send(daemon->connectd,
|
2019-06-03 20:15:25 +02:00
|
|
|
take(towire_gossip_new_peer_reply(NULL,
|
|
|
|
false,
|
|
|
|
NULL)));
|
2018-07-24 08:18:58 +02:00
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* We might not have noticed old peer is dead; kill it now. */
|
|
|
|
tal_free(find_peer(daemon, &peer->id));
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Populate the rest of the peer info. */
|
2018-07-24 08:18:58 +02:00
|
|
|
peer->daemon = daemon;
|
|
|
|
peer->scid_queries = NULL;
|
|
|
|
peer->scid_query_idx = 0;
|
|
|
|
peer->scid_query_nodes = NULL;
|
|
|
|
peer->scid_query_nodes_idx = 0;
|
2019-06-12 01:26:07 +02:00
|
|
|
peer->scid_query_outstanding = false;
|
2018-07-24 08:18:58 +02:00
|
|
|
peer->query_channel_blocks = NULL;
|
|
|
|
peer->num_pings_outstanding = 0;
|
2019-06-12 01:28:40 +02:00
|
|
|
peer->gossip_level = peer_gossip_level(daemon,
|
|
|
|
peer->gossip_queries_feature);
|
2018-07-24 08:18:58 +02:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* We keep a list so we can find peer by id */
|
2018-07-24 08:18:58 +02:00
|
|
|
list_add_tail(&peer->daemon->peers, &peer->list);
|
|
|
|
tal_add_destructor(peer, destroy_peer);
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* This is the new connection: calls dump_gossip when nothing else to
|
|
|
|
* send. */
|
2018-10-25 01:43:05 +02:00
|
|
|
peer->dc = daemon_conn_new(daemon, fds[0],
|
2018-11-05 02:21:51 +01:00
|
|
|
peer_msg_in, dump_gossip, peer);
|
2018-10-25 01:43:05 +02:00
|
|
|
/* Free peer if conn closed (destroy_peer closes conn if peer freed) */
|
|
|
|
tal_steal(peer->dc, peer);
|
2018-07-24 08:18:58 +02:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* This sends the initial timestamp filter. */
|
2018-07-24 08:18:58 +02:00
|
|
|
setup_gossip_range(peer);
|
|
|
|
|
2019-06-03 20:19:25 +02:00
|
|
|
/* BOLT #7:
|
|
|
|
*
|
|
|
|
* A node:
|
|
|
|
* - if the `gossip_queries` feature is negotiated:
|
|
|
|
* - MUST NOT relay any gossip messages unless explicitly requested.
|
|
|
|
*/
|
|
|
|
if (peer->gossip_queries_feature) {
|
|
|
|
gs = NULL;
|
|
|
|
} else {
|
|
|
|
/* BOLT #7:
|
|
|
|
*
|
|
|
|
* - upon receiving an `init` message with the
|
|
|
|
* `initial_routing_sync` flag set to 1:
|
|
|
|
* - SHOULD send gossip messages for all known channels and
|
|
|
|
* nodes, as if they were just received.
|
|
|
|
* - if the `initial_routing_sync` flag is set to 0, OR if the
|
|
|
|
* initial sync was completed:
|
|
|
|
* - SHOULD resume normal operation, as specified in the
|
|
|
|
* following [Rebroadcasting](#rebroadcasting) section.
|
|
|
|
*/
|
|
|
|
gs = tal(tmpctx, struct gossip_state);
|
|
|
|
gs->timestamp_min = 0;
|
|
|
|
gs->timestamp_max = UINT32_MAX;
|
|
|
|
|
|
|
|
/* If they don't want initial sync, start at end of store */
|
|
|
|
if (!peer->initial_routing_sync_feature)
|
|
|
|
lseek(gossip_store_fd, 0, SEEK_END);
|
|
|
|
|
|
|
|
gs->next_gossip = time_mono();
|
|
|
|
}
|
2018-10-25 01:43:05 +02:00
|
|
|
|
2019-06-03 20:15:25 +02:00
|
|
|
/* Reply with success, and the new fd and gossip_state. */
|
2018-10-25 01:43:05 +02:00
|
|
|
daemon_conn_send(daemon->connectd,
|
2019-06-03 20:15:25 +02:00
|
|
|
take(towire_gossip_new_peer_reply(NULL, true, gs)));
|
2018-10-25 01:43:05 +02:00
|
|
|
daemon_conn_send_fd(daemon->connectd, fds[1]);
|
2019-05-04 07:53:13 +02:00
|
|
|
daemon_conn_send_fd(daemon->connectd, gossip_store_fd);
|
2018-07-24 08:18:58 +02:00
|
|
|
|
|
|
|
done:
|
2018-10-25 01:43:05 +02:00
|
|
|
return daemon_conn_read_next(conn, daemon->connectd);
|
2018-07-24 08:18:58 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ connectd can also ask us if we know any addresses for a given id. */
|
2018-11-05 02:16:48 +01:00
|
|
|
static struct io_plan *connectd_get_address(struct io_conn *conn,
|
|
|
|
struct daemon *daemon,
|
|
|
|
const u8 *msg)
|
2017-03-10 13:06:51 +01:00
|
|
|
{
|
2019-04-08 11:58:32 +02:00
|
|
|
struct node_id id;
|
2019-04-11 07:15:22 +02:00
|
|
|
u8 rgb_color[3];
|
|
|
|
u8 alias[32];
|
|
|
|
u8 *features;
|
|
|
|
struct wireaddr *addrs;
|
2018-06-04 06:19:25 +02:00
|
|
|
|
2018-11-05 02:16:48 +01:00
|
|
|
if (!fromwire_gossip_get_addrs(msg, &id)) {
|
|
|
|
status_broken("Bad gossip_get_addrs msg from connectd: %s",
|
|
|
|
tal_hex(tmpctx, msg));
|
|
|
|
return io_close(conn);
|
|
|
|
}
|
|
|
|
|
2019-04-11 07:15:22 +02:00
|
|
|
if (!get_node_announcement_by_id(tmpctx, daemon, &id,
|
|
|
|
rgb_color, alias, &features, &addrs))
|
2018-11-05 02:16:48 +01:00
|
|
|
addrs = NULL;
|
|
|
|
|
|
|
|
daemon_conn_send(daemon->connectd,
|
|
|
|
take(towire_gossip_get_addrs_reply(NULL, addrs)));
|
|
|
|
return daemon_conn_read_next(conn, daemon->connectd);
|
2017-03-10 13:06:51 +01:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ connectd's input handler is very simple. */
|
2018-11-05 02:16:48 +01:00
|
|
|
static struct io_plan *connectd_req(struct io_conn *conn,
|
|
|
|
const u8 *msg,
|
|
|
|
struct daemon *daemon)
|
2017-03-15 11:36:52 +01:00
|
|
|
{
|
2018-11-05 02:16:48 +01:00
|
|
|
enum connect_gossip_wire_type t = fromwire_peektype(msg);
|
2017-03-15 13:46:29 +01:00
|
|
|
|
2018-11-05 02:16:48 +01:00
|
|
|
switch (t) {
|
|
|
|
case WIRE_GOSSIP_NEW_PEER:
|
|
|
|
return connectd_new_peer(conn, daemon, msg);
|
2018-09-29 08:33:51 +02:00
|
|
|
|
2018-11-05 02:16:48 +01:00
|
|
|
case WIRE_GOSSIP_GET_ADDRS:
|
|
|
|
return connectd_get_address(conn, daemon, msg);
|
2017-03-15 13:46:29 +01:00
|
|
|
|
2018-11-05 02:16:48 +01:00
|
|
|
/* We send these, don't receive them. */
|
|
|
|
case WIRE_GOSSIP_NEW_PEER_REPLY:
|
|
|
|
case WIRE_GOSSIP_GET_ADDRS_REPLY:
|
|
|
|
break;
|
|
|
|
}
|
2017-03-15 11:36:52 +01:00
|
|
|
|
2018-11-05 02:16:48 +01:00
|
|
|
status_broken("Bad msg from connectd: %s",
|
|
|
|
tal_hex(tmpctx, msg));
|
|
|
|
return io_close(conn);
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ This is our twice-weekly timer callback for refreshing our channels. This
|
|
|
|
* was added to the spec because people abandoned their channels without
|
|
|
|
* closing them. */
|
2018-11-05 02:16:48 +01:00
|
|
|
static void gossip_send_keepalive_update(struct daemon *daemon,
|
|
|
|
const struct chan *chan,
|
|
|
|
const struct half_chan *hc)
|
|
|
|
{
|
|
|
|
status_trace("Sending keepalive channel_update for %s",
|
|
|
|
type_to_string(tmpctx, struct short_channel_id,
|
|
|
|
&chan->scid));
|
|
|
|
|
|
|
|
/* As a side-effect, this will create an update which matches the
|
|
|
|
* local_disabled state */
|
|
|
|
update_local_channel(daemon, chan,
|
|
|
|
hc->channel_flags & ROUTING_FLAGS_DIRECTION,
|
2019-04-11 07:15:22 +02:00
|
|
|
is_chan_local_disabled(daemon->rstate, chan),
|
2018-11-05 02:16:48 +01:00
|
|
|
hc->delay,
|
2019-02-21 04:45:55 +01:00
|
|
|
hc->htlc_minimum,
|
2018-11-05 02:16:48 +01:00
|
|
|
hc->base_fee,
|
|
|
|
hc->proportional_fee,
|
2019-02-21 04:45:55 +01:00
|
|
|
hc->htlc_maximum,
|
2018-11-05 02:16:48 +01:00
|
|
|
__func__);
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
|
|
|
|
/* BOLT #7:
|
|
|
|
*
|
2019-01-14 03:22:05 +01:00
|
|
|
* A node:
|
2018-11-21 01:36:08 +01:00
|
|
|
* - if a channel's latest `channel_update`s `timestamp` is older than two weeks
|
|
|
|
* (1209600 seconds):
|
|
|
|
* - MAY prune the channel.
|
|
|
|
* - MAY ignore the channel.
|
|
|
|
*/
|
2018-11-05 02:16:48 +01:00
|
|
|
static void gossip_refresh_network(struct daemon *daemon)
|
|
|
|
{
|
|
|
|
u64 now = time_now().ts.tv_sec;
|
|
|
|
/* Anything below this highwater mark could be pruned if not refreshed */
|
|
|
|
s64 highwater = now - daemon->rstate->prune_timeout / 2;
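	/* (With the usual two-week prune timeout, that is one week ago.) */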
|
|
|
|
struct node *n;
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Schedule next run now (prune_timeout is 2 weeks) */
|
2018-11-21 23:39:31 +01:00
|
|
|
notleak(new_reltimer(&daemon->timers, daemon,
|
|
|
|
time_from_sec(daemon->rstate->prune_timeout/4),
|
|
|
|
gossip_refresh_network, daemon));
|
2018-11-05 02:16:48 +01:00
|
|
|
|
|
|
|
/* Find myself in the network */
|
|
|
|
n = get_node(daemon->rstate, &daemon->id);
|
|
|
|
if (n) {
|
|
|
|
		/* Iterate through all outgoing connections and check whether
|
|
|
|
* it's time to re-announce */
|
2019-04-08 01:51:30 +02:00
|
|
|
struct chan_map_iter i;
|
|
|
|
struct chan *c;
|
|
|
|
|
2019-04-08 06:42:43 +02:00
|
|
|
for (c = first_chan(n, &i); c; c = next_chan(n, &i)) {
|
2019-04-08 01:51:30 +02:00
|
|
|
struct half_chan *hc = half_chan_from(n, c);
|
2018-11-05 02:16:48 +01:00
|
|
|
|
|
|
|
if (!is_halfchan_defined(hc)) {
|
|
|
|
/* Connection is not announced yet, so don't even
|
|
|
|
* try to re-announce it */
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2019-04-10 09:31:29 +02:00
|
|
|
if (hc->bcast.timestamp > highwater) {
|
2018-11-05 02:16:48 +01:00
|
|
|
/* No need to send a keepalive update message */
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!is_halfchan_enabled(hc)) {
|
|
|
|
/* Only send keepalives for active connections */
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2019-04-08 01:51:30 +02:00
|
|
|
gossip_send_keepalive_update(daemon, c, hc);
|
2018-11-05 02:16:48 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Now we've refreshed our channels, we can prune without clobbering
|
|
|
|
* them */
|
2018-11-05 02:16:48 +01:00
|
|
|
route_prune(daemon->rstate);
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Disables all channels connected to our node. */
|
2018-11-05 02:16:48 +01:00
|
|
|
static void gossip_disable_local_channels(struct daemon *daemon)
|
|
|
|
{
|
|
|
|
struct node *local_node = get_node(daemon->rstate, &daemon->id);
|
2019-04-08 01:51:30 +02:00
|
|
|
struct chan_map_iter i;
|
|
|
|
struct chan *c;
|
2018-11-05 02:16:48 +01:00
|
|
|
|
|
|
|
/* We don't have a local_node, so we don't have any channels yet
|
|
|
|
* either */
|
|
|
|
if (!local_node)
|
|
|
|
return;
|
|
|
|
|
2019-04-08 06:42:43 +02:00
|
|
|
for (c = first_chan(local_node, &i); c; c = next_chan(local_node, &i))
|
2019-04-11 07:15:22 +02:00
|
|
|
local_disable_chan(daemon->rstate, c);
|
2018-11-05 02:16:48 +01:00
|
|
|
}
|
|
|
|
|
2019-06-12 01:28:40 +02:00
|
|
|
/* Mutual recursion, so we pre-declare this. */
|
|
|
|
static void gossip_not_missing(struct daemon *daemon);
|
|
|
|
|
2019-06-12 01:29:12 +02:00
|
|
|
/* Pick a random peer which is not already GOSSIP_HIGH. */
|
|
|
|
static struct peer *random_peer_to_gossip(struct daemon *daemon)
|
|
|
|
{
|
|
|
|
u64 target = UINT64_MAX;
|
|
|
|
struct peer *best = NULL, *i;
|
|
|
|
|
|
|
|
/* Reservoir sampling */
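	/* (Each eligible peer draws an independent random key and we keep the
	 * one with the smallest key seen so far: every eligible peer is
	 * equally likely to be chosen, without counting them first.) */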
|
|
|
|
list_for_each(&daemon->peers, i, list) {
|
|
|
|
u64 r = pseudorand_u64();
|
|
|
|
if (i->gossip_level != GOSSIP_HIGH && r <= target) {
|
|
|
|
best = i;
|
|
|
|
target = r;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return best;
|
|
|
|
}
|
|
|
|
|
2019-06-12 01:25:07 +02:00
|
|
|
/*~ We've found gossip is missing. */
|
|
|
|
static void gossip_missing(struct daemon *daemon)
|
|
|
|
{
|
2019-06-12 01:29:12 +02:00
|
|
|
if (!daemon->gossip_missing) {
|
2019-06-12 01:28:40 +02:00
|
|
|
status_info("We seem to be missing gossip messages");
|
2019-06-12 01:29:12 +02:00
|
|
|
/* FIXME: we could use query_channel_range. */
|
|
|
|
/* Make some peers gossip harder. */
|
|
|
|
for (size_t i = 0; i < gossip_level_targets[GOSSIP_HIGH]; i++) {
|
|
|
|
struct peer *peer = random_peer_to_gossip(daemon);
|
|
|
|
|
|
|
|
if (!peer)
|
|
|
|
break;
|
|
|
|
|
|
|
|
status_info("%s: gossip harder!",
|
|
|
|
type_to_string(tmpctx, struct node_id,
|
|
|
|
&peer->id));
|
|
|
|
peer->gossip_level = GOSSIP_HIGH;
|
|
|
|
setup_gossip_range(peer);
|
|
|
|
}
|
|
|
|
}
|
2019-06-12 01:28:40 +02:00
|
|
|
|
|
|
|
tal_free(daemon->gossip_missing);
|
|
|
|
/* Check again in 10 minutes. */
|
|
|
|
daemon->gossip_missing = new_reltimer(&daemon->timers, daemon,
|
|
|
|
time_from_sec(600),
|
|
|
|
gossip_not_missing, daemon);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*~ This is a timer, which goes off 10 minutes after the last time we noticed
|
|
|
|
* that gossip was missing. */
|
|
|
|
static void gossip_not_missing(struct daemon *daemon)
|
|
|
|
{
|
|
|
|
/* Corner case: no peers, try again! */
|
|
|
|
if (list_empty(&daemon->peers))
|
|
|
|
gossip_missing(daemon);
|
|
|
|
else {
|
2019-06-12 01:29:12 +02:00
|
|
|
struct peer *peer;
|
|
|
|
|
2019-06-12 01:28:40 +02:00
|
|
|
daemon->gossip_missing = tal_free(daemon->gossip_missing);
|
|
|
|
status_info("We seem to be caught up on gossip messages");
|
|
|
|
/* Free any lagging/stale unknown scids. */
|
|
|
|
daemon->unknown_scids = tal_free(daemon->unknown_scids);
|
2019-06-12 01:29:12 +02:00
|
|
|
|
|
|
|
/* Reset peers we marked as HIGH */
|
|
|
|
list_for_each(&daemon->peers, peer, list) {
|
|
|
|
if (peer->gossip_level != GOSSIP_HIGH)
|
|
|
|
continue;
|
|
|
|
if (!peer->gossip_queries_feature)
|
|
|
|
continue;
|
|
|
|
peer->gossip_level = peer_gossip_level(daemon, true);
|
|
|
|
setup_gossip_range(peer);
|
|
|
|
}
|
2019-06-12 01:28:40 +02:00
|
|
|
}
|
2019-06-12 01:25:07 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ Parse init message from lightningd: starts the daemon properly. */
|
2018-11-05 02:16:48 +01:00
|
|
|
static struct io_plan *gossip_init(struct io_conn *conn,
|
|
|
|
struct daemon *daemon,
|
|
|
|
const u8 *msg)
|
|
|
|
{
|
|
|
|
u32 update_channel_interval;
|
2019-04-08 01:51:30 +02:00
|
|
|
u32 *dev_gossip_time;
|
2018-11-05 02:16:48 +01:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
if (!fromwire_gossipctl_init(daemon, msg,
|
2018-11-21 04:10:03 +01:00
|
|
|
&daemon->chain_hash,
|
2018-11-21 01:36:08 +01:00
|
|
|
&daemon->id, &daemon->globalfeatures,
|
|
|
|
daemon->rgb,
|
|
|
|
daemon->alias,
|
|
|
|
/* 1 week in seconds
|
|
|
|
* (unless --dev-channel-update-interval) */
|
|
|
|
&update_channel_interval,
|
2019-04-08 01:51:30 +02:00
|
|
|
&daemon->announcable,
|
2019-05-16 21:56:17 +02:00
|
|
|
&dev_gossip_time)) {
|
2018-11-05 02:16:48 +01:00
|
|
|
master_badmsg(WIRE_GOSSIPCTL_INIT, msg);
|
|
|
|
}
|
2018-11-21 01:36:08 +01:00
|
|
|
|
|
|
|
/* Prune time (usually 2 weeks) is twice update time */
|
2018-11-21 04:10:03 +01:00
|
|
|
daemon->rstate = new_routing_state(daemon,
|
|
|
|
chainparams_by_chainhash(&daemon->chain_hash),
|
|
|
|
&daemon->id,
|
2019-04-08 01:51:30 +02:00
|
|
|
update_channel_interval * 2,
|
2019-04-11 07:16:57 +02:00
|
|
|
&daemon->peers,
|
2019-05-16 21:56:17 +02:00
|
|
|
dev_gossip_time);
|
2019-04-08 01:52:06 +02:00
|
|
|
|
2018-11-05 02:16:48 +01:00
|
|
|
/* Load stored gossip messages */
|
2019-06-12 01:25:07 +02:00
|
|
|
if (!gossip_store_load(daemon->rstate, daemon->rstate->gs))
|
|
|
|
gossip_missing(daemon);
|
2018-11-05 02:16:48 +01:00
|
|
|
|
|
|
|
/* Now disable all local channels, they can't be connected yet. */
|
|
|
|
gossip_disable_local_channels(daemon);
|
|
|
|
|
|
|
|
/* If that announced channels, we can announce ourselves (options
|
|
|
|
* or addresses might have changed!) */
|
|
|
|
maybe_send_own_node_announce(daemon);
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Start the weekly refresh timer. */
|
2018-11-21 23:39:31 +01:00
|
|
|
notleak(new_reltimer(&daemon->timers, daemon,
|
|
|
|
time_from_sec(daemon->rstate->prune_timeout/4),
|
|
|
|
gossip_refresh_network, daemon));
|
2018-11-05 02:16:48 +01:00
|
|
|
|
|
|
|
return daemon_conn_read_next(conn, daemon->master);
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ lightningd can ask for a route between nodes. */
|
2018-11-05 02:16:48 +01:00
|
|
|
static struct io_plan *getroute_req(struct io_conn *conn, struct daemon *daemon,
|
|
|
|
const u8 *msg)
|
|
|
|
{
|
2019-05-31 09:30:33 +02:00
|
|
|
struct node_id *source, destination;
|
2019-02-21 04:45:55 +01:00
|
|
|
struct amount_msat msat;
|
2018-11-05 02:16:48 +01:00
|
|
|
u32 final_cltv;
|
2019-02-01 06:53:38 +01:00
|
|
|
u64 riskfactor_by_million;
|
2019-01-15 05:06:27 +01:00
|
|
|
u32 max_hops;
|
2018-11-05 02:16:48 +01:00
|
|
|
u8 *out;
|
|
|
|
struct route_hop *hops;
|
|
|
|
double fuzz;
|
2019-01-15 05:11:27 +01:00
|
|
|
struct short_channel_id_dir *excluded;
|
2018-11-05 02:16:48 +01:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* To choose between variations, we need to know how much we're
|
|
|
|
	 * sending (eliminates too-small channels, and also affects the fees
|
|
|
|
* we'll pay), how to trade off more locktime vs. more fees, and how
|
|
|
|
	 * much cltv we need at the final node to give exact values for each
|
|
|
|
* intermediate hop, as well as how much random fuzz to inject to
|
2019-05-31 09:30:33 +02:00
|
|
|
* avoid being too predictable.
|
|
|
|
*
|
|
|
|
* We also treat routing slightly differently if we're asking
|
|
|
|
* for a route from ourselves (the usual case): in that case,
|
|
|
|
* we don't have to consider fees on our own outgoing channels.
|
|
|
|
*/
|
2019-01-15 05:04:27 +01:00
|
|
|
if (!fromwire_gossip_getroute_request(msg, msg,
|
2018-11-05 02:16:48 +01:00
|
|
|
&source, &destination,
|
2019-02-21 04:45:55 +01:00
|
|
|
&msat, &riskfactor_by_million,
|
2019-01-15 05:04:27 +01:00
|
|
|
&final_cltv, &fuzz,
|
2019-01-15 05:11:27 +01:00
|
|
|
&excluded,
|
2019-01-15 05:06:27 +01:00
|
|
|
&max_hops))
|
2018-11-05 02:16:48 +01:00
|
|
|
master_badmsg(WIRE_GOSSIP_GETROUTE_REQUEST, msg);
|
|
|
|
|
2019-02-21 04:45:55 +01:00
|
|
|
status_trace("Trying to find a route from %s to %s for %s",
|
2019-05-31 09:30:33 +02:00
|
|
|
source
|
|
|
|
? type_to_string(tmpctx, struct node_id, source) : "(me)",
|
2019-04-08 11:58:32 +02:00
|
|
|
type_to_string(tmpctx, struct node_id, &destination),
|
2019-02-21 04:45:55 +01:00
|
|
|
type_to_string(tmpctx, struct amount_msat, &msat));
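	/* lightningd passes the riskfactor scaled up by a million (the wire
	 * field is an integer); the get_route() call below converts it back
	 * to a double. */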
|
2018-11-05 02:16:48 +01:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* routing.c does all the hard work; can return NULL. */
|
2019-05-31 09:30:33 +02:00
|
|
|
hops = get_route(tmpctx, daemon->rstate, source, &destination,
|
2019-02-21 04:45:55 +01:00
|
|
|
msat, riskfactor_by_million / 1000000.0, final_cltv,
|
2019-02-01 03:36:18 +01:00
|
|
|
fuzz, pseudorand_u64(), excluded, max_hops);
|
2018-11-05 02:16:48 +01:00
|
|
|
|
2018-11-13 05:03:52 +01:00
|
|
|
out = towire_gossip_getroute_reply(NULL, hops);
|
|
|
|
daemon_conn_send(daemon->master, take(out));
|
2018-10-25 01:43:05 +02:00
|
|
|
return daemon_conn_read_next(conn, daemon->master);
|
2017-03-15 13:46:29 +01:00
|
|
|
}
|
2017-03-15 11:36:52 +01:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ When someone asks lightningd to `listchannels`, gossipd does the work:
|
|
|
|
* marshalling the channel information for all channels into an array of
|
|
|
|
* gossip_getchannels_entry, which lightningd converts to JSON. Each channel
|
|
|
|
* is represented by two half_chan; one in each direction.
|
|
|
|
*/
|
2019-04-08 11:58:44 +02:00
|
|
|
static struct gossip_halfchannel_entry *hc_entry(const tal_t *ctx,
|
|
|
|
const struct chan *chan,
|
|
|
|
int idx)
|
2018-03-02 03:27:30 +01:00
|
|
|
{
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Our 'struct chan' contains two nodes: they are in pubkey_cmp order
|
|
|
|
* (ie. chan->nodes[0] is the lesser pubkey) and this is the same as
|
|
|
|
* the direction bit in `channel_update`s `channel_flags`.
|
|
|
|
*
|
|
|
|
* The halfchans are arranged so that half[0] src == nodes[0], and we
|
2019-04-08 11:58:44 +02:00
|
|
|
* use that here. */
|
|
|
|
const struct half_chan *c = &chan->half[idx];
|
|
|
|
struct gossip_halfchannel_entry *e;
|
|
|
|
|
|
|
|
/* If we've never seen a channel_update for this direction... */
|
|
|
|
if (!is_halfchan_defined(c))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
e = tal(ctx, struct gossip_halfchannel_entry);
|
|
|
|
e->channel_flags = c->channel_flags;
|
|
|
|
e->message_flags = c->message_flags;
|
2019-04-10 09:31:29 +02:00
|
|
|
e->last_update_timestamp = c->bcast.timestamp;
|
2019-04-08 11:58:44 +02:00
|
|
|
e->base_fee_msat = c->base_fee;
|
|
|
|
e->fee_per_millionth = c->proportional_fee;
|
|
|
|
e->delay = c->delay;
|
2019-05-31 09:28:06 +02:00
|
|
|
e->min = c->htlc_minimum;
|
|
|
|
e->max = c->htlc_maximum;
|
2019-04-08 11:58:44 +02:00
|
|
|
|
|
|
|
return e;
|
2018-03-02 03:27:30 +01:00
|
|
|
}
|
|
|
|
|
2019-04-08 11:58:44 +02:00
|
|
|
/*~ Marshal (possibly) both channel directions into entries. */
|
2019-04-11 07:15:22 +02:00
|
|
|
static void append_channel(struct routing_state *rstate,
|
|
|
|
const struct gossip_getchannels_entry ***entries,
|
2019-04-08 11:58:44 +02:00
|
|
|
const struct chan *chan,
|
|
|
|
const struct node_id *srcfilter)
|
2018-03-02 09:59:16 +01:00
|
|
|
{
|
2019-04-08 11:58:44 +02:00
|
|
|
struct gossip_getchannels_entry *e = tal(*entries, struct gossip_getchannels_entry);
|
|
|
|
|
|
|
|
e->node[0] = chan->nodes[0]->id;
|
|
|
|
e->node[1] = chan->nodes[1]->id;
|
|
|
|
e->sat = chan->sat;
|
2019-04-11 07:15:22 +02:00
|
|
|
e->local_disabled = is_chan_local_disabled(rstate, chan);
|
2019-04-08 11:58:44 +02:00
|
|
|
e->public = is_chan_public(chan);
|
|
|
|
e->short_channel_id = chan->scid;
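	/* If a source node filter was given, only fill in the half(s) of the
	 * channel whose originating node matches it. */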
|
|
|
|
if (!srcfilter || node_id_eq(&e->node[0], srcfilter))
|
|
|
|
e->e[0] = hc_entry(*entries, chan, 0);
|
|
|
|
else
|
|
|
|
e->e[0] = NULL;
|
|
|
|
if (!srcfilter || node_id_eq(&e->node[1], srcfilter))
|
|
|
|
e->e[1] = hc_entry(*entries, chan, 1);
|
|
|
|
else
|
|
|
|
e->e[1] = NULL;
|
|
|
|
|
|
|
|
/* We choose not to tell lightningd about channels with no updates,
|
|
|
|
* as they're unusable and can't be represented in the listchannels
|
|
|
|
* JSON output we use anyway. */
|
|
|
|
if (e->e[0] || e->e[1])
|
|
|
|
tal_arr_expand(entries, e);
|
2018-03-02 09:59:16 +01:00
|
|
|
}

/*~ This is where lightningd asks for all channels we know about. */
static struct io_plan *getchannels_req(struct io_conn *conn,
				       struct daemon *daemon,
				       const u8 *msg)
{
	u8 *out;
	const struct gossip_getchannels_entry **entries;
	struct chan *chan;
	struct short_channel_id *scid, *prev;
	struct node_id *source;
	bool complete = true;

	/* Note: scid is marked optional in gossip_wire.csv */
	if (!fromwire_gossip_getchannels_request(msg, msg, &scid, &source,
						 &prev))
		master_badmsg(WIRE_GOSSIP_GETCHANNELS_REQUEST, msg);

	entries = tal_arr(tmpctx, const struct gossip_getchannels_entry *, 0);
	/* They can ask about a particular channel by short_channel_id */
	if (scid) {
		chan = get_channel(daemon->rstate, scid);
		if (chan)
			append_channel(daemon->rstate, &entries, chan, NULL);
	} else if (source) {
		struct node *s = get_node(daemon->rstate, source);
		if (s) {
			struct chan_map_iter i;
			struct chan *c;

			for (c = first_chan(s, &i); c; c = next_chan(s, &i)) {
				append_channel(daemon->rstate,
					       &entries, c, source);
			}
		}
	} else {
		u64 idx;

		/* For the more general case, we just iterate through every
		 * short channel id, starting with previous if any (there is
		 * no scid 0). */
		idx = prev ? prev->u64 : 0;
		while ((chan = uintmap_after(&daemon->rstate->chanmap, &idx))) {
			append_channel(daemon->rstate, &entries, chan, NULL);
			/* Limit how many we do at once. */
			if (tal_count(entries) == 4096) {
				complete = false;
				break;
			}
		}
	}

	out = towire_gossip_getchannels_reply(NULL, complete, entries);
	daemon_conn_send(daemon->master, take(out));
	return daemon_conn_read_next(conn, daemon->master);
}

/*~ Similarly, lightningd asks us for all nodes when it gets `listnodes` */
/* We keep pointers into n, assuming it won't change. */
static void add_node_entry(const tal_t *ctx,
			   struct daemon *daemon,
			   const struct node *n,
			   struct gossip_getnodes_entry *e)
{
	e->nodeid = n->id;
	if (get_node_announcement(ctx, daemon, n,
				  e->color, e->alias,
				  &e->globalfeatures,
				  &e->addresses)) {
		e->last_timestamp = n->bcast.timestamp;
	} else {
		/* Timestamp on wire is an unsigned 32 bit: we use a 64-bit
		 * signed, so -1 means "we never received a
		 * node_announcement". */
		e->last_timestamp = -1;
	}
}

/* Simple routine when they ask for `listnodes` */
static struct io_plan *getnodes(struct io_conn *conn, struct daemon *daemon,
				const u8 *msg)
{
	u8 *out;
	struct node *n;
	const struct gossip_getnodes_entry **nodes;
	struct gossip_getnodes_entry *node_arr;
	struct node_id *id;

	if (!fromwire_gossip_getnodes_request(tmpctx, msg, &id))
		master_badmsg(WIRE_GOSSIP_GETNODES_REQUEST, msg);

	/* Format of reply is the same whether they ask for a specific node
	 * (0 or 1 responses) or all nodes (0 or more) */
	if (id) {
		n = get_node(daemon->rstate, id);
		if (n) {
			node_arr = tal_arr(tmpctx,
					   struct gossip_getnodes_entry,
					   1);
			add_node_entry(node_arr, daemon, n, &node_arr[0]);
		} else {
			nodes = NULL;
			node_arr = NULL;
		}
	} else {
		struct node_map_iter it;
		size_t i = 0;
		node_arr = tal_arr(tmpctx, struct gossip_getnodes_entry,
				   daemon->rstate->nodes->raw.elems);
		n = node_map_first(daemon->rstate->nodes, &it);
		while (n != NULL) {
			add_node_entry(node_arr, daemon, n, &node_arr[i++]);
			n = node_map_next(daemon->rstate->nodes, &it);
		}
		assert(i == daemon->rstate->nodes->raw.elems);
	}

	/* FIXME: towire wants array of pointers. */
	nodes = tal_arr(node_arr, const struct gossip_getnodes_entry *,
			tal_count(node_arr));
	for (size_t i = 0; i < tal_count(node_arr); i++)
		nodes[i] = &node_arr[i];
	out = towire_gossip_getnodes_reply(NULL, nodes);
	daemon_conn_send(daemon->master, take(out));
	return daemon_conn_read_next(conn, daemon->master);
}

/*~ We currently have a JSON command to ping a peer: it ends up here, where
 * gossipd generates the actual ping and sends it like any other gossip. */
static struct io_plan *ping_req(struct io_conn *conn, struct daemon *daemon,
				const u8 *msg)
{
	struct node_id id;
	u16 num_pong_bytes, len;
	struct peer *peer;
	u8 *ping;

	if (!fromwire_gossip_ping(msg, &id, &num_pong_bytes, &len))
		master_badmsg(WIRE_GOSSIP_PING, msg);

	/* Even if lightningd were to check for valid ids, there's a race
	 * where it might vanish before we read this command; cleaner to
	 * handle it here with 'sent' = false. */
	peer = find_peer(daemon, &id);
	if (!peer) {
		daemon_conn_send(daemon->master,
				 take(towire_gossip_ping_reply(NULL, &id,
							       false, 0)));
		goto out;
	}

	/* It should never ask for an oversize ping. */
	ping = make_ping(peer, num_pong_bytes, len);
	if (tal_count(ping) > 65535)
		status_failed(STATUS_FAIL_MASTER_IO, "Oversize ping");

	queue_peer_msg(peer, take(ping));
	status_trace("sending ping expecting %sresponse",
		     num_pong_bytes >= 65532 ? "no " : "");

	/* BOLT #1:
	 *
	 * A node receiving a `ping` message:
	 *...
	 *  - if `num_pong_bytes` is less than 65532:
	 *    - MUST respond by sending a `pong` message, with `byteslen` equal
	 *      to `num_pong_bytes`.
	 *  - otherwise (`num_pong_bytes` is **not** less than 65532):
	 *    - MUST ignore the `ping`.
	 */
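	/*~ A worked example (numbers chosen for illustration, not taken from a
	 * real trace): make_ping(peer, 8, 4) builds a `ping` asking for
	 * `num_pong_bytes` = 8 and carrying 4 ignored padding bytes; a
	 * conforming peer answers with a `pong` carrying 8 ignored bytes.
	 * make_ping(peer, 65532, 4) still goes out on the wire, but per the
	 * quote above the peer stays silent, so we tell lightningd
	 * "sent, no pong expected" immediately below. */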
	if (num_pong_bytes >= 65532)
		daemon_conn_send(daemon->master,
				 take(towire_gossip_ping_reply(NULL, &id,
							       true, 0)));
	else
		/* We'll respond to lightningd once the pong comes in */
		peer->num_pings_outstanding++;
|
|
|
|
|
|
|
out:
|
2018-10-25 01:43:05 +02:00
|
|
|
return daemon_conn_read_next(conn, daemon->master);
|
2017-04-12 20:20:48 +02:00
|
|
|
}

/*~ If a node has no public channels (other than the one to us), it's not
 * a very useful route to tell anyone about. */
static bool node_has_public_channels(const struct node *peer,
				     const struct chan *exclude)
{
	struct chan_map_iter i;
	struct chan *c;

	for (c = first_chan(peer, &i); c; c = next_chan(peer, &i)) {
		if (c == exclude)
			continue;
		if (is_chan_public(c))
			return true;
	}
	return false;
}

/*~ The `exposeprivate` flag is a trinary: NULL == dynamic, otherwise
 * value decides. Thus, we provide two wrappers for clarity: */
static bool never_expose(bool *exposeprivate)
{
	return exposeprivate && !*exposeprivate;
}

static bool always_expose(bool *exposeprivate)
{
	return exposeprivate && *exposeprivate;
}
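
/*~ The three cases, spelled out (this table just restates the two helpers
 * above and the decision made at the end of get_incoming_channels()):
 *
 *	exposeprivate		always_expose	never_expose	meaning
 *	NULL			false		false		dynamic: expose private
 *								channels only if we have
 *								no public ones
 *	points to true		true		false		always expose private
 *	points to false		false		true		never expose private
 */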

/*~ For routeboost, we offer payers a hint of what incoming channels might
 * have capacity for their payment. To do this, lightningd asks for the
 * information about all channels to this node; but gossipd doesn't know about
 * current capacities, so lightningd selects which to use. */
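/*~ Each hint is a `struct route_info`, roughly what ends up in a BOLT11
 * invoice `r` field (this list summarizes the assignments below, it is not a
 * struct definition):
 *   - pubkey of the peer the payment would come in through
 *   - short_channel_id of the incoming channel
 *   - fee_base_msat / fee_proportional_millionths the peer charges
 *   - cltv_expiry_delta the peer requires */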
static struct io_plan *get_incoming_channels(struct io_conn *conn,
					     struct daemon *daemon,
					     const u8 *msg)
{
	struct node *node;
	struct route_info *public = tal_arr(tmpctx, struct route_info, 0);
	struct route_info *private = tal_arr(tmpctx, struct route_info, 0);
	bool has_public;
	bool *exposeprivate;

	if (!fromwire_gossip_get_incoming_channels(tmpctx, msg, &exposeprivate))
		master_badmsg(WIRE_GOSSIP_GET_INCOMING_CHANNELS, msg);

	status_trace("exposeprivate = %s",
		     exposeprivate ? (*exposeprivate ? "TRUE" : "FALSE") : "NULL");
	status_trace("msg = %s", tal_hex(tmpctx, msg));
	status_trace("always_expose = %u, never_expose = %u",
		     always_expose(exposeprivate), never_expose(exposeprivate));

	has_public = always_expose(exposeprivate);

	node = get_node(daemon->rstate, &daemon->rstate->local_id);
	if (node) {
		struct chan_map_iter i;
		struct chan *c;

		for (c = first_chan(node, &i); c; c = next_chan(node, &i)) {
			const struct half_chan *hc;
			struct route_info ri;

			hc = &c->half[half_chan_to(node, c)];

			if (!is_halfchan_enabled(hc))
				continue;

			ri.pubkey = other_node(node, c)->id;
			ri.short_channel_id = c->scid;
			ri.fee_base_msat = hc->base_fee;
			ri.fee_proportional_millionths = hc->proportional_fee;
			ri.cltv_expiry_delta = hc->delay;

			has_public |= is_chan_public(c);

			/* If peer doesn't have other public channels,
			 * no point giving route */
			if (!node_has_public_channels(other_node(node, c), c))
				continue;

			if (always_expose(exposeprivate) || is_chan_public(c))
				tal_arr_expand(&public, ri);
			else
				tal_arr_expand(&private, ri);
		}
	}

	/* If no public channels (even deadend ones!), share private ones. */
	if (!has_public && !never_expose(exposeprivate))
		msg = towire_gossip_get_incoming_channels_reply(NULL, private);
	else
		msg = towire_gossip_get_incoming_channels_reply(NULL, public);
	daemon_conn_send(daemon->master, take(msg));

	return daemon_conn_read_next(conn, daemon->master);
}

#if DEVELOPER
static struct io_plan *query_scids_req(struct io_conn *conn,
				       struct daemon *daemon,
				       const u8 *msg)
{
	struct node_id id;
	struct short_channel_id *scids;
	struct peer *peer;

	if (!fromwire_gossip_query_scids(msg, msg, &id, &scids))
		master_badmsg(WIRE_GOSSIP_QUERY_SCIDS, msg);

	peer = find_peer(daemon, &id);
	if (!peer) {
		status_broken("query_scids: unknown peer %s",
			      type_to_string(tmpctx, struct node_id, &id));
		daemon_conn_send(daemon->master,
				 take(towire_gossip_scids_reply(NULL,
								false, false)));
	} else if (!query_short_channel_ids(daemon, peer, scids, false))
		daemon_conn_send(daemon->master,
				 take(towire_gossip_scids_reply(NULL,
								false, false)));
	return daemon_conn_read_next(conn, daemon->master);
}

/* BOLT #7:
 *
 * ### The `gossip_timestamp_filter` Message
 *...
 * This message allows a node to constrain future gossip messages to
 * a specific range.  A node which wants any gossip messages would have
 * to send this, otherwise `gossip_queries` negotiation means no gossip
 * messages would be received.
 *
 * Note that this filter replaces any previous one, so it can be used
 * multiple times to change the gossip from a peer. */
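/*~ For example (values chosen for illustration, not mandated anywhere):
 * `first_timestamp` = 0 with `timestamp_range` = 0xFFFFFFFF asks the peer for
 * everything it has, while `first_timestamp` = now with `timestamp_range` = 0
 * effectively mutes the peer until a new filter is sent. */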
/* This is the entry point for dev_send_timestamp_filter testing. */
static struct io_plan *send_timestamp_filter(struct io_conn *conn,
					     struct daemon *daemon,
					     const u8 *msg)
{
	struct node_id id;
	u32 first, range;
	struct peer *peer;

	if (!fromwire_gossip_send_timestamp_filter(msg, &id, &first, &range))
		master_badmsg(WIRE_GOSSIP_SEND_TIMESTAMP_FILTER, msg);

	peer = find_peer(daemon, &id);
	if (!peer) {
		status_broken("send_timestamp_filter: unknown peer %s",
			      type_to_string(tmpctx, struct node_id, &id));
		goto out;
	}

	if (!peer->gossip_queries_feature) {
		status_broken("send_timestamp_filter: no gossip_query support in peer %s",
			      type_to_string(tmpctx, struct node_id, &id));
		goto out;
	}

	msg = towire_gossip_timestamp_filter(NULL, &daemon->chain_hash,
					     first, range);
	queue_peer_msg(peer, take(msg));
out:
	return daemon_conn_read_next(conn, daemon->master);
}

/* FIXME: One day this will be called internally; for now it's just for
 * testing with dev_query_channel_range. */
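/*~ A sketch of the exchange this kicks off (block numbers are made up): we
 * send `query_channel_range(first_blocknum=600000, number_of_blocks=100)` and
 * the peer answers with one or more `reply_channel_range` messages whose
 * encoded short_channel_ids fall within blocks 600000..600099; those replies
 * are collected into peer->query_channel_scids using the bookkeeping fields
 * set up below. */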
static struct io_plan *query_channel_range(struct io_conn *conn,
					   struct daemon *daemon,
					   const u8 *msg)
{
	struct node_id id;
	u32 first_blocknum, number_of_blocks;
	struct peer *peer;

	if (!fromwire_gossip_query_channel_range(msg, &id, &first_blocknum,
						 &number_of_blocks))
		master_badmsg(WIRE_GOSSIP_QUERY_CHANNEL_RANGE, msg);

	peer = find_peer(daemon, &id);
	if (!peer) {
		status_broken("query_channel_range: unknown peer %s",
			      type_to_string(tmpctx, struct node_id, &id));
		goto fail;
	}

	if (!peer->gossip_queries_feature) {
		status_broken("query_channel_range: no gossip_query support in peer %s",
			      type_to_string(tmpctx, struct node_id, &id));
		goto fail;
	}

	if (peer->query_channel_blocks) {
		status_broken("query_channel_range: previous query active");
		goto fail;
	}

	/* Check for overflow on 32-bit machines! */
	if (BITMAP_NWORDS(number_of_blocks) < number_of_blocks / BITMAP_WORD_BITS) {
		status_broken("query_channel_range: huge number_of_blocks (%u) not supported",
			      number_of_blocks);
		goto fail;
	}

	status_debug("sending query_channel_range for blocks %u+%u",
		     first_blocknum, number_of_blocks);

#if EXPERIMENTAL_FEATURES
	msg = towire_query_channel_range(NULL, &daemon->chain_hash,
					 first_blocknum, number_of_blocks,
					 NULL);
#else
	msg = towire_query_channel_range(NULL, &daemon->chain_hash,
					 first_blocknum, number_of_blocks);
#endif
	queue_peer_msg(peer, take(msg));
	peer->range_first_blocknum = first_blocknum;
	peer->range_end_blocknum = first_blocknum + number_of_blocks;
	peer->range_blocks_remaining = number_of_blocks;
	peer->query_channel_blocks = tal_arrz(peer, bitmap,
					      BITMAP_NWORDS(number_of_blocks));
	peer->query_channel_scids = tal_arr(peer, struct short_channel_id, 0);

out:
	return daemon_conn_read_next(conn, daemon->master);

fail:
	daemon_conn_send(daemon->master,
			 take(towire_gossip_query_channel_range_reply(NULL,
								      0, 0,
								      false,
								      NULL)));
	goto out;
}

/* This is a testing hack to allow us to artificially lower the maximum bytes
 * of short_channel_ids we'll encode, using dev_set_max_scids_encode_size. */
static struct io_plan *dev_set_max_scids_encode_size(struct io_conn *conn,
						     struct daemon *daemon,
						     const u8 *msg)
{
	if (!fromwire_gossip_dev_set_max_scids_encode_size(msg,
							   &max_encoding_bytes))
		master_badmsg(WIRE_GOSSIP_DEV_SET_MAX_SCIDS_ENCODE_SIZE, msg);

	status_trace("Set max_scids_encode_bytes to %u", max_encoding_bytes);
	return daemon_conn_read_next(conn, daemon->master);
}

/* Another testing hack */
static struct io_plan *dev_gossip_suppress(struct io_conn *conn,
					   struct daemon *daemon,
					   const u8 *msg)
{
	if (!fromwire_gossip_dev_suppress(msg))
		master_badmsg(WIRE_GOSSIP_DEV_SUPPRESS, msg);

	status_unusual("Suppressing all gossip");
	suppress_gossip = true;
	return daemon_conn_read_next(conn, daemon->master);
}

static struct io_plan *dev_gossip_memleak(struct io_conn *conn,
					  struct daemon *daemon,
					  const u8 *msg)
{
	struct htable *memtable;
	bool found_leak;

	memtable = memleak_enter_allocations(tmpctx, msg, msg);

	/* Now delete daemon and those which it has pointers to. */
	memleak_remove_referenced(memtable, daemon);
	memleak_remove_routing_tables(memtable, daemon->rstate);

	found_leak = dump_memleak(memtable);
	daemon_conn_send(daemon->master,
			 take(towire_gossip_dev_memleak_reply(NULL,
							      found_leak)));
	return daemon_conn_read_next(conn, daemon->master);
}

static struct io_plan *dev_compact_store(struct io_conn *conn,
					 struct daemon *daemon,
					 const u8 *msg)
{
	bool done = gossip_store_compact(daemon->rstate->gs);

	daemon_conn_send(daemon->master,
			 take(towire_gossip_dev_compact_store_reply(NULL,
								    done)));
	return daemon_conn_read_next(conn, daemon->master);
}
#endif /* DEVELOPER */

/*~ lightningd: so, tell me about this channel, so we can forward to it. */
static struct io_plan *get_channel_peer(struct io_conn *conn,
					struct daemon *daemon, const u8 *msg)
{
	struct short_channel_id scid;
	struct chan *chan;
	const struct node_id *key;
	int direction;

	if (!fromwire_gossip_get_channel_peer(msg, &scid))
		master_badmsg(WIRE_GOSSIP_GET_CHANNEL_PEER, msg);

	chan = get_channel(daemon->rstate, &scid);
	if (!chan) {
		status_trace("Failed to resolve channel %s",
			     type_to_string(tmpctx, struct short_channel_id, &scid));
		key = NULL;
	} else if (local_direction(daemon, chan, &direction)) {
		key = &chan->nodes[!direction]->id;
	} else {
		status_trace("Resolved channel %s was not local",
			     type_to_string(tmpctx, struct short_channel_id,
					    &scid));
		key = NULL;
	}
	daemon_conn_send(daemon->master,
			 take(towire_gossip_get_channel_peer_reply(NULL, key)));
	return daemon_conn_read_next(conn, daemon->master);
}

/*~ We queue incoming channel_announcement pending confirmation from lightningd
 * that it really is an unspent output.  Here's its reply. */
static struct io_plan *handle_txout_reply(struct io_conn *conn,
					  struct daemon *daemon, const u8 *msg)
{
	struct short_channel_id scid;
	u8 *outscript;
	struct amount_sat sat;
	bool was_unknown;

	if (!fromwire_gossip_get_txout_reply(msg, msg, &scid, &sat, &outscript))
		master_badmsg(WIRE_GOSSIP_GET_TXOUT_REPLY, msg);

	/* Were we looking specifically for this? */
	was_unknown = false;
	for (size_t i = 0; i < tal_count(daemon->unknown_scids); i++) {
		if (short_channel_id_eq(&daemon->unknown_scids[i], &scid)) {
			was_unknown = true;
			tal_arr_remove(&daemon->unknown_scids, i);
			break;
		}
	}

	/* Outscript is NULL if it's not an unspent output */
	if (handle_pending_cannouncement(daemon->rstate, &scid, sat, outscript)
	    && was_unknown) {
		/* It was real: we're missing gossip. */
		gossip_missing(daemon);
	}

	/* Anywhere we might have announced a channel, we check if it's time to
	 * announce ourselves (ie. if we just announced our own first channel) */
	maybe_send_own_node_announce(daemon);

	return daemon_conn_read_next(conn, daemon->master);
}

/* Fix up the channel_update to include the type if it doesn't currently have
 * one. See ElementsProject/lightning#1730 and lightningnetwork/lnd#1599 for the
 * in-depth discussion on why we break message parsing here... */
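/*~ Concretely (byte values shown for illustration): `channel_update` is wire
 * type 258 = 0x0102, so a well-formed update starts with the two bytes
 * 0x01 0x02.  Some implementations stuffed the update into the onion error
 * without that type prefix; if the first two bytes are anything else we
 * prepend 0x01 0x02 ourselves so the rest of our parsing code can stay
 * oblivious. */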
static u8 *patch_channel_update(const tal_t *ctx, u8 *channel_update TAKES)
{
	u8 *fixed;
	if (channel_update != NULL &&
	    fromwire_peektype(channel_update) != WIRE_CHANNEL_UPDATE) {
		/* This should be a channel_update prefixed with the
		 * WIRE_CHANNEL_UPDATE type, but isn't.  Let's prefix it. */
		fixed = tal_arr(ctx, u8, 0);
		towire_u16(&fixed, WIRE_CHANNEL_UPDATE);
		towire(&fixed, channel_update, tal_bytelen(channel_update));
		if (taken(channel_update))
			tal_free(channel_update);
		return fixed;
	} else {
		return tal_dup_arr(ctx, u8,
				   channel_update, tal_count(channel_update), 0);
	}
}

/* Return NULL if the wrapped onion error message has no channel_update field,
 * or return the embedded channel_update message otherwise. */
static u8 *channel_update_from_onion_error(const tal_t *ctx,
					   const u8 *onion_message)
{
	u8 *channel_update = NULL;
	struct amount_msat unused_msat;
	u32 unused32;

	/* Identify failcodes that have some channel_update.
	 *
	 * TODO > BOLT 1.0: Add new failcodes when updating to a
	 * new BOLT version. */
	if (!fromwire_temporary_channel_failure(ctx,
						onion_message,
						&channel_update) &&
	    !fromwire_amount_below_minimum(ctx,
					   onion_message, &unused_msat,
					   &channel_update) &&
	    !fromwire_fee_insufficient(ctx,
				       onion_message, &unused_msat,
				       &channel_update) &&
	    !fromwire_incorrect_cltv_expiry(ctx,
					    onion_message, &unused32,
					    &channel_update) &&
	    !fromwire_expiry_too_soon(ctx,
				      onion_message,
				      &channel_update))
		/* No channel update. */
		return NULL;

	return patch_channel_update(ctx, take(channel_update));
}

/*~ lightningd tells us when a payment has failed; we mark the channel (or
 * node) unusable here if it's a permanent failure, and unpack any
 * channel_update contained in the error. */
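/*~ For instance (a sketch of the BOLT #4 layout, not a byte-exact dump): a
 * `temporary_channel_failure` from a routing node carries a failcode plus the
 * failing channel's latest `channel_update`; channel_update_from_onion_error()
 * above digs that update out so routing_failure() below can take it into
 * account along with the failing channel and node. */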
static struct io_plan *handle_payment_failure(struct io_conn *conn,
					      struct daemon *daemon,
					      const u8 *msg)
{
	struct node_id erring_node;
	struct short_channel_id erring_channel;
	u8 erring_channel_direction;
	u8 *error;
	enum onion_type failcode;
	u8 *channel_update;

	if (!fromwire_gossip_payment_failure(msg, msg,
					     &erring_node,
					     &erring_channel,
					     &erring_channel_direction,
					     &error))
		master_badmsg(WIRE_GOSSIP_PAYMENT_FAILURE, msg);

	failcode = fromwire_peektype(error);
	channel_update = channel_update_from_onion_error(tmpctx, error);
	if (channel_update)
		status_debug("Extracted channel_update %s from onionreply %s",
			     tal_hex(tmpctx, channel_update),
			     tal_hex(tmpctx, error));
	routing_failure(daemon->rstate,
			&erring_node,
			&erring_channel,
			erring_channel_direction,
			failcode,
			channel_update);

	return daemon_conn_read_next(conn, daemon->master);
}

/*~ This is where lightningd tells us that a channel's funding transaction has
 * been spent. */
static struct io_plan *handle_outpoint_spent(struct io_conn *conn,
					     struct daemon *daemon,
					     const u8 *msg)
{
	struct short_channel_id scid;
	struct chan *chan;
	struct routing_state *rstate = daemon->rstate;
	if (!fromwire_gossip_outpoint_spent(msg, &scid))
		master_badmsg(WIRE_GOSSIP_OUTPOINT_SPENT, msg);

	chan = get_channel(rstate, &scid);
	if (chan) {
		status_trace(
		    "Deleting channel %s due to the funding outpoint being "
		    "spent",
		    type_to_string(msg, struct short_channel_id, &scid));
		remove_channel_from_store(rstate, chan);
		/* Freeing is sufficient since everything else is allocated off
		 * of the channel and this takes care of unregistering
		 * the channel */
		free_chan(rstate, chan);
	}

	return daemon_conn_read_next(conn, daemon->master);
}

/*~ This is sent by lightningd when it kicks off 'closingd': we disable it
 * in both directions.
 *
 * We'll leave it to handle_outpoint_spent to delete the channel from our view
 * once the close gets confirmed.  This avoids having strange states in which
 * the channel is listed in our peer list but won't be returned when listing
 * public channels.  This does not send out updates since that's triggered by
 * the peer connection closing.
 */
static struct io_plan *handle_local_channel_close(struct io_conn *conn,
						  struct daemon *daemon,
						  const u8 *msg)
{
	struct short_channel_id scid;
	struct chan *chan;
	struct routing_state *rstate = daemon->rstate;
	if (!fromwire_gossip_local_channel_close(msg, &scid))
		master_badmsg(WIRE_GOSSIP_LOCAL_CHANNEL_CLOSE, msg);

	chan = get_channel(rstate, &scid);
	if (chan)
		local_disable_chan(rstate, chan);
	return daemon_conn_read_next(conn, daemon->master);
}

/*~ This routine handles all the commands from lightningd. */
static struct io_plan *recv_req(struct io_conn *conn,
				const u8 *msg,
				struct daemon *daemon)
{
	enum gossip_wire_type t = fromwire_peektype(msg);

	switch (t) {
	case WIRE_GOSSIPCTL_INIT:
		return gossip_init(conn, daemon, msg);

	case WIRE_GOSSIP_GETNODES_REQUEST:
		return getnodes(conn, daemon, msg);

	case WIRE_GOSSIP_GETROUTE_REQUEST:
		return getroute_req(conn, daemon, msg);

	case WIRE_GOSSIP_GETCHANNELS_REQUEST:
		return getchannels_req(conn, daemon, msg);

	case WIRE_GOSSIP_GET_CHANNEL_PEER:
		return get_channel_peer(conn, daemon, msg);

	case WIRE_GOSSIP_GET_TXOUT_REPLY:
		return handle_txout_reply(conn, daemon, msg);

	case WIRE_GOSSIP_PAYMENT_FAILURE:
		return handle_payment_failure(conn, daemon, msg);

	case WIRE_GOSSIP_OUTPOINT_SPENT:
		return handle_outpoint_spent(conn, daemon, msg);

	case WIRE_GOSSIP_LOCAL_CHANNEL_CLOSE:
		return handle_local_channel_close(conn, daemon, msg);

	case WIRE_GOSSIP_PING:
		return ping_req(conn, daemon, msg);

	case WIRE_GOSSIP_GET_INCOMING_CHANNELS:
		return get_incoming_channels(conn, daemon, msg);

#if DEVELOPER
	case WIRE_GOSSIP_QUERY_SCIDS:
		return query_scids_req(conn, daemon, msg);

	case WIRE_GOSSIP_SEND_TIMESTAMP_FILTER:
		return send_timestamp_filter(conn, daemon, msg);

	case WIRE_GOSSIP_QUERY_CHANNEL_RANGE:
		return query_channel_range(conn, daemon, msg);

	case WIRE_GOSSIP_DEV_SET_MAX_SCIDS_ENCODE_SIZE:
		return dev_set_max_scids_encode_size(conn, daemon, msg);
	case WIRE_GOSSIP_DEV_SUPPRESS:
		return dev_gossip_suppress(conn, daemon, msg);
	case WIRE_GOSSIP_DEV_MEMLEAK:
		return dev_gossip_memleak(conn, daemon, msg);
	case WIRE_GOSSIP_DEV_COMPACT_STORE:
		return dev_compact_store(conn, daemon, msg);
#else
	case WIRE_GOSSIP_QUERY_SCIDS:
	case WIRE_GOSSIP_SEND_TIMESTAMP_FILTER:
	case WIRE_GOSSIP_QUERY_CHANNEL_RANGE:
	case WIRE_GOSSIP_DEV_SET_MAX_SCIDS_ENCODE_SIZE:
	case WIRE_GOSSIP_DEV_SUPPRESS:
	case WIRE_GOSSIP_DEV_MEMLEAK:
	case WIRE_GOSSIP_DEV_COMPACT_STORE:
		break;
#endif /* !DEVELOPER */

	/* We send these, we don't receive them */
	case WIRE_GOSSIP_GETNODES_REPLY:
	case WIRE_GOSSIP_GETROUTE_REPLY:
	case WIRE_GOSSIP_GETCHANNELS_REPLY:
	case WIRE_GOSSIP_PING_REPLY:
	case WIRE_GOSSIP_SCIDS_REPLY:
	case WIRE_GOSSIP_QUERY_CHANNEL_RANGE_REPLY:
	case WIRE_GOSSIP_GET_CHANNEL_PEER_REPLY:
	case WIRE_GOSSIP_GET_INCOMING_CHANNELS_REPLY:
	case WIRE_GOSSIP_GET_TXOUT:
	case WIRE_GOSSIP_DEV_MEMLEAK_REPLY:
	case WIRE_GOSSIP_DEV_COMPACT_STORE_REPLY:
		break;
	}

	/* Master shouldn't give bad requests. */
	status_failed(STATUS_FAIL_MASTER_IO, "%i: %s",
		      t, tal_hex(tmpctx, msg));
}

/* This is called when lightningd closes its connection to us. We simply
 * exit. */
static void master_gone(struct daemon_conn *master UNUSED)
{
	daemon_shutdown();
	/* Can't tell master, it's gone. */
	exit(2);
}

int main(int argc, char *argv[])
{
	setup_locale();

	struct daemon *daemon;

	subdaemon_setup(argc, argv);

	daemon = tal(NULL, struct daemon);
	list_head_init(&daemon->peers);
	daemon->unknown_scids = tal_arr(daemon, struct short_channel_id, 0);
	daemon->gossip_missing = NULL;

	/* Note the use of time_mono() here.  That's a monotonic clock, which
	 * is really useful: it can only be used to measure relative events
	 * (there's no correspondence to time-since-Ken-grew-a-beard or
	 * anything), but unlike time_now(), this will never jump backwards by
	 * half a second and leave me wondering how my tests failed CI! */
	timers_init(&daemon->timers, time_mono());

	/* Our daemons always use STDIN for commands from lightningd. */
	daemon->master = daemon_conn_new(daemon, STDIN_FILENO,
					 recv_req, NULL, daemon);
	tal_add_destructor(daemon->master, master_gone);

	status_setup_async(daemon->master);

	/* connectd is already started, and uses this fd to ask us things. */
	daemon->connectd = daemon_conn_new(daemon, CONNECTD_FD,
					   connectd_req, NULL, daemon);

	/* This loop never exits.  io_loop() only returns if a timer has
	 * expired, or io_break() is called, or all fds are closed.  We don't
	 * use io_break, and closing the lightningd fd calls master_gone()
	 * which exits. */
	for (;;) {
		struct timer *expired = NULL;
		io_loop(&daemon->timers, &expired);

		timer_expired(daemon, expired);
	}
}

/*~ Note that the actual routing stuff is in routing.c; you might want to
 * check that out later.
 *
 * But that's the last of the global daemons.  We now move on to the first of
 * the per-peer daemons: openingd/openingd.c.
 */