2018-11-21 04:10:03 +01:00
|
|
|
#include <bitcoin/chainparams.h>
|
2018-09-24 03:41:39 +02:00
|
|
|
#include <ccan/array_size/array_size.h>
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ Welcome to the gossip daemon: keeper of maps!
|
|
|
|
*
|
|
|
|
* This is the last "global" daemon; it has three purposes.
|
|
|
|
*
|
|
|
|
* 1. To determine routes for payments when lightningd asks.
|
|
|
|
* 2. The second purpose is to receive gossip from peers (via their
|
|
|
|
* per-peer daemons) and send it out to them.
|
|
|
|
 * 3. Talk to `connectd` to answer address queries for nodes.
|
|
|
|
*
|
|
|
|
* The gossip protocol itself is fairly simple, but has some twists which
|
|
|
|
* add complexity to this daemon.
|
|
|
|
*/
|
2018-06-04 06:20:25 +02:00
|
|
|
#include <ccan/asort/asort.h>
|
2018-11-13 05:03:49 +01:00
|
|
|
#include <ccan/bitmap/bitmap.h>
|
2017-10-23 06:12:38 +02:00
|
|
|
#include <ccan/build_assert/build_assert.h>
|
2018-02-08 02:24:46 +01:00
|
|
|
#include <ccan/cast/cast.h>
|
2017-01-10 06:08:33 +01:00
|
|
|
#include <ccan/container_of/container_of.h>
|
|
|
|
#include <ccan/crypto/hkdf_sha256/hkdf_sha256.h>
|
2018-02-23 01:00:00 +01:00
|
|
|
#include <ccan/crypto/siphash24/siphash24.h>
|
2017-01-10 06:08:33 +01:00
|
|
|
#include <ccan/endian/endian.h>
|
|
|
|
#include <ccan/fdpass/fdpass.h>
|
|
|
|
#include <ccan/io/fdpass/fdpass.h>
|
|
|
|
#include <ccan/io/io.h>
|
|
|
|
#include <ccan/list/list.h>
|
gossipd: rewrite to do the handshake internally.
Now the flow is much simpler from a lightningd POV:
1. If we want to connect to a peer, just send gossipd `gossipctl_reach_peer`.
2. Every new peer, gossipd hands up to lightningd, with global/local features
and the peer fd and a gossip fd using `gossip_peer_connected`
3. If lightningd doesn't want it, it just hands the peerfd and global/local
features back to gossipd using `gossipctl_handle_peer`
4. If a peer sends a non-gossip msg (eg `open_channel`) the gossipd sends
it up using `gossip_peer_nongossip`.
5. If lightningd wants to fund a channel, it simply calls `release_channel`.
Notes:
* There's no more "unique_id": we use the peer id.
* For the moment, we don't ask gossipd when we're told to list peers, so
connected peers without a channel don't appear in the JSON getpeers API.
* We add a `gossipctl_peer_addrhint` for the moment, so you can connect to
a specific ip/port, but using other sources is a TODO.
* We now (correctly) only give up on reaching a peer after we exchange init
messages, which changes the test_disconnect case.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2017-10-11 12:09:49 +02:00
|
|
|
#include <ccan/mem/mem.h>
|
2017-01-10 06:08:33 +01:00
|
|
|
#include <ccan/noerr/noerr.h>
|
2017-03-07 02:08:20 +01:00
|
|
|
#include <ccan/take/take.h>
|
2017-01-10 06:08:33 +01:00
|
|
|
#include <ccan/tal/str/str.h>
|
2017-08-28 18:04:01 +02:00
|
|
|
#include <ccan/timer/timer.h>
|
2018-04-23 16:36:16 +02:00
|
|
|
#include <common/bech32.h>
|
|
|
|
#include <common/bech32_util.h>
|
2017-08-28 18:05:01 +02:00
|
|
|
#include <common/cryptomsg.h>
|
|
|
|
#include <common/daemon_conn.h>
|
2018-06-28 03:34:47 +02:00
|
|
|
#include <common/decode_short_channel_ids.h>
|
2018-03-13 16:42:55 +01:00
|
|
|
#include <common/features.h>
|
2018-11-21 23:39:31 +01:00
|
|
|
#include <common/memleak.h>
|
2017-08-28 18:05:01 +02:00
|
|
|
#include <common/ping.h>
|
2018-04-25 14:39:38 +02:00
|
|
|
#include <common/pseudorand.h>
|
2017-08-28 18:05:01 +02:00
|
|
|
#include <common/status.h>
|
2018-01-08 11:01:09 +01:00
|
|
|
#include <common/subdaemon.h>
|
2017-08-28 18:04:01 +02:00
|
|
|
#include <common/timeout.h>
|
2017-08-28 18:03:01 +02:00
|
|
|
#include <common/type_to_string.h>
|
2017-08-28 18:02:01 +02:00
|
|
|
#include <common/utils.h>
|
|
|
|
#include <common/version.h>
|
gossipd: rewrite to do the handshake internally.
Now the flow is much simpler from a lightningd POV:
1. If we want to connect to a peer, just send gossipd `gossipctl_reach_peer`.
2. Every new peer, gossipd hands up to lightningd, with global/local features
and the peer fd and a gossip fd using `gossip_peer_connected`
3. If lightningd doesn't want it, it just hands the peerfd and global/local
features back to gossipd using `gossipctl_handle_peer`
4. If a peer sends a non-gossip msg (eg `open_channel`) the gossipd sends
it up using `gossip_peer_nongossip`.
5. If lightningd wants to fund a channel, it simply calls `release_channel`.
Notes:
* There's no more "unique_id": we use the peer id.
* For the moment, we don't ask gossipd when we're told to list peers, so
connected peers without a channel don't appear in the JSON getpeers API.
* We add a `gossipctl_peer_addrhint` for the moment, so you can connect to
a specific ip/port, but using other sources is a TODO.
* We now (correctly) only give up on reaching a peer after we exchange init
messages, which changes the test_disconnect case.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2017-10-11 12:09:49 +02:00
|
|
|
#include <common/wire_error.h>
|
2017-10-23 06:17:38 +02:00
|
|
|
#include <common/wireaddr.h>
|
2018-07-24 08:18:58 +02:00
|
|
|
#include <connectd/gen_connect_gossip_wire.h>
|
2017-01-10 06:08:33 +01:00
|
|
|
#include <errno.h>
|
|
|
|
#include <fcntl.h>
|
2017-08-29 06:12:04 +02:00
|
|
|
#include <gossipd/broadcast.h>
|
2018-11-13 05:03:51 +01:00
|
|
|
#include <gossipd/gen_gossip_peerd_wire.h>
|
2017-08-29 06:12:04 +02:00
|
|
|
#include <gossipd/gen_gossip_wire.h>
|
|
|
|
#include <gossipd/routing.h>
|
2018-09-20 05:06:42 +02:00
|
|
|
#include <hsmd/gen_hsm_wire.h>
|
2017-01-10 06:08:33 +01:00
|
|
|
#include <inttypes.h>
|
2017-03-12 13:39:23 +01:00
|
|
|
#include <lightningd/gossip_msg.h>
|
gossipd: rewrite to do the handshake internally.
Now the flow is much simpler from a lightningd POV:
1. If we want to connect to a peer, just send gossipd `gossipctl_reach_peer`.
2. Every new peer, gossipd hands up to lightningd, with global/local features
and the peer fd and a gossip fd using `gossip_peer_connected`
3. If lightningd doesn't want it, it just hands the peerfd and global/local
features back to gossipd using `gossipctl_handle_peer`
4. If a peer sends a non-gossip msg (eg `open_channel`) the gossipd sends
it up using `gossip_peer_nongossip`.
5. If lightningd wants to fund a channel, it simply calls `release_channel`.
Notes:
* There's no more "unique_id": we use the peer id.
* For the moment, we don't ask gossipd when we're told to list peers, so
connected peers without a channel don't appear in the JSON getpeers API.
* We add a `gossipctl_peer_addrhint` for the moment, so you can connect to
a specific ip/port, but using other sources is a TODO.
* We now (correctly) only give up on reaching a peer after we exchange init
messages, which changes the test_disconnect case.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2017-10-11 12:09:49 +02:00
|
|
|
#include <netdb.h>
|
|
|
|
#include <netinet/in.h>
|
2017-01-10 06:08:33 +01:00
|
|
|
#include <secp256k1_ecdh.h>
|
|
|
|
#include <sodium/randombytes.h>
|
|
|
|
#include <sys/socket.h>
|
|
|
|
#include <sys/stat.h>
|
|
|
|
#include <sys/types.h>
|
2018-05-07 06:29:21 +02:00
|
|
|
#include <sys/un.h>
|
2017-01-10 06:08:33 +01:00
|
|
|
#include <unistd.h>
|
|
|
|
#include <wire/gen_peer_wire.h>
|
|
|
|
#include <wire/wire_io.h>
|
2017-11-24 15:47:14 +01:00
|
|
|
#include <wire/wire_sync.h>
|
2018-06-04 06:28:02 +02:00
|
|
|
#include <zlib.h>
|
2017-01-10 06:08:33 +01:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* We talk to `hsmd` to sign our gossip messages with the node key */
|
2017-10-11 11:58:50 +02:00
|
|
|
#define HSM_FD 3
|
2018-11-21 01:36:08 +01:00
|
|
|
/* connectd asks us for help finding nodes, and gossip fds for new peers */
|
2018-07-24 08:18:58 +02:00
|
|
|
#define CONNECTD_FD 4
|
2017-10-11 11:58:50 +02:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* In developer mode we provide hooks for whitebox testing */
|
2018-06-04 06:28:02 +02:00
|
|
|
#if DEVELOPER
|
|
|
|
static u32 max_scids_encode_bytes = -1U;
|
2018-07-26 23:27:37 +02:00
|
|
|
static bool suppress_gossip = false;
|
2018-06-04 06:28:02 +02:00
|
|
|
#endif
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ The core daemon structure: the singleton state for this gossip daemon. */
struct daemon {
	/* Who am I?  Helps us find ourself in the routing map. */
	struct pubkey id;

	/* Peers we are gossiping to: id is unique */
	struct list_head peers;

	/* Connection to lightningd. */
	struct daemon_conn *master;

	/* Connection to connect daemon. */
	struct daemon_conn *connectd;

	/* Routing information */
	struct routing_state *rstate;

	/* chainhash for checking/making gossip msgs */
	struct bitcoin_blkid chain_hash;

	/* Timers: we batch gossip, and also refresh announcements */
	struct timers timers;

	/* How often we flush gossip (60 seconds unless DEVELOPER override) */
	u32 broadcast_interval_msec;

	/* Global features to list in node_announcement. */
	u8 *globalfeatures;

	/* Alias (not NUL terminated) and favorite color for node_announcement */
	u8 alias[32];
	u8 rgb[3];

	/* What addresses we can actually announce. */
	struct wireaddr *announcable;
};
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* This represents each peer we're gossiping with */
struct peer {
	/* daemon->peers list membership */
	struct list_node list;

	/* parent pointer. */
	struct daemon *daemon;

	/* The ID of the peer (always unique) */
	struct pubkey id;

	/* The two features gossip cares about (so far) */
	bool gossip_queries_feature, initial_routing_sync_feature;

	/* High water mark for the staggered broadcast */
	u64 broadcast_index;

	/* Timestamp range the peer asked us to filter gossip by */
	u32 gossip_timestamp_min, gossip_timestamp_max;

	/* Are there outstanding queries on short_channel_ids?
	 * scid_query_idx is how far through scid_queries we've replied. */
	const struct short_channel_id *scid_queries;
	size_t scid_query_idx;

	/* Are there outstanding node_announcements from scid_queries?
	 * scid_query_nodes_idx is how far through we've replied. */
	struct pubkey *scid_query_nodes;
	size_t scid_query_nodes_idx;

	/* If this is NULL, we're syncing gossip now. */
	struct oneshot *gossip_timer;

	/* How many query responses are we expecting? */
	size_t num_scid_queries_outstanding;

	/* How many pongs are we expecting? */
	size_t num_pings_outstanding;

	/* Map of outstanding channel_range requests. */
	bitmap *query_channel_blocks;
	/* What we're querying: [range_first_blocknum, range_end_blocknum) */
	u32 range_first_blocknum, range_end_blocknum;
	u32 range_blocks_remaining;
	struct short_channel_id *query_channel_scids;

	/* The daemon_conn used to queue messages to/from the peer. */
	struct daemon_conn *dc;
};
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ A channel consists of a `struct half_chan` for each direction, each of
|
|
|
|
* which has a `flags` word from the `channel_update`; bit 1 is
|
|
|
|
* ROUTING_FLAGS_DISABLED in the `channel_update`. But we also keep a local
|
|
|
|
* whole-channel flag which indicates it's not available; we use this when a
|
|
|
|
* peer disconnects, and generate a `channel_update` to tell the world lazily
|
|
|
|
* when someone asks. */
|
2018-09-25 07:43:56 +02:00
|
|
|
static void peer_disable_channels(struct daemon *daemon, struct node *node)
|
|
|
|
{
|
2018-11-21 01:36:08 +01:00
|
|
|
/* If this peer had a channel with us, mark it disabled. */
|
2018-09-25 07:43:56 +02:00
|
|
|
for (size_t i = 0; i < tal_count(node->chans); i++) {
|
|
|
|
struct chan *c = node->chans[i];
|
|
|
|
if (pubkey_eq(&other_node(node, c)->id, &daemon->id))
|
|
|
|
c->local_disabled = true;
|
|
|
|
}
|
|
|
|
}
|
2017-03-11 14:45:54 +01:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ Destroy a peer, usually because the per-peer daemon has exited.
 *
 * Were you wondering why we call this "destroy_peer" and not "peer_destroy"?
 * I thought not!  But while CCAN modules are required to keep to their own
 * prefix namespace, leading to unnatural word order, we couldn't stomach that
 * for our own internal use.  We use 'find_foo', 'destroy_foo' and 'new_foo'.
 */
static void destroy_peer(struct peer *peer)
{
	struct node *node;

	/* Remove it from the peers list */
	list_del_from(&peer->daemon->peers, &peer->list);

	/* If we have a channel with this peer, disable it. */
	node = get_node(peer->daemon->rstate, &peer->id);
	if (node)
		peer_disable_channels(peer->daemon, node);

	/* This is tricky: our lifetime is tied to the daemon_conn; it's our
	 * parent, so we are freed if it is, but we need to free it if we're
	 * freed manually.  tal_free() treats this as a noop if it's already
	 * being freed */
	tal_free(peer->dc);
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Search for a peer. */
|
gossipd: rewrite to do the handshake internally.
Now the flow is much simpler from a lightningd POV:
1. If we want to connect to a peer, just send gossipd `gossipctl_reach_peer`.
2. Every new peer, gossipd hands up to lightningd, with global/local features
and the peer fd and a gossip fd using `gossip_peer_connected`
3. If lightningd doesn't want it, it just hands the peerfd and global/local
features back to gossipd using `gossipctl_handle_peer`
4. If a peer sends a non-gossip msg (eg `open_channel`) the gossipd sends
it up using `gossip_peer_nongossip`.
5. If lightningd wants to fund a channel, it simply calls `release_channel`.
Notes:
* There's no more "unique_id": we use the peer id.
* For the moment, we don't ask gossipd when we're told to list peers, so
connected peers without a channel don't appear in the JSON getpeers API.
* We add a `gossipctl_peer_addrhint` for the moment, so you can connect to
a specific ip/port, but using other sources is a TODO.
* We now (correctly) only give up on reaching a peer after we exchange init
messages, which changes the test_disconnect case.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2017-10-11 12:09:49 +02:00
|
|
|
static struct peer *find_peer(struct daemon *daemon, const struct pubkey *id)
|
|
|
|
{
|
|
|
|
struct peer *peer;
|
|
|
|
|
|
|
|
list_for_each(&daemon->peers, peer, list)
|
|
|
|
if (pubkey_eq(&peer->id, id))
|
|
|
|
return peer;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Queue a gossip message for the peer: we wrap every gossip message; the
|
|
|
|
* subdaemon simply unwraps and sends. Note that we don't wrap messages
|
|
|
|
* coming from the subdaemon to gossipd, because gossipd has to process the
|
|
|
|
* messages anyway (and it doesn't trust the subdaemon); the subdaemon
|
|
|
|
* trusts gossipd and will forward whatever it's told to. */
|
2018-11-05 02:16:48 +01:00
|
|
|
static void queue_peer_msg(struct peer *peer, const u8 *msg TAKES)
|
|
|
|
{
|
2018-11-13 05:03:51 +01:00
|
|
|
const u8 *send = towire_gossipd_send_gossip(NULL, msg);
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Autogenerated functions don't take(), so we do here */
|
2018-11-05 02:16:48 +01:00
|
|
|
if (taken(msg))
|
|
|
|
tal_free(msg);
|
|
|
|
daemon_conn_send(peer->dc, take(send));
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* This pokes daemon_conn, which calls dump_gossip: the NULL gossip_timer
 * tells it that the gossip timer has expired and it should send any queued
 * gossip messages. */
static void wake_gossip_out(struct peer *peer)
{
	/* If we were waiting, we're not any more.  Note: this must happen
	 * before the wake, since the NULL timer is the "ready" signal. */
	peer->gossip_timer = tal_free(peer->gossip_timer);

	/* Notify the daemon_conn-write loop */
	daemon_conn_wake(peer->dc);
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* BOLT #7:
|
|
|
|
*
|
|
|
|
* There are several messages which contain a long array of
|
|
|
|
* `short_channel_id`s (called `encoded_short_ids`) so we utilize a
|
|
|
|
* simple compression scheme: the first byte indicates the encoding, the
|
|
|
|
* rest contains the data.
|
|
|
|
*/
|
2018-06-04 06:22:25 +02:00
|
|
|
static u8 *encode_short_channel_ids_start(const tal_t *ctx)
|
|
|
|
{
|
2018-07-31 13:57:14 +02:00
|
|
|
u8 *encoded = tal_arr(ctx, u8, 0);
|
2018-06-04 06:28:02 +02:00
|
|
|
towire_u8(&encoded, SHORTIDS_ZLIB);
|
2018-06-04 06:22:25 +02:00
|
|
|
return encoded;
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Marshal a single short_channel_id: appended after the encoding-type byte
 * written by encode_short_channel_ids_start(). */
static void encode_add_short_channel_id(u8 **encoded,
					const struct short_channel_id *scid)
{
	towire_short_channel_id(encoded, scid);
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Greg Maxwell asked me privately about using zlib for communicating a set,
|
|
|
|
* and suggested that we'd be better off using Golomb-Rice coding a-la BIP
|
|
|
|
* 158. However, naively using Rice encoding isn't a win: we have to get
|
|
|
|
* more complex and use separate streams. The upside is that it's between
|
|
|
|
* 2 and 5 times smaller (assuming optimal Rice encoding + gzip). We can add
|
|
|
|
* that later. */
|
2018-06-04 06:28:02 +02:00
|
|
|
static u8 *zencode_scids(const tal_t *ctx, const u8 *scids, size_t len)
|
|
|
|
{
|
|
|
|
u8 *z;
|
|
|
|
int err;
|
|
|
|
unsigned long compressed_len = len;
|
|
|
|
|
|
|
|
/* Prefer to fail if zlib makes it larger */
|
|
|
|
z = tal_arr(ctx, u8, len);
|
|
|
|
err = compress2(z, &compressed_len, scids, len, Z_BEST_COMPRESSION);
|
|
|
|
if (err == Z_OK) {
|
|
|
|
status_trace("short_ids compressed %zu into %lu",
|
|
|
|
len, compressed_len);
|
|
|
|
tal_resize(&z, compressed_len);
|
|
|
|
return z;
|
|
|
|
}
|
|
|
|
status_trace("short_ids compress %zu returned %i:"
|
|
|
|
" not compresssing", len, err);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Once we've assembled all the scids, finalize the encoding: try the
 * requested compression, fall back to uncompressed if it doesn't help,
 * and return false if the result won't fit in max_bytes. */
static bool encode_short_channel_ids_end(u8 **encoded, size_t max_bytes)
{
	u8 *z;

	/* First byte says what encoding we want. */
	switch ((enum scid_encode_types)(*encoded)[0]) {
	case SHORTIDS_ZLIB:
		/* compress the payload (everything after the type byte) */
		z = zencode_scids(tmpctx, *encoded + 1, tal_count(*encoded) - 1);
		if (z) {
			/* If successful, copy over and trim */
			tal_resize(encoded, 1 + tal_count(z));
			memcpy((*encoded) + 1, z, tal_count(z));
			goto check_length;
		}
		/* Otherwise, change first byte to 'uncompressed' */
		(*encoded)[0] = SHORTIDS_UNCOMPRESSED;
		/* Fall thru */
	case SHORTIDS_UNCOMPRESSED:
		goto check_length;
	}

	/* Unreachable unless the type byte was never one we wrote. */
	status_failed(STATUS_FAIL_INTERNAL_ERROR,
		      "Unknown short_ids encoding %u", (*encoded)[0]);

check_length:
#if DEVELOPER
	/* Whitebox testing hook: force "too big" at an arbitrary size. */
	if (tal_count(*encoded) > max_scids_encode_bytes)
		return false;
#endif
	return tal_count(*encoded) <= max_bytes;
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* BOLT #7:
 *
 * An endpoint node:
 *   - if the `gossip_queries` feature is negotiated:
 *     - MUST NOT relay any gossip messages unless explicitly requested.
 */
static void setup_gossip_range(struct peer *peer)
{
	u8 *msg;

	/*~ Without the `gossip_queries` feature, gossip flows automatically. */
	if (!peer->gossip_queries_feature)
		return;

	/*~ We need to ask for something to start the gossip flowing: we ask
	 * for everything from 1970 to 2106; this is horribly naive.  We
	 * should be much smarter about requesting only what we don't already
	 * have. */
	msg = towire_gossip_timestamp_filter(peer,
					     &peer->daemon->chain_hash,
					     0, UINT32_MAX);
	queue_peer_msg(peer, take(msg));
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Create a node_announcement with the given signature. It may be NULL in the
 * case we need to create a provisional announcement for the HSM to sign.
 * This is called twice: once with the dummy signature to get it signed and a
 * second time to build the full packet with the signature. The timestamp is
 * handed in rather than using time_now() internally, since that could change
 * between the dummy creation and the call with a signature. */
static u8 *create_node_announcement(const tal_t *ctx, struct daemon *daemon,
				    secp256k1_ecdsa_signature *sig,
				    u32 timestamp)
{
	u8 *addresses = tal_arr(tmpctx, u8, 0);
	u8 *announcement;
	size_t i;

	/* NULL sig means "provisional": use an all-zero placeholder. */
	if (!sig) {
		sig = tal(tmpctx, secp256k1_ecdsa_signature);
		memset(sig, 0, sizeof(*sig));
	}
	/* Serialize each announcable address in order. */
	for (i = 0; i < tal_count(daemon->announcable); i++)
		towire_wireaddr(&addresses, &daemon->announcable[i]);

	announcement =
	    towire_node_announcement(ctx, sig, daemon->globalfeatures, timestamp,
				     &daemon->id, daemon->rgb, daemon->alias,
				     addresses);
	return announcement;
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ This routine creates a `node_announcement` for our node, and hands it to
 * the routing.c code like any other `node_announcement`.  Such announcements
 * are only accepted if there is an announced channel associated with that node
 * (to prevent spam), so we only call this once we've announced a channel. */
static void send_node_announcement(struct daemon *daemon)
{
	u32 timestamp = time_now().ts.tv_sec;
	secp256k1_ecdsa_signature sig;
	u8 *msg, *nannounce, *err;
	s64 last_timestamp;
	struct node *self = get_node(daemon->rstate, &daemon->id);

	/* BOLT #7:
	 *
	 * The origin node:
	 *   - MUST set `timestamp` to be greater than that of any previous
	 *   `node_announcement` it has previously created.
	 */
	if (self)
		last_timestamp = self->last_timestamp;
	else
		/* last_timestamp is carefully a s64, so this works */
		last_timestamp = -1;

	/* Bump forward if the clock hasn't moved since our last one. */
	if (timestamp <= last_timestamp)
		timestamp = last_timestamp + 1;

	/* Get an unsigned one. */
	nannounce = create_node_announcement(tmpctx, daemon, NULL, timestamp);

	/* Ask hsmd to sign it (synchronous) */
	if (!wire_sync_write(HSM_FD, take(towire_hsm_node_announcement_sig_req(NULL, nannounce))))
		status_failed(STATUS_FAIL_MASTER_IO, "Could not write to HSM: %s", strerror(errno));

	msg = wire_sync_read(tmpctx, HSM_FD);
	if (!fromwire_hsm_node_announcement_sig_reply(msg, &sig))
		status_failed(STATUS_FAIL_MASTER_IO, "HSM returned an invalid node_announcement sig");

	/* We got the signature for our provisional node_announcement back
	 * from the HSM, create the real announcement and forward it to
	 * gossipd so it can take care of forwarding it. */
	nannounce = create_node_announcement(NULL, daemon, &sig, timestamp);

	/* This injects it into the routing code in routing.c; it should not
	 * reject it! */
	err = handle_node_announcement(daemon->rstate, take(nannounce));
	if (err)
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "rejected own node announcement: %s",
			      tal_hex(tmpctx, err));
}
|
|
|
|
|
2018-09-24 03:42:00 +02:00
|
|
|
/* Return true if the only change would be the timestamp. */
static bool node_announcement_redundant(struct daemon *daemon)
{
	struct node *n = get_node(daemon->rstate, &daemon->id);
	/* Not in the routing map at all: announcement needed. */
	if (!n)
		return false;

	/* -1 is the "no node_announcement seen yet" sentinel
	 * (see send_node_announcement). */
	if (n->last_timestamp == -1)
		return false;

	/* Address lists must match exactly, element by element. */
	if (tal_count(n->addresses) != tal_count(daemon->announcable))
		return false;

	for (size_t i = 0; i < tal_count(n->addresses); i++)
		if (!wireaddr_eq(&n->addresses[i], &daemon->announcable[i]))
			return false;

	/* Alias and color are fixed-size fields: compare byte-wise. */
	BUILD_ASSERT(ARRAY_SIZE(daemon->alias) == ARRAY_SIZE(n->alias));
	if (!memeq(daemon->alias, ARRAY_SIZE(daemon->alias),
		   n->alias, ARRAY_SIZE(n->alias)))
		return false;

	BUILD_ASSERT(ARRAY_SIZE(daemon->rgb) == ARRAY_SIZE(n->rgb_color));
	if (!memeq(daemon->rgb, ARRAY_SIZE(daemon->rgb),
		   n->rgb_color, ARRAY_SIZE(n->rgb_color)))
		return false;

	/* Feature bitmaps are variable-length tal arrays. */
	if (!memeq(daemon->globalfeatures, tal_count(daemon->globalfeatures),
		   n->globalfeatures, tal_count(n->globalfeatures)))
		return false;

	return true;
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Should we announce our own node? Called at strategic places. */
|
2018-06-04 06:38:39 +02:00
|
|
|
static void maybe_send_own_node_announce(struct daemon *daemon)
|
2018-06-04 06:15:25 +02:00
|
|
|
{
|
2018-11-21 01:36:08 +01:00
|
|
|
/* We keep an internal flag in the routing code to say we've announced
|
|
|
|
* a local channel. The alternative would be to have it make a
|
|
|
|
* callback, but when we start up we don't want to make multiple
|
|
|
|
* announcments, so we use this approach for now. */
|
2018-06-04 06:15:25 +02:00
|
|
|
if (!daemon->rstate->local_channel_announced)
|
|
|
|
return;
|
|
|
|
|
2018-09-24 03:42:00 +02:00
|
|
|
if (node_announcement_redundant(daemon))
|
|
|
|
return;
|
|
|
|
|
2018-06-04 06:15:25 +02:00
|
|
|
send_node_announcement(daemon);
|
|
|
|
daemon->rstate->local_channel_announced = false;
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ Routines to handle gossip messages from peers, forwarded by subdaemons.
|
|
|
|
*-----------------------------------------------------------------------
|
|
|
|
*
|
|
|
|
* It's not the subdaemon's fault if they're malformed or invalid; so these
|
|
|
|
* all return an error packet which gets sent back to the subdaemon in that
|
|
|
|
* case.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* The routing code checks that it's basically valid, returning an
|
|
|
|
* error message for the peer or NULL. NULL means it's OK, but the
|
|
|
|
* message might be redundant, in which case scid is also NULL.
|
|
|
|
* Otherwise `scid` gives us the short_channel_id claimed by the
|
|
|
|
* message, and puts the announcemnt on an internal 'pending'
|
|
|
|
* queue. We'll send a request to lightningd to look it up, and continue
|
|
|
|
* processing in `handle_txout_reply`. */
|
2018-11-05 02:21:51 +01:00
|
|
|
static const u8 *handle_channel_announcement_msg(struct peer *peer,
|
|
|
|
const u8 *msg)
|
2017-11-24 15:47:14 +01:00
|
|
|
{
|
2018-11-05 02:21:51 +01:00
|
|
|
const struct short_channel_id *scid;
|
|
|
|
const u8 *err;
|
2017-03-11 14:45:54 +01:00
|
|
|
|
2018-11-05 02:21:51 +01:00
|
|
|
/* If it's OK, tells us the short_channel_id to lookup */
|
|
|
|
err = handle_channel_announcement(peer->daemon->rstate, msg, &scid);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
else if (scid)
|
|
|
|
daemon_conn_send(peer->daemon->master,
|
|
|
|
take(towire_gossip_get_txout(NULL, scid)));
|
|
|
|
return NULL;
|
|
|
|
}
|
2017-03-11 14:45:54 +01:00
|
|
|
|
2018-11-05 02:21:51 +01:00
|
|
|
static u8 *handle_channel_update_msg(struct peer *peer, const u8 *msg)
|
|
|
|
{
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Hand the channel_update to the routing code */
|
2018-11-05 02:21:51 +01:00
|
|
|
u8 *err = handle_channel_update(peer->daemon->rstate, msg, "subdaemon");
|
|
|
|
if (err)
|
|
|
|
return err;
|
2018-03-18 14:57:15 +01:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ As a nasty compromise in the spec, we only forward channel_announce
|
|
|
|
* once we have a channel_update; the channel isn't *usable* for
|
|
|
|
* routing until you have both anyway. For this reason, we might have
|
|
|
|
* just sent out our own channel_announce, so we check if it's time to
|
|
|
|
* send a node_announcement too. */
|
2018-11-05 02:21:51 +01:00
|
|
|
maybe_send_own_node_announce(peer->daemon);
|
2018-03-18 14:57:15 +01:00
|
|
|
return NULL;
|
2017-03-11 14:45:54 +01:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ The peer can ask about an array of short channel ids: we don't assemble the
|
|
|
|
* reply immediately but process them one at a time in dump_gossip which is
|
|
|
|
* called when there's nothing more important to send. */
|
2018-11-05 02:21:51 +01:00
|
|
|
static const u8 *handle_query_short_channel_ids(struct peer *peer, const u8 *msg)
|
2018-06-04 06:21:25 +02:00
|
|
|
{
|
|
|
|
struct bitcoin_blkid chain;
|
|
|
|
u8 *encoded;
|
|
|
|
struct short_channel_id *scids;
|
|
|
|
|
|
|
|
if (!fromwire_query_short_channel_ids(tmpctx, msg, &chain, &encoded)) {
|
2018-11-05 02:21:51 +01:00
|
|
|
return towire_errorfmt(peer, NULL,
|
|
|
|
"Bad query_short_channel_ids %s",
|
|
|
|
tal_hex(tmpctx, msg));
|
2018-06-04 06:21:25 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 04:10:03 +01:00
|
|
|
if (!bitcoin_blkid_eq(&peer->daemon->chain_hash, &chain)) {
|
2018-06-04 06:21:25 +02:00
|
|
|
status_trace("%s sent query_short_channel_ids chainhash %s",
|
|
|
|
type_to_string(tmpctx, struct pubkey, &peer->id),
|
|
|
|
type_to_string(tmpctx, struct bitcoin_blkid, &chain));
|
2018-11-05 02:21:51 +01:00
|
|
|
return NULL;
|
2018-06-04 06:21:25 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* BOLT #7:
|
|
|
|
*
|
|
|
|
* - if it has not sent `reply_short_channel_ids_end` to a
|
|
|
|
* previously received `query_short_channel_ids` from this
|
|
|
|
* sender:
|
|
|
|
* - MAY fail the connection.
|
|
|
|
*/
|
|
|
|
if (peer->scid_queries || peer->scid_query_nodes) {
|
2018-11-05 02:21:51 +01:00
|
|
|
return towire_errorfmt(peer, NULL,
|
|
|
|
"Bad concurrent query_short_channel_ids");
|
2018-06-04 06:21:25 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
scids = decode_short_ids(tmpctx, encoded);
|
|
|
|
if (!scids) {
|
2018-11-05 02:21:51 +01:00
|
|
|
return towire_errorfmt(peer, NULL,
|
|
|
|
"Bad query_short_channel_ids encoding %s",
|
|
|
|
tal_hex(tmpctx, encoded));
|
2018-06-04 06:21:25 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* BOLT #7:
|
|
|
|
*
|
2018-06-17 12:13:44 +02:00
|
|
|
* - MUST respond to each known `short_channel_id` with a
|
|
|
|
* `channel_announcement` and the latest `channel_update`s for each end
|
|
|
|
* - SHOULD NOT wait for the next outgoing gossip flush to send
|
2018-06-04 06:21:25 +02:00
|
|
|
* these.
|
|
|
|
*/
|
|
|
|
peer->scid_queries = tal_steal(peer, scids);
|
|
|
|
peer->scid_query_idx = 0;
|
|
|
|
peer->scid_query_nodes = tal_arr(peer, struct pubkey, 0);
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Notify the daemon_conn-write loop to invoke create_next_scid_reply */
|
2018-10-25 01:43:05 +02:00
|
|
|
daemon_conn_wake(peer->dc);
|
2018-11-05 02:21:51 +01:00
|
|
|
return NULL;
|
2018-06-04 06:21:25 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ The peer can specify a timestamp range; gossip outside this range won't be
|
|
|
|
* sent any more, and we'll start streaming gossip in this range. This is
|
|
|
|
* only supposed to be used if we negotiate the `gossip_queries` in which case
|
|
|
|
* the first send triggers the first gossip to be sent.
|
|
|
|
*/
|
2018-11-05 02:21:51 +01:00
|
|
|
static u8 *handle_gossip_timestamp_filter(struct peer *peer, const u8 *msg)
|
2018-06-04 06:26:25 +02:00
|
|
|
{
|
|
|
|
struct bitcoin_blkid chain_hash;
|
|
|
|
u32 first_timestamp, timestamp_range;
|
|
|
|
|
|
|
|
if (!fromwire_gossip_timestamp_filter(msg, &chain_hash,
|
|
|
|
&first_timestamp,
|
|
|
|
×tamp_range)) {
|
2018-11-05 02:21:51 +01:00
|
|
|
return towire_errorfmt(peer, NULL,
|
|
|
|
"Bad gossip_timestamp_filter %s",
|
|
|
|
tal_hex(tmpctx, msg));
|
2018-06-04 06:26:25 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 04:10:03 +01:00
|
|
|
if (!bitcoin_blkid_eq(&peer->daemon->chain_hash, &chain_hash)) {
|
2018-06-04 06:26:25 +02:00
|
|
|
status_trace("%s sent gossip_timestamp_filter chainhash %s",
|
|
|
|
type_to_string(tmpctx, struct pubkey, &peer->id),
|
|
|
|
type_to_string(tmpctx, struct bitcoin_blkid,
|
|
|
|
&chain_hash));
|
2018-11-05 02:21:51 +01:00
|
|
|
return NULL;
|
2018-06-04 06:26:25 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* We initialize the timestamps to "impossible" values so we can
|
|
|
|
* detect that this is the first filter: in this case, we gossip sync
|
|
|
|
* immediately. */
|
2018-06-04 06:26:25 +02:00
|
|
|
if (peer->gossip_timestamp_min > peer->gossip_timestamp_max)
|
2018-06-04 06:27:25 +02:00
|
|
|
wake_gossip_out(peer);
|
2018-06-04 06:26:25 +02:00
|
|
|
|
|
|
|
/* FIXME: We don't index by timestamp, so this forces a brute
|
2018-11-21 01:36:08 +01:00
|
|
|
* search! But keeping in correct order is v. hard. */
|
2018-06-04 06:26:25 +02:00
|
|
|
peer->gossip_timestamp_min = first_timestamp;
|
|
|
|
peer->gossip_timestamp_max = first_timestamp + timestamp_range - 1;
|
2018-11-21 01:36:08 +01:00
|
|
|
/* In case they overflow. */
|
2018-06-04 06:26:25 +02:00
|
|
|
if (peer->gossip_timestamp_max < peer->gossip_timestamp_min)
|
|
|
|
peer->gossip_timestamp_max = UINT32_MAX;
|
|
|
|
peer->broadcast_index = 0;
|
2018-11-05 02:21:51 +01:00
|
|
|
return NULL;
|
2018-06-04 06:26:25 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ We can send multiple replies when the peer queries for all channels in
|
|
|
|
* a given range of blocks; each one indicates the range of blocks it covers. */
|
2018-06-04 06:28:02 +02:00
|
|
|
static void reply_channel_range(struct peer *peer,
|
|
|
|
u32 first_blocknum, u32 number_of_blocks,
|
|
|
|
const u8 *encoded)
|
|
|
|
{
|
|
|
|
/* BOLT #7:
|
|
|
|
*
|
|
|
|
* - For each `reply_channel_range`:
|
|
|
|
* - MUST set with `chain_hash` equal to that of `query_channel_range`,
|
|
|
|
* - MUST encode a `short_channel_id` for every open channel it
|
|
|
|
* knows in blocks `first_blocknum` to `first_blocknum` plus
|
|
|
|
* `number_of_blocks` minus one.
|
|
|
|
* - MUST limit `number_of_blocks` to the maximum number of blocks
|
|
|
|
* whose results could fit in `encoded_short_ids`
|
|
|
|
* - if does not maintain up-to-date channel information for
|
|
|
|
* `chain_hash`:
|
|
|
|
* - MUST set `complete` to 0.
|
|
|
|
* - otherwise:
|
|
|
|
* - SHOULD set `complete` to 1.
|
|
|
|
*/
|
|
|
|
u8 *msg = towire_reply_channel_range(NULL,
|
2018-11-21 04:10:03 +01:00
|
|
|
&peer->daemon->chain_hash,
|
2018-06-04 06:28:02 +02:00
|
|
|
first_blocknum,
|
|
|
|
number_of_blocks,
|
|
|
|
1, encoded);
|
|
|
|
queue_peer_msg(peer, take(msg));
|
|
|
|
}
|
|
|
|
|
/*~ When we need to send an array of channels, it might go over our 64k packet
 * size.  If it doesn't fit, we recurse, splitting in two, etc.  Each message
 * indicates what blocks it contains, so the recipient knows when we're
 * finished. */
static void queue_channel_ranges(struct peer *peer,
				 u32 first_blocknum, u32 number_of_blocks)
{
	struct routing_state *rstate = peer->daemon->rstate;
	u8 *encoded = encode_short_channel_ids_start(tmpctx);
	struct short_channel_id scid;

	/* BOLT #7:
	 *
	 * 1. type: 264 (`reply_channel_range`) (`gossip_queries`)
	 * 2. data:
	 *   * [`32`:`chain_hash`]
	 *   * [`4`:`first_blocknum`]
	 *   * [`4`:`number_of_blocks`]
	 *   * [`1`:`complete`]
	 *   * [`2`:`len`]
	 *   * [`len`:`encoded_short_ids`]
	 */
	/* Fixed fields of the reply, plus the 2-byte wire type prefix,
	 * leave this much room for the encoded ids in a 65535-byte packet. */
	const size_t reply_overhead = 32 + 4 + 4 + 1 + 2;
	const size_t max_encoded_bytes = 65535 - 2 - reply_overhead;

	/* Avoid underflow: we don't use block 0 anyway */
	if (first_blocknum == 0)
		mk_short_channel_id(&scid, 1, 0, 0);
	else
		mk_short_channel_id(&scid, first_blocknum, 0, 0);
	/* Step back one so uintmap_after starts at the first id in range. */
	scid.u64--;

	/* We keep a `uintmap` of `short_channel_id` to `struct chan *`.
	 * Unlike a htable, it's efficient to iterate through, but it only
	 * works because each short_channel_id is basically a 64-bit unsigned
	 * integer.
	 *
	 * First we iterate and gather all the short channel ids. */
	while (uintmap_after(&rstate->chanmap, &scid.u64)) {
		u32 blocknum = short_channel_id_blocknum(&scid);
		if (blocknum >= first_blocknum + number_of_blocks)
			break;

		encode_add_short_channel_id(&encoded, &scid);
	}

	/* If we can encode that, fine: send it */
	if (encode_short_channel_ids_end(&encoded, max_encoded_bytes)) {
		reply_channel_range(peer, first_blocknum, number_of_blocks,
				    encoded);
		return;
	}

	/* It wouldn't all fit: divide in half */
	/* We assume we can always send one block! */
	if (number_of_blocks <= 1) {
		/* We always assume we can send 1 blocks worth */
		status_broken("Could not fit scids for single block %u",
			      first_blocknum);
		return;
	}
	status_debug("queue_channel_ranges full: splitting %u+%u and %u+%u",
		     first_blocknum,
		     number_of_blocks / 2,
		     first_blocknum + number_of_blocks / 2,
		     number_of_blocks - number_of_blocks / 2);
	/* Recurse on each half; recursion terminates at single blocks. */
	queue_channel_ranges(peer, first_blocknum, number_of_blocks / 2);
	queue_channel_ranges(peer, first_blocknum + number_of_blocks / 2,
			     number_of_blocks - number_of_blocks / 2);
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ The peer can ask for all channels is a series of blocks. We reply with one
|
|
|
|
* or more messages containing the short_channel_ids. */
|
2018-11-05 02:21:51 +01:00
|
|
|
static u8 *handle_query_channel_range(struct peer *peer, const u8 *msg)
|
2018-06-04 06:28:02 +02:00
|
|
|
{
|
|
|
|
struct bitcoin_blkid chain_hash;
|
|
|
|
u32 first_blocknum, number_of_blocks;
|
|
|
|
|
|
|
|
if (!fromwire_query_channel_range(msg, &chain_hash,
|
|
|
|
&first_blocknum, &number_of_blocks)) {
|
2018-11-05 02:21:51 +01:00
|
|
|
return towire_errorfmt(peer, NULL,
|
|
|
|
"Bad query_channel_range %s",
|
|
|
|
tal_hex(tmpctx, msg));
|
2018-06-04 06:28:02 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* FIXME: if they ask for the wrong chain, we should not ignore it,
|
|
|
|
* but give an empty response with the `complete` flag unset? */
|
2018-11-21 04:10:03 +01:00
|
|
|
if (!bitcoin_blkid_eq(&peer->daemon->chain_hash, &chain_hash)) {
|
2018-06-04 06:28:02 +02:00
|
|
|
status_trace("%s sent query_channel_range chainhash %s",
|
|
|
|
type_to_string(tmpctx, struct pubkey, &peer->id),
|
|
|
|
type_to_string(tmpctx, struct bitcoin_blkid,
|
|
|
|
&chain_hash));
|
2018-11-05 02:21:51 +01:00
|
|
|
return NULL;
|
2018-06-04 06:28:02 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* This checks for 32-bit overflow! */
|
2018-06-04 06:28:02 +02:00
|
|
|
if (first_blocknum + number_of_blocks < first_blocknum) {
|
2018-11-05 02:21:51 +01:00
|
|
|
return towire_errorfmt(peer, NULL,
|
|
|
|
"query_channel_range overflow %u+%u",
|
|
|
|
first_blocknum, number_of_blocks);
|
2018-06-04 06:28:02 +02:00
|
|
|
}
|
2018-11-05 02:21:51 +01:00
|
|
|
|
2018-06-04 06:28:02 +02:00
|
|
|
queue_channel_ranges(peer, first_blocknum, number_of_blocks);
|
2018-11-05 02:21:51 +01:00
|
|
|
return NULL;
|
2018-06-04 06:28:02 +02:00
|
|
|
}
|
|
|
|
|
/*~ This is the reply we get when we send query_channel_range; we keep
 * expecting them until the entire range we asked for is covered. */
static const u8 *handle_reply_channel_range(struct peer *peer, const u8 *msg)
{
	struct bitcoin_blkid chain;
	u8 complete;
	u32 first_blocknum, number_of_blocks, start, end;
	u8 *encoded;
	struct short_channel_id *scids;
	size_t n;
	unsigned long b;

	if (!fromwire_reply_channel_range(tmpctx, msg, &chain, &first_blocknum,
					  &number_of_blocks, &complete,
					  &encoded)) {
		return towire_errorfmt(peer, NULL,
				       "Bad reply_channel_range %s",
				       tal_hex(tmpctx, msg));
	}

	if (!bitcoin_blkid_eq(&peer->daemon->chain_hash, &chain)) {
		return towire_errorfmt(peer, NULL,
				       "reply_channel_range for bad chain: %s",
				       tal_hex(tmpctx, msg));
	}

	/* query_channel_blocks is only non-NULL while we have a query in
	 * flight; an unsolicited reply is a protocol violation. */
	if (!peer->query_channel_blocks) {
		return towire_errorfmt(peer, NULL,
				       "reply_channel_range without query: %s",
				       tal_hex(tmpctx, msg));
	}

	/* Beware overflow! */
	if (first_blocknum + number_of_blocks < first_blocknum) {
		return towire_errorfmt(peer, NULL,
				       "reply_channel_range invalid %u+%u",
				       first_blocknum, number_of_blocks);
	}

	scids = decode_short_ids(tmpctx, encoded);
	if (!scids) {
		return towire_errorfmt(peer, NULL,
				       "Bad reply_channel_range encoding %s",
				       tal_hex(tmpctx, encoded));
	}

	status_debug("peer %s reply_channel_range %u+%u (of %u+%u) %zu scids",
		     type_to_string(tmpctx, struct pubkey, &peer->id),
		     first_blocknum, number_of_blocks,
		     peer->range_first_blocknum,
		     peer->range_end_blocknum - peer->range_first_blocknum,
		     tal_count(scids));

	/* BOLT #7:
	 *
	 * The receiver of `query_channel_range`:
	 *...
	 * - MUST respond with one or more `reply_channel_range` whose
	 *   combined range cover the requested `first_blocknum` to
	 *   `first_blocknum` plus `number_of_blocks` minus one.
	 */
	/* ie. They can be outside range we asked, but they must overlap! */
	if (first_blocknum + number_of_blocks <= peer->range_first_blocknum
	    || first_blocknum >= peer->range_end_blocknum) {
		return towire_errorfmt(peer, NULL,
				       "reply_channel_range invalid %u+%u for query %u+%u",
				       first_blocknum, number_of_blocks,
				       peer->range_first_blocknum,
				       peer->range_end_blocknum
				       - peer->range_first_blocknum);
	}

	start = first_blocknum;
	end = first_blocknum + number_of_blocks;
	/* Trim to make it a subset of what we want. */
	if (start < peer->range_first_blocknum)
		start = peer->range_first_blocknum;
	if (end > peer->range_end_blocknum)
		end = peer->range_end_blocknum;

	/* We keep a bitmap of what blocks have been covered by replies: bit 0
	 * represents block peer->range_first_blocknum.  If any bit in the
	 * trimmed range is already set, this reply overlaps a previous one. */
	b = bitmap_ffs(peer->query_channel_blocks,
		       start - peer->range_first_blocknum,
		       end - peer->range_first_blocknum);
	if (b != end - peer->range_first_blocknum) {
		return towire_errorfmt(peer, NULL,
				       "reply_channel_range %u+%u already have block %lu",
				       first_blocknum, number_of_blocks,
				       peer->range_first_blocknum + b);
	}

	/* Mark that short_channel_ids for this block have been received */
	bitmap_fill_range(peer->query_channel_blocks,
			  start - peer->range_first_blocknum,
			  end - peer->range_first_blocknum);
	peer->range_blocks_remaining -= end - start;

	/* Add scids: append this reply's ids to the accumulated array. */
	n = tal_count(peer->query_channel_scids);
	tal_resize(&peer->query_channel_scids, n + tal_count(scids));
	memcpy(peer->query_channel_scids + n, scids, tal_bytelen(scids));

	/* Still more to go? */
	if (peer->range_blocks_remaining)
		return NULL;

	/* All done, send reply to lightningd: that's currently the only thing
	 * which triggers this (for testing).  Eventually we might start probing
	 * for gossip information on our own. */
	msg = towire_gossip_query_channel_range_reply(NULL,
						      first_blocknum,
						      number_of_blocks,
						      complete,
						      peer->query_channel_scids);
	daemon_conn_send(peer->daemon->master, take(msg));
	/* Free query state so the next query starts fresh. */
	peer->query_channel_scids = tal_free(peer->query_channel_scids);
	peer->query_channel_blocks = tal_free(peer->query_channel_blocks);
	return NULL;
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ For simplicity, all pings and pongs are forwarded to us here in gossipd. */
|
2018-11-05 02:21:51 +01:00
|
|
|
static u8 *handle_ping(struct peer *peer, const u8 *ping)
|
2018-11-05 02:16:48 +01:00
|
|
|
{
|
|
|
|
u8 *pong;
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* This checks the ping packet and makes a pong reply if needed; peer
|
|
|
|
* can specify it doesn't want a response, to simulate traffic. */
|
2018-11-05 02:21:51 +01:00
|
|
|
if (!check_ping_make_pong(NULL, ping, &pong))
|
|
|
|
return towire_errorfmt(peer, NULL, "Bad ping");
|
2018-11-05 02:16:48 +01:00
|
|
|
|
|
|
|
if (pong)
|
|
|
|
queue_peer_msg(peer, take(pong));
|
2018-11-05 02:21:51 +01:00
|
|
|
return NULL;
|
2018-11-05 02:16:48 +01:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ When we get a pong, we tell lightningd about it (it's probably a response
|
|
|
|
* to the `ping` JSON RPC command). */
|
2018-11-05 02:21:51 +01:00
|
|
|
static const u8 *handle_pong(struct peer *peer, const u8 *pong)
|
2018-11-05 02:16:48 +01:00
|
|
|
{
|
|
|
|
const char *err = got_pong(pong, &peer->num_pings_outstanding);
|
|
|
|
|
2018-11-05 02:21:51 +01:00
|
|
|
if (err)
|
|
|
|
return towire_errorfmt(peer, NULL, "%s", err);
|
2018-11-05 02:16:48 +01:00
|
|
|
|
|
|
|
daemon_conn_send(peer->daemon->master,
|
|
|
|
take(towire_gossip_ping_reply(NULL, &peer->id, true,
|
|
|
|
tal_count(pong))));
|
2018-11-05 02:21:51 +01:00
|
|
|
return NULL;
|
2018-11-05 02:16:48 +01:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ When we ask about an array of short_channel_ids, we get all channel &
|
|
|
|
* node announcements and channel updates which the peer knows. There's an
|
|
|
|
* explicit end packet; this is needed to differentiate between 'I'm slow'
|
|
|
|
* and 'I don't know those channels'. */
|
2018-11-05 02:21:51 +01:00
|
|
|
static u8 *handle_reply_short_channel_ids_end(struct peer *peer, const u8 *msg)
|
2018-11-05 02:16:48 +01:00
|
|
|
{
|
|
|
|
struct bitcoin_blkid chain;
|
|
|
|
u8 complete;
|
|
|
|
|
|
|
|
if (!fromwire_reply_short_channel_ids_end(msg, &chain, &complete)) {
|
2018-11-05 02:21:51 +01:00
|
|
|
return towire_errorfmt(peer, NULL,
|
|
|
|
"Bad reply_short_channel_ids_end %s",
|
|
|
|
tal_hex(tmpctx, msg));
|
2018-11-05 02:16:48 +01:00
|
|
|
}
|
|
|
|
|
2018-11-21 04:10:03 +01:00
|
|
|
if (!bitcoin_blkid_eq(&peer->daemon->chain_hash, &chain)) {
|
2018-11-05 02:21:51 +01:00
|
|
|
return towire_errorfmt(peer, NULL,
|
|
|
|
"reply_short_channel_ids_end for bad chain: %s",
|
|
|
|
tal_hex(tmpctx, msg));
|
2018-11-05 02:16:48 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (peer->num_scid_queries_outstanding == 0) {
|
2018-11-05 02:21:51 +01:00
|
|
|
return towire_errorfmt(peer, NULL,
|
|
|
|
"unexpected reply_short_channel_ids_end: %s",
|
|
|
|
tal_hex(tmpctx, msg));
|
2018-11-05 02:16:48 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
peer->num_scid_queries_outstanding--;
|
2018-11-21 01:36:08 +01:00
|
|
|
/* We tell lightningd: this is because we currently only ask for
|
|
|
|
* query_short_channel_ids when lightningd asks. */
|
2018-11-05 02:16:48 +01:00
|
|
|
msg = towire_gossip_scids_reply(msg, true, complete);
|
|
|
|
daemon_conn_send(peer->daemon->master, take(msg));
|
2018-11-05 02:21:51 +01:00
|
|
|
return NULL;
|
2018-11-05 02:16:48 +01:00
|
|
|
}
|
|
|
|
|
/*~ Arbitrary ordering function of pubkeys.
 *
 * Note that we could use memcmp() here: even if they had somehow different
 * bitwise representations for the same key, we copied them all from struct
 * node which should make them unique.  Even if not (say, a node vanished
 * and reappeared) we'd just end up sending two node_announcement for the
 * same node.
 */
/* asort() callback: the unused third argument is required by its
 * typesafe comparison-function signature. */
static int pubkey_order(const struct pubkey *k1, const struct pubkey *k2,
			void *unused UNUSED)
{
	return pubkey_cmp(k1, k2);
}
|
|
|
|
|
2018-07-24 08:18:58 +02:00
|
|
|
static void uniquify_node_ids(struct pubkey **ids)
|
2017-01-10 06:08:33 +01:00
|
|
|
{
|
2018-07-24 08:18:58 +02:00
|
|
|
size_t dst, src;
|
2018-06-04 06:20:25 +02:00
|
|
|
|
|
|
|
/* BOLT #7:
|
|
|
|
*
|
|
|
|
* - MUST follow with any `node_announcement`s for each
|
|
|
|
* `channel_announcement`
|
|
|
|
*
|
|
|
|
* - SHOULD avoid sending duplicate `node_announcements` in
|
|
|
|
* response to a single `query_short_channel_ids`.
|
|
|
|
*/
|
2018-11-21 01:36:08 +01:00
|
|
|
/* ccan/asort is a typesafe qsort wrapper: like most ccan modules
|
|
|
|
* it eschews exposing 'void *' pointers and ensures that the
|
|
|
|
* callback function and its arguments match types correctly. */
|
2018-06-04 06:20:25 +02:00
|
|
|
asort(*ids, tal_count(*ids), pubkey_order, NULL);
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Compact the array */
|
2018-06-04 06:20:25 +02:00
|
|
|
for (dst = 0, src = 0; src < tal_count(*ids); src++) {
|
|
|
|
if (dst && pubkey_eq(&(*ids)[dst-1], &(*ids)[src]))
|
|
|
|
continue;
|
|
|
|
(*ids)[dst++] = (*ids)[src];
|
|
|
|
}
|
2018-11-21 01:36:08 +01:00
|
|
|
|
|
|
|
/* And trim to length, so tal_count() gives correct answer. */
|
2018-06-04 06:20:25 +02:00
|
|
|
tal_resize(ids, dst);
|
|
|
|
}
|
|
|
|
|
/*~ We are fairly careful to avoid the peer DoSing us with channel queries:
 * this routine sends information about a single short_channel_id, unless
 * it's finished all of them. */
static void maybe_create_next_scid_reply(struct peer *peer)
{
	struct routing_state *rstate = peer->daemon->rstate;
	size_t i, num;
	bool sent = false;

	/* BOLT #7:
	 *
	 *   - MUST respond to each known `short_channel_id` with a
	 *     `channel_announcement` and the latest `channel_update`s for
	 *     each end
	 *     - SHOULD NOT wait for the next outgoing gossip flush
	 *       to send these.
	 */
	/* Search for next short_channel_id we know about. */
	num = tal_count(peer->scid_queries);
	for (i = peer->scid_query_idx; !sent && i < num; i++) {
		struct chan *chan;

		/* Skip ids we don't know, or whose announcement isn't
		 * public yet. */
		chan = get_channel(rstate, &peer->scid_queries[i]);
		if (!chan || !is_chan_announced(chan))
			continue;

		queue_peer_msg(peer, chan->channel_announce);
		if (chan->half[0].channel_update)
			queue_peer_msg(peer, chan->half[0].channel_update);
		if (chan->half[1].channel_update)
			queue_peer_msg(peer, chan->half[1].channel_update);

		/* Record node ids for later transmission of node_announcement */
		*tal_arr_expand(&peer->scid_query_nodes) = chan->nodes[0]->id;
		*tal_arr_expand(&peer->scid_query_nodes) = chan->nodes[1]->id;
		sent = true;
	}

	/* Just finished channels?  Remove duplicate nodes. */
	if (peer->scid_query_idx != num && i == num)
		uniquify_node_ids(&peer->scid_query_nodes);

	/* Update index for next time we're called. */
	peer->scid_query_idx = i;

	/* BOLT #7:
	 *
	 *  - MUST follow with any `node_announcement`s for each
	 *    `channel_announcement`
	 *  - SHOULD avoid sending duplicate `node_announcements` in response
	 *    to a single `query_short_channel_ids`.
	 */
	/* If we haven't sent anything above, we look for the next
	 * node_announcement to send. */
	num = tal_count(peer->scid_query_nodes);
	for (i = peer->scid_query_nodes_idx; !sent && i < num; i++) {
		const struct node *n;

		/* Not every node announces itself (we know it exists because
		 * of a channel_announcement, however) */
		n = get_node(rstate, &peer->scid_query_nodes[i]);
		if (!n || !n->node_announcement_index)
			continue;

		queue_peer_msg(peer, n->node_announcement);
		sent = true;
	}
	peer->scid_query_nodes_idx = i;

	/* All finished? */
	if (peer->scid_queries && peer->scid_query_nodes_idx == num) {
		/* BOLT #7:
		 *
		 * - MUST follow these responses with
		 *   `reply_short_channel_ids_end`.
		 *   - if does not maintain up-to-date channel information for
		 *     `chain_hash`:
		 *      - MUST set `complete` to 0.
		 *   - otherwise:
		 *      - SHOULD set `complete` to 1.
		 */
		/* FIXME: We consider ourselves to have complete knowledge. */
		u8 *end = towire_reply_short_channel_ids_end(peer,
							     &peer->daemon->chain_hash,
							     true);
		queue_peer_msg(peer, take(end));

		/* We're done!  Clean up so we simply pass-through next time. */
		peer->scid_queries = tal_free(peer->scid_queries);
		peer->scid_query_idx = 0;
		peer->scid_query_nodes = tal_free(peer->scid_query_nodes);
		peer->scid_query_nodes_idx = 0;
	}
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ If we're supposed to be sending gossip, do so now. */
|
2018-11-13 05:03:50 +01:00
|
|
|
static void maybe_queue_gossip(struct peer *peer)
|
2018-06-04 06:26:25 +02:00
|
|
|
{
|
|
|
|
const u8 *next;
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* If the gossip timer is still running, don't send. */
|
2018-06-04 06:27:25 +02:00
|
|
|
if (peer->gossip_timer)
|
2018-11-13 05:03:50 +01:00
|
|
|
return;
|
2018-06-04 06:26:25 +02:00
|
|
|
|
2018-07-26 23:27:37 +02:00
|
|
|
#if DEVELOPER
|
2018-11-21 01:36:08 +01:00
|
|
|
/* The dev_suppress_gossip RPC is used for testing. */
|
2018-07-26 23:27:37 +02:00
|
|
|
if (suppress_gossip)
|
2018-11-13 05:03:50 +01:00
|
|
|
return;
|
2018-07-26 23:27:37 +02:00
|
|
|
#endif
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ We maintain an ordered map of gossip to broadcast, so each peer
|
|
|
|
* only needs to keep an index; this returns the next gossip message
|
|
|
|
* which is past the previous index and within the timestamp: it
|
|
|
|
* also updates `broadcast_index`. */
|
2018-06-04 06:26:25 +02:00
|
|
|
next = next_broadcast(peer->daemon->rstate->broadcasts,
|
|
|
|
peer->gossip_timestamp_min,
|
|
|
|
peer->gossip_timestamp_max,
|
|
|
|
&peer->broadcast_index);
|
|
|
|
|
|
|
|
if (next) {
|
|
|
|
queue_peer_msg(peer, next);
|
2018-11-13 05:03:50 +01:00
|
|
|
return;
|
2018-06-04 06:26:25 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* BOLT #7:
|
|
|
|
*
|
|
|
|
* An endpoint node:
|
|
|
|
*...
|
|
|
|
* - SHOULD flush outgoing gossip messages once every 60 seconds,
|
|
|
|
* independently of the arrival times of the messages.
|
|
|
|
* - Note: this results in staggered announcements that are unique
|
|
|
|
* (not duplicated).
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* Gossip is drained; we set up timer now, which is strictly-speaking
|
|
|
|
* more than 60 seconds if sending gossip took a long time. But
|
|
|
|
* that's their fault for being slow! */
|
2018-06-04 06:27:25 +02:00
|
|
|
peer->gossip_timer
|
|
|
|
= new_reltimer(&peer->daemon->timers, peer,
|
2018-11-21 01:36:08 +01:00
|
|
|
/* The time is adjustable for testing */
|
2018-10-15 06:57:38 +02:00
|
|
|
time_from_msec(peer->daemon->broadcast_interval_msec),
|
2018-06-04 06:27:25 +02:00
|
|
|
wake_gossip_out, peer);
|
2018-06-04 06:26:25 +02:00
|
|
|
}
|
|
|
|
|
/*~ Called whenever the outgoing queue runs dry: gossip is the lowest
 * priority traffic, so it only flows when nothing else is waiting. */
static void dump_gossip(struct peer *peer)
{
	/* Pending short_channel_id query replies go first... */
	maybe_create_next_scid_reply(peer);
	/* ...then any broadcast gossip this peer is due. */
	maybe_queue_gossip(peer);
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ This generates a `channel_update` message for one of our channels. We do
|
|
|
|
* this here, rather than in `channeld` because we (may) need to do it
|
|
|
|
* ourselves anyway if channeld dies, or when we refresh it once a week. */
|
2018-09-25 07:43:56 +02:00
|
|
|
static void update_local_channel(struct daemon *daemon,
|
2018-05-21 06:35:40 +02:00
|
|
|
const struct chan *chan,
|
|
|
|
int direction,
|
|
|
|
bool disable,
|
|
|
|
u16 cltv_expiry_delta,
|
|
|
|
u64 htlc_minimum_msat,
|
|
|
|
u32 fee_base_msat,
|
2018-09-25 07:43:56 +02:00
|
|
|
u32 fee_proportional_millionths,
|
2018-10-16 02:35:08 +02:00
|
|
|
u64 htlc_maximum_msat,
|
2018-09-25 07:43:56 +02:00
|
|
|
const char *caller)
|
2018-05-21 06:35:40 +02:00
|
|
|
{
|
|
|
|
secp256k1_ecdsa_signature dummy_sig;
|
|
|
|
u8 *update, *msg;
|
|
|
|
u32 timestamp = time_now().ts.tv_sec;
|
2018-09-20 02:59:46 +02:00
|
|
|
u8 message_flags, channel_flags;
|
|
|
|
|
2018-05-21 06:35:40 +02:00
|
|
|
/* So valgrind doesn't complain */
|
|
|
|
memset(&dummy_sig, 0, sizeof(dummy_sig));
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* BOLT #7:
|
|
|
|
*
|
|
|
|
* The origin node:
|
|
|
|
*...
|
|
|
|
* - MUST set `timestamp` to greater than 0, AND to greater than any
|
|
|
|
* previously-sent `channel_update` for this `short_channel_id`.
|
|
|
|
* - SHOULD base `timestamp` on a UNIX timestamp.
|
|
|
|
*/
|
2018-05-21 06:35:40 +02:00
|
|
|
if (is_halfchan_defined(&chan->half[direction])
|
|
|
|
&& timestamp == chan->half[direction].last_timestamp)
|
|
|
|
timestamp++;
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* BOLT #7:
|
|
|
|
*
|
|
|
|
* The `channel_flags` bitfield is used to indicate the direction of
|
|
|
|
* the channel: it identifies the node that this update originated
|
|
|
|
* from and signals various options concerning the channel. The
|
|
|
|
* following table specifies the meaning of its individual bits:
|
|
|
|
*
|
|
|
|
* | Bit Position | Name | Meaning |
|
|
|
|
* | ------------- | ----------- | -------------------------------- |
|
|
|
|
* | 0 | `direction` | Direction this update refers to. |
|
|
|
|
* | 1 | `disable` | Disable the channel. |
|
|
|
|
*/
|
2018-09-20 02:59:46 +02:00
|
|
|
channel_flags = direction;
|
2018-05-21 06:35:40 +02:00
|
|
|
if (disable)
|
2018-09-20 02:59:46 +02:00
|
|
|
channel_flags |= ROUTING_FLAGS_DISABLED;
|
2018-05-21 06:35:40 +02:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* BOLT #7:
|
|
|
|
*
|
|
|
|
* The `message_flags` bitfield is used to indicate the presence of
|
|
|
|
* optional fields in the `channel_update` message:
|
|
|
|
*
|
|
|
|
*| Bit Position | Name | Field |
|
|
|
|
*...
|
|
|
|
*| 0 | `option_channel_htlc_max` | `htlc_maximum_msat` |
|
|
|
|
*/
|
2018-10-16 02:35:08 +02:00
|
|
|
message_flags = 0 | ROUTING_OPT_HTLC_MAX_MSAT;
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* We create an update with a dummy signature, and hand to hsmd to get
|
|
|
|
* it signed. */
|
2018-10-16 02:35:08 +02:00
|
|
|
update = towire_channel_update_option_channel_htlc_max(tmpctx, &dummy_sig,
|
2018-11-21 04:10:03 +01:00
|
|
|
&daemon->chain_hash,
|
2018-05-21 06:35:40 +02:00
|
|
|
&chan->scid,
|
|
|
|
timestamp,
|
2018-09-20 02:59:46 +02:00
|
|
|
message_flags, channel_flags,
|
|
|
|
cltv_expiry_delta,
|
2018-05-21 06:35:40 +02:00
|
|
|
htlc_minimum_msat,
|
|
|
|
fee_base_msat,
|
2018-10-16 02:35:08 +02:00
|
|
|
fee_proportional_millionths,
|
|
|
|
htlc_maximum_msat);
|
2018-05-21 06:35:40 +02:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Note that we treat the hsmd as synchronous. This is simple (no
|
|
|
|
* callback hell)!, but may need to change to async if we ever want
|
|
|
|
* remote HSMs */
|
2018-05-21 06:35:40 +02:00
|
|
|
if (!wire_sync_write(HSM_FD,
|
|
|
|
towire_hsm_cupdate_sig_req(tmpctx, update))) {
|
|
|
|
status_failed(STATUS_FAIL_HSM_IO, "Writing cupdate_sig_req: %s",
|
|
|
|
strerror(errno));
|
|
|
|
}
|
|
|
|
|
|
|
|
msg = wire_sync_read(tmpctx, HSM_FD);
|
2018-09-25 07:43:56 +02:00
|
|
|
if (!msg || !fromwire_hsm_cupdate_sig_reply(NULL, msg, &update)) {
|
2018-05-21 06:35:40 +02:00
|
|
|
status_failed(STATUS_FAIL_HSM_IO,
|
|
|
|
"Reading cupdate_sig_req: %s",
|
|
|
|
strerror(errno));
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* BOLT #7:
|
|
|
|
*
|
|
|
|
* The origin node:
|
|
|
|
* - MAY create a `channel_update` to communicate the channel
|
|
|
|
* parameters to the final node, even though the channel has not yet
|
|
|
|
* been announced
|
|
|
|
*/
|
2018-09-25 07:43:56 +02:00
|
|
|
if (!is_chan_public(chan)) {
|
2018-11-21 01:36:08 +01:00
|
|
|
/* handle_channel_update will not put private updates in the
|
|
|
|
* broadcast list, but we send it direct to the peer (if we
|
|
|
|
* have one connected) now */
|
2018-09-25 07:43:56 +02:00
|
|
|
struct peer *peer = find_peer(daemon,
|
|
|
|
&chan->nodes[!direction]->id);
|
|
|
|
if (peer)
|
|
|
|
queue_peer_msg(peer, update);
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* We feed it into routing.c like any other channel_update; it may
|
|
|
|
* discard it (eg. non-public channel), but it should not complain
|
|
|
|
* about it being invalid! */
|
2018-09-25 07:43:56 +02:00
|
|
|
msg = handle_channel_update(daemon->rstate, take(update), caller);
|
|
|
|
if (msg)
|
|
|
|
status_failed(STATUS_FAIL_INTERNAL_ERROR,
|
|
|
|
"%s: rejected local channel update %s: %s",
|
|
|
|
caller,
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Normally we must not touch something taken()
|
|
|
|
* but we're in deep trouble anyway, and
|
|
|
|
* handle_channel_update only tal_steals onto
|
|
|
|
* tmpctx, so it's actually OK. */
|
2018-09-25 07:43:56 +02:00
|
|
|
tal_hex(tmpctx, update),
|
|
|
|
tal_hex(tmpctx, msg));
|
2018-05-21 06:35:40 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ We generate local channel updates lazily; most of the time we simply
|
|
|
|
* toggle the `local_disabled` flag so we don't use it to route. We never
|
|
|
|
* change anything else after startup (yet!) */
|
2018-09-25 07:43:56 +02:00
|
|
|
static void maybe_update_local_channel(struct daemon *daemon,
|
|
|
|
struct chan *chan, int direction)
|
2018-07-02 22:54:12 +02:00
|
|
|
{
|
2018-09-25 07:43:56 +02:00
|
|
|
const struct half_chan *hc = &chan->half[direction];
|
2018-07-02 22:54:12 +02:00
|
|
|
|
2018-09-25 07:43:56 +02:00
|
|
|
/* Don't generate a channel_update for an uninitialized channel. */
|
|
|
|
if (!hc->channel_update)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* Nothing to update? */
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ Note the inversions here on both sides, which is cheap conversion to
|
|
|
|
* boolean for the RHS! */
|
2018-09-25 07:43:56 +02:00
|
|
|
if (!chan->local_disabled == !(hc->channel_flags & ROUTING_FLAGS_DISABLED))
|
|
|
|
return;
|
|
|
|
|
|
|
|
update_local_channel(daemon, chan, direction,
|
|
|
|
chan->local_disabled,
|
|
|
|
hc->delay,
|
|
|
|
hc->htlc_minimum_msat,
|
|
|
|
hc->base_fee,
|
|
|
|
hc->proportional_fee,
|
2018-10-16 02:35:08 +02:00
|
|
|
hc->htlc_maximum_msat,
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Note this magic C macro which expands to the
|
|
|
|
* function name, for debug messages */
|
2018-09-25 07:43:56 +02:00
|
|
|
__func__);
|
2018-07-02 22:54:12 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ This helper figures out which direction of the channel is from-us; if
|
|
|
|
* neither, it returns false. This meets Linus' rule "Always return the error",
|
|
|
|
* without doing some horrible 0/1/-1 return. */
|
2018-09-25 07:43:56 +02:00
|
|
|
static bool local_direction(struct daemon *daemon,
|
|
|
|
const struct chan *chan,
|
|
|
|
int *direction)
|
2018-05-21 06:35:40 +02:00
|
|
|
{
|
2018-09-25 07:43:56 +02:00
|
|
|
for (*direction = 0; *direction < 2; (*direction)++) {
|
|
|
|
if (pubkey_eq(&chan->nodes[*direction]->id, &daemon->id))
|
|
|
|
return true;
|
2018-05-21 06:35:40 +02:00
|
|
|
}
|
2018-09-25 07:43:56 +02:00
|
|
|
return false;
|
2018-07-03 13:30:36 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ This is when channeld asks us for a channel_update for a local channel.
|
|
|
|
* It does that to fill in the error field when lightningd fails an HTLC and
|
|
|
|
* sets the UPDATE bit in the error type. lightningd is too important to
|
|
|
|
* fetch this itself, so channeld does it (channeld has to talk to us for
|
|
|
|
* other things anyway, so why not?). */
|
2018-11-05 02:21:51 +01:00
|
|
|
static bool handle_get_update(struct peer *peer, const u8 *msg)
|
2018-07-03 13:30:36 +02:00
|
|
|
{
|
2018-09-25 07:43:56 +02:00
|
|
|
struct short_channel_id scid;
|
2018-07-03 13:30:36 +02:00
|
|
|
struct chan *chan;
|
2018-09-25 07:43:56 +02:00
|
|
|
const u8 *update;
|
|
|
|
struct routing_state *rstate = peer->daemon->rstate;
|
|
|
|
int direction;
|
2018-05-21 06:35:40 +02:00
|
|
|
|
2018-11-13 05:03:51 +01:00
|
|
|
if (!fromwire_gossipd_get_update(msg, &scid)) {
|
2018-11-05 02:21:51 +01:00
|
|
|
status_broken("peer %s sent bad gossip_get_update %s",
|
|
|
|
type_to_string(tmpctx, struct pubkey, &peer->id),
|
|
|
|
tal_hex(tmpctx, msg));
|
|
|
|
return false;
|
2018-05-21 06:35:40 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* It's possible that the channel has just closed (though v. unlikely) */
|
2018-09-25 07:43:56 +02:00
|
|
|
chan = get_channel(rstate, &scid);
|
|
|
|
if (!chan) {
|
|
|
|
status_unusual("peer %s scid %s: unknown channel",
|
|
|
|
type_to_string(tmpctx, struct pubkey, &peer->id),
|
|
|
|
type_to_string(tmpctx, struct short_channel_id,
|
|
|
|
&scid));
|
|
|
|
update = NULL;
|
|
|
|
goto out;
|
2018-07-02 22:54:12 +02:00
|
|
|
}
|
|
|
|
|
2018-09-25 07:43:56 +02:00
|
|
|
/* We want the update that comes from our end. */
|
|
|
|
if (!local_direction(peer->daemon, chan, &direction)) {
|
|
|
|
status_unusual("peer %s scid %s: not our channel?",
|
|
|
|
type_to_string(tmpctx, struct pubkey, &peer->id),
|
|
|
|
type_to_string(tmpctx,
|
|
|
|
struct short_channel_id,
|
|
|
|
&scid));
|
|
|
|
update = NULL;
|
|
|
|
goto out;
|
2018-07-03 13:30:36 +02:00
|
|
|
}
|
|
|
|
|
2018-09-25 07:43:56 +02:00
|
|
|
/* Since we're going to send it out, make sure it's up-to-date. */
|
|
|
|
maybe_update_local_channel(peer->daemon, chan, direction);
|
2018-06-04 06:15:25 +02:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* It's possible this is NULL, if we've never sent a channel_update
|
|
|
|
* for that channel. */
|
2018-09-25 07:43:56 +02:00
|
|
|
update = chan->half[direction].channel_update;
|
|
|
|
out:
|
|
|
|
status_trace("peer %s schanid %s: %s update",
|
|
|
|
type_to_string(tmpctx, struct pubkey, &peer->id),
|
|
|
|
type_to_string(tmpctx, struct short_channel_id, &scid),
|
|
|
|
update ? "got" : "no");
|
2018-07-03 13:30:36 +02:00
|
|
|
|
2018-11-13 05:03:51 +01:00
|
|
|
msg = towire_gossipd_get_update_reply(NULL, update);
|
2018-10-25 01:43:05 +02:00
|
|
|
daemon_conn_send(peer->dc, take(msg));
|
2018-11-05 02:21:51 +01:00
|
|
|
return true;
|
2018-07-03 13:30:36 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ Return true if the channel information has changed. This can only
|
|
|
|
* currently happen if the user restarts with different fee options, but we
|
|
|
|
* don't assume that. */
|
2018-09-25 07:43:56 +02:00
|
|
|
static bool halfchan_new_info(const struct half_chan *hc,
|
|
|
|
u16 cltv_delta, u64 htlc_minimum_msat,
|
2018-10-16 02:35:08 +02:00
|
|
|
u32 fee_base_msat, u32 fee_proportional_millionths,
|
|
|
|
u64 htlc_maximum_msat)
|
2018-07-03 13:43:45 +02:00
|
|
|
{
|
2018-09-25 07:43:56 +02:00
|
|
|
if (!is_halfchan_defined(hc))
|
|
|
|
return true;
|
2018-07-03 13:43:45 +02:00
|
|
|
|
2018-09-25 07:43:56 +02:00
|
|
|
return hc->delay != cltv_delta
|
|
|
|
|| hc->htlc_minimum_msat != htlc_minimum_msat
|
|
|
|
|| hc->base_fee != fee_base_msat
|
2018-10-16 02:35:08 +02:00
|
|
|
|| hc->proportional_fee != fee_proportional_millionths
|
|
|
|
|| hc->htlc_maximum_msat != htlc_maximum_msat;
|
2018-07-03 13:43:45 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ channeld asks us to update the local channel. */
|
2018-11-05 02:21:51 +01:00
|
|
|
static bool handle_local_channel_update(struct peer *peer, const u8 *msg)
|
2018-07-03 13:30:36 +02:00
|
|
|
{
|
|
|
|
struct chan *chan;
|
2018-09-25 07:43:56 +02:00
|
|
|
struct short_channel_id scid;
|
|
|
|
bool disable;
|
|
|
|
u16 cltv_expiry_delta;
|
|
|
|
u64 htlc_minimum_msat;
|
2018-10-16 02:35:08 +02:00
|
|
|
u64 htlc_maximum_msat;
|
2018-09-25 07:43:56 +02:00
|
|
|
u32 fee_base_msat;
|
|
|
|
u32 fee_proportional_millionths;
|
|
|
|
int direction;
|
2018-07-03 13:30:36 +02:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* FIXME: We should get scid from lightningd when setting up the
|
|
|
|
* connection, so no per-peer daemon can mess with channels other than
|
|
|
|
* its own! */
|
2018-11-13 05:03:51 +01:00
|
|
|
if (!fromwire_gossipd_local_channel_update(msg,
|
|
|
|
&scid,
|
|
|
|
&disable,
|
|
|
|
&cltv_expiry_delta,
|
|
|
|
&htlc_minimum_msat,
|
|
|
|
&fee_base_msat,
|
|
|
|
&fee_proportional_millionths,
|
|
|
|
&htlc_maximum_msat)) {
|
2018-07-03 13:30:36 +02:00
|
|
|
status_broken("peer %s bad local_channel_update %s",
|
|
|
|
type_to_string(tmpctx, struct pubkey, &peer->id),
|
|
|
|
tal_hex(tmpctx, msg));
|
2018-11-05 02:21:51 +01:00
|
|
|
return false;
|
2018-07-03 13:30:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Can theoretically happen if channel just closed. */
|
2018-09-25 07:43:56 +02:00
|
|
|
chan = get_channel(peer->daemon->rstate, &scid);
|
2018-07-03 13:30:36 +02:00
|
|
|
if (!chan) {
|
|
|
|
status_trace("peer %s local_channel_update for unknown %s",
|
|
|
|
type_to_string(tmpctx, struct pubkey, &peer->id),
|
|
|
|
type_to_string(tmpctx, struct short_channel_id,
|
2018-09-25 07:43:56 +02:00
|
|
|
&scid));
|
2018-11-05 02:21:51 +01:00
|
|
|
return true;
|
2018-07-03 13:30:36 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* You shouldn't be asking for a non-local channel though. */
|
2018-09-25 07:43:56 +02:00
|
|
|
if (!local_direction(peer->daemon, chan, &direction)) {
|
2018-07-03 13:30:36 +02:00
|
|
|
status_broken("peer %s bad local_channel_update for non-local %s",
|
|
|
|
type_to_string(tmpctx, struct pubkey, &peer->id),
|
|
|
|
type_to_string(tmpctx, struct short_channel_id,
|
2018-09-25 07:43:56 +02:00
|
|
|
&scid));
|
2018-11-05 02:21:51 +01:00
|
|
|
return false;
|
2018-07-03 13:30:36 +02:00
|
|
|
}
|
|
|
|
|
2018-09-25 07:43:56 +02:00
|
|
|
/* We could change configuration on restart; update immediately.
|
|
|
|
* Or, if we're *enabling* an announced-disabled channel.
|
|
|
|
* Or, if it's an unannounced channel (only sending to peer). */
|
|
|
|
if (halfchan_new_info(&chan->half[direction],
|
|
|
|
cltv_expiry_delta, htlc_minimum_msat,
|
2018-10-16 02:35:08 +02:00
|
|
|
fee_base_msat, fee_proportional_millionths,
|
|
|
|
htlc_maximum_msat)
|
2018-09-25 07:43:56 +02:00
|
|
|
|| ((chan->half[direction].channel_flags & ROUTING_FLAGS_DISABLED)
|
|
|
|
&& !disable)
|
|
|
|
|| !is_chan_public(chan)) {
|
|
|
|
update_local_channel(peer->daemon, chan, direction,
|
|
|
|
disable,
|
|
|
|
cltv_expiry_delta,
|
|
|
|
htlc_minimum_msat,
|
|
|
|
fee_base_msat,
|
|
|
|
fee_proportional_millionths,
|
2018-10-16 02:35:08 +02:00
|
|
|
htlc_maximum_msat,
|
2018-09-25 07:43:56 +02:00
|
|
|
__func__);
|
|
|
|
}
|
2018-08-14 01:02:04 +02:00
|
|
|
|
2018-09-25 07:43:56 +02:00
|
|
|
/* Normal case: just toggle local_disabled, and generate broadcast in
|
|
|
|
* maybe_update_local_channel when/if someone asks about it. */
|
|
|
|
chan->local_disabled = disable;
|
2018-11-05 02:21:51 +01:00
|
|
|
return true;
|
2018-05-21 06:35:40 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ This is where the per-peer daemons send us messages. It's either forwarded
|
|
|
|
* gossip, or a request for information. We deliberately use non-overlapping
|
|
|
|
* message types so we can distinguish them. */
|
2018-11-05 02:21:51 +01:00
|
|
|
static struct io_plan *peer_msg_in(struct io_conn *conn,
|
2018-10-25 01:43:05 +02:00
|
|
|
const u8 *msg,
|
|
|
|
struct peer *peer)
|
2017-03-09 16:56:04 +01:00
|
|
|
{
|
2018-11-05 02:21:51 +01:00
|
|
|
const u8 *err;
|
|
|
|
bool ok;
|
|
|
|
|
|
|
|
/* These are messages relayed from peer */
|
|
|
|
switch ((enum wire_type)fromwire_peektype(msg)) {
|
|
|
|
case WIRE_CHANNEL_ANNOUNCEMENT:
|
|
|
|
err = handle_channel_announcement_msg(peer, msg);
|
|
|
|
goto handled_relay;
|
|
|
|
case WIRE_CHANNEL_UPDATE:
|
|
|
|
err = handle_channel_update_msg(peer, msg);
|
|
|
|
goto handled_relay;
|
|
|
|
case WIRE_NODE_ANNOUNCEMENT:
|
|
|
|
err = handle_node_announcement(peer->daemon->rstate, msg);
|
|
|
|
goto handled_relay;
|
|
|
|
case WIRE_QUERY_CHANNEL_RANGE:
|
|
|
|
err = handle_query_channel_range(peer, msg);
|
|
|
|
goto handled_relay;
|
|
|
|
case WIRE_REPLY_CHANNEL_RANGE:
|
|
|
|
err = handle_reply_channel_range(peer, msg);
|
|
|
|
goto handled_relay;
|
|
|
|
case WIRE_QUERY_SHORT_CHANNEL_IDS:
|
|
|
|
err = handle_query_short_channel_ids(peer, msg);
|
|
|
|
goto handled_relay;
|
|
|
|
case WIRE_REPLY_SHORT_CHANNEL_IDS_END:
|
|
|
|
err = handle_reply_short_channel_ids_end(peer, msg);
|
|
|
|
goto handled_relay;
|
|
|
|
case WIRE_GOSSIP_TIMESTAMP_FILTER:
|
|
|
|
err = handle_gossip_timestamp_filter(peer, msg);
|
|
|
|
goto handled_relay;
|
|
|
|
case WIRE_PING:
|
|
|
|
err = handle_ping(peer, msg);
|
|
|
|
goto handled_relay;
|
|
|
|
case WIRE_PONG:
|
|
|
|
err = handle_pong(peer, msg);
|
|
|
|
goto handled_relay;
|
2018-11-21 01:36:08 +01:00
|
|
|
|
|
|
|
/* These are non-gossip messages (!is_msg_for_gossipd()) */
|
2018-11-05 02:21:51 +01:00
|
|
|
case WIRE_INIT:
|
|
|
|
case WIRE_ERROR:
|
|
|
|
case WIRE_OPEN_CHANNEL:
|
|
|
|
case WIRE_ACCEPT_CHANNEL:
|
|
|
|
case WIRE_FUNDING_CREATED:
|
|
|
|
case WIRE_FUNDING_SIGNED:
|
|
|
|
case WIRE_FUNDING_LOCKED:
|
|
|
|
case WIRE_SHUTDOWN:
|
|
|
|
case WIRE_CLOSING_SIGNED:
|
|
|
|
case WIRE_UPDATE_ADD_HTLC:
|
|
|
|
case WIRE_UPDATE_FULFILL_HTLC:
|
|
|
|
case WIRE_UPDATE_FAIL_HTLC:
|
|
|
|
case WIRE_UPDATE_FAIL_MALFORMED_HTLC:
|
|
|
|
case WIRE_COMMITMENT_SIGNED:
|
|
|
|
case WIRE_REVOKE_AND_ACK:
|
|
|
|
case WIRE_UPDATE_FEE:
|
|
|
|
case WIRE_CHANNEL_REESTABLISH:
|
|
|
|
case WIRE_ANNOUNCEMENT_SIGNATURES:
|
|
|
|
status_broken("peer %s: relayed unexpected msg of type %s",
|
2018-03-15 05:30:38 +01:00
|
|
|
type_to_string(tmpctx, struct pubkey, &peer->id),
|
2018-11-05 02:21:51 +01:00
|
|
|
wire_type_name(fromwire_peektype(msg)));
|
2018-03-08 04:16:34 +01:00
|
|
|
return io_close(conn);
|
2017-03-11 15:31:17 +01:00
|
|
|
}
|
2017-12-15 23:20:13 +01:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Must be a gossip_peerd_wire_type asking us to do something. */
|
2018-11-13 05:03:51 +01:00
|
|
|
switch ((enum gossip_peerd_wire_type)fromwire_peektype(msg)) {
|
|
|
|
case WIRE_GOSSIPD_GET_UPDATE:
|
2018-11-05 02:21:51 +01:00
|
|
|
ok = handle_get_update(peer, msg);
|
|
|
|
goto handled_cmd;
|
2018-11-13 05:03:51 +01:00
|
|
|
case WIRE_GOSSIPD_LOCAL_ADD_CHANNEL:
|
2018-11-05 02:21:51 +01:00
|
|
|
ok = handle_local_add_channel(peer->daemon->rstate, msg);
|
|
|
|
if (ok)
|
|
|
|
gossip_store_add(peer->daemon->rstate->store, msg);
|
|
|
|
goto handled_cmd;
|
2018-11-13 05:03:51 +01:00
|
|
|
case WIRE_GOSSIPD_LOCAL_CHANNEL_UPDATE:
|
2018-11-05 02:21:51 +01:00
|
|
|
ok = handle_local_channel_update(peer, msg);
|
|
|
|
goto handled_cmd;
|
2018-11-21 01:36:08 +01:00
|
|
|
|
|
|
|
/* These are the ones we send, not them */
|
2018-11-13 05:03:51 +01:00
|
|
|
case WIRE_GOSSIPD_GET_UPDATE_REPLY:
|
|
|
|
case WIRE_GOSSIPD_SEND_GOSSIP:
|
2018-11-05 02:21:51 +01:00
|
|
|
break;
|
|
|
|
}
|
2018-11-21 01:36:08 +01:00
|
|
|
|
|
|
|
/* Anything else should not have been sent to us: close on it */
|
2018-11-13 05:03:51 +01:00
|
|
|
status_broken("peer %s: unexpected cmd of type %i %s",
|
2018-11-05 02:21:51 +01:00
|
|
|
type_to_string(tmpctx, struct pubkey, &peer->id),
|
2018-11-13 05:03:51 +01:00
|
|
|
fromwire_peektype(msg),
|
|
|
|
gossip_peerd_wire_type_name(fromwire_peektype(msg)));
|
2018-11-05 02:21:51 +01:00
|
|
|
return io_close(conn);
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Commands should always be OK. */
|
2018-11-05 02:21:51 +01:00
|
|
|
handled_cmd:
|
|
|
|
if (!ok)
|
|
|
|
return io_close(conn);
|
|
|
|
goto done;
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Forwarded messages may be bad, so we have error which the per-peer
|
|
|
|
* daemon will forward to the peer. */
|
2018-11-05 02:21:51 +01:00
|
|
|
handled_relay:
|
|
|
|
if (err)
|
|
|
|
queue_peer_msg(peer, take(err));
|
|
|
|
done:
|
2018-10-25 01:43:05 +02:00
|
|
|
return daemon_conn_read_next(conn, peer->dc);
|
2017-03-09 16:56:04 +01:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ This is where connectd tells us about a new peer, and we hand back an fd for
|
|
|
|
* it to send us messages via peer_msg_in above */
|
2018-07-24 08:18:58 +02:00
|
|
|
static struct io_plan *connectd_new_peer(struct io_conn *conn,
|
|
|
|
struct daemon *daemon,
|
|
|
|
const u8 *msg)
|
|
|
|
{
|
|
|
|
struct peer *peer = tal(conn, struct peer);
|
|
|
|
int fds[2];
|
|
|
|
|
|
|
|
if (!fromwire_gossip_new_peer(msg, &peer->id,
|
|
|
|
&peer->gossip_queries_feature,
|
|
|
|
&peer->initial_routing_sync_feature)) {
|
|
|
|
status_broken("Bad new_peer msg from connectd: %s",
|
|
|
|
tal_hex(tmpctx, msg));
|
|
|
|
return io_close(conn);
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* This can happen: we handle it gracefully, returning a `failed` msg. */
|
2018-07-24 08:18:58 +02:00
|
|
|
if (socketpair(AF_LOCAL, SOCK_STREAM, 0, fds) != 0) {
|
|
|
|
status_broken("Failed to create socketpair: %s",
|
|
|
|
strerror(errno));
|
2018-10-25 01:43:05 +02:00
|
|
|
daemon_conn_send(daemon->connectd,
|
2018-07-24 08:18:58 +02:00
|
|
|
take(towire_gossip_new_peer_reply(NULL, false)));
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* We might not have noticed old peer is dead; kill it now. */
|
|
|
|
tal_free(find_peer(daemon, &peer->id));
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Populate the rest of the peer info. */
|
2018-07-24 08:18:58 +02:00
|
|
|
peer->daemon = daemon;
|
|
|
|
peer->scid_queries = NULL;
|
|
|
|
peer->scid_query_idx = 0;
|
|
|
|
peer->scid_query_nodes = NULL;
|
|
|
|
peer->scid_query_nodes_idx = 0;
|
|
|
|
peer->num_scid_queries_outstanding = 0;
|
|
|
|
peer->query_channel_blocks = NULL;
|
|
|
|
peer->num_pings_outstanding = 0;
|
|
|
|
peer->gossip_timer = NULL;
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* We keep a list so we can find peer by id */
|
2018-07-24 08:18:58 +02:00
|
|
|
list_add_tail(&peer->daemon->peers, &peer->list);
|
|
|
|
tal_add_destructor(peer, destroy_peer);
|
|
|
|
|
|
|
|
/* BOLT #7:
|
|
|
|
*
|
|
|
|
* - if the `gossip_queries` feature is negotiated:
|
|
|
|
* - MUST NOT relay any gossip messages unless explicitly requested.
|
|
|
|
*/
|
|
|
|
if (peer->gossip_queries_feature) {
|
|
|
|
peer->broadcast_index = UINT64_MAX;
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Nothing in this "impossible" range */
|
2018-07-24 08:18:58 +02:00
|
|
|
peer->gossip_timestamp_min = UINT32_MAX;
|
|
|
|
peer->gossip_timestamp_max = 0;
|
|
|
|
} else {
|
|
|
|
/* BOLT #7:
|
|
|
|
*
|
|
|
|
* - upon receiving an `init` message with the
|
|
|
|
* `initial_routing_sync` flag set to 1:
|
|
|
|
* - SHOULD send gossip messages for all known channels and
|
|
|
|
* nodes, as if they were just received.
|
|
|
|
* - if the `initial_routing_sync` flag is set to 0, OR if the
|
|
|
|
* initial sync was completed:
|
|
|
|
* - SHOULD resume normal operation, as specified in the
|
|
|
|
* following [Rebroadcasting](#rebroadcasting) section.
|
|
|
|
*/
|
|
|
|
peer->gossip_timestamp_min = 0;
|
|
|
|
peer->gossip_timestamp_max = UINT32_MAX;
|
|
|
|
if (peer->initial_routing_sync_feature)
|
|
|
|
peer->broadcast_index = 0;
|
|
|
|
else
|
|
|
|
peer->broadcast_index
|
|
|
|
= peer->daemon->rstate->broadcasts->next_index;
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* This is the new connection: calls dump_gossip when nothing else to
|
|
|
|
* send. */
|
2018-10-25 01:43:05 +02:00
|
|
|
peer->dc = daemon_conn_new(daemon, fds[0],
|
2018-11-05 02:21:51 +01:00
|
|
|
peer_msg_in, dump_gossip, peer);
|
2018-10-25 01:43:05 +02:00
|
|
|
/* Free peer if conn closed (destroy_peer closes conn if peer freed) */
|
|
|
|
tal_steal(peer->dc, peer);
|
2018-07-24 08:18:58 +02:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* This sends the initial timestamp filter. */
|
2018-07-24 08:18:58 +02:00
|
|
|
setup_gossip_range(peer);
|
|
|
|
|
2018-10-25 01:43:05 +02:00
|
|
|
/* Start the gossip flowing. */
|
|
|
|
wake_gossip_out(peer);
|
|
|
|
|
2018-07-24 08:18:58 +02:00
|
|
|
/* Reply with success, and the new fd */
|
2018-10-25 01:43:05 +02:00
|
|
|
daemon_conn_send(daemon->connectd,
|
2018-07-24 08:18:58 +02:00
|
|
|
take(towire_gossip_new_peer_reply(NULL, true)));
|
2018-10-25 01:43:05 +02:00
|
|
|
daemon_conn_send_fd(daemon->connectd, fds[1]);
|
2018-07-24 08:18:58 +02:00
|
|
|
|
|
|
|
done:
|
2018-10-25 01:43:05 +02:00
|
|
|
return daemon_conn_read_next(conn, daemon->connectd);
|
2018-07-24 08:18:58 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ connectd can also ask us if we know any addresses for a given id. */
|
2018-11-05 02:16:48 +01:00
|
|
|
static struct io_plan *connectd_get_address(struct io_conn *conn,
|
|
|
|
struct daemon *daemon,
|
|
|
|
const u8 *msg)
|
2017-03-10 13:06:51 +01:00
|
|
|
{
|
2018-11-05 02:16:48 +01:00
|
|
|
struct pubkey id;
|
|
|
|
struct node *node;
|
|
|
|
const struct wireaddr *addrs;
|
2018-06-04 06:19:25 +02:00
|
|
|
|
2018-11-05 02:16:48 +01:00
|
|
|
if (!fromwire_gossip_get_addrs(msg, &id)) {
|
|
|
|
status_broken("Bad gossip_get_addrs msg from connectd: %s",
|
|
|
|
tal_hex(tmpctx, msg));
|
|
|
|
return io_close(conn);
|
|
|
|
}
|
|
|
|
|
|
|
|
node = get_node(daemon->rstate, &id);
|
|
|
|
if (node)
|
|
|
|
addrs = node->addresses;
|
|
|
|
else
|
|
|
|
addrs = NULL;
|
|
|
|
|
|
|
|
daemon_conn_send(daemon->connectd,
|
|
|
|
take(towire_gossip_get_addrs_reply(NULL, addrs)));
|
|
|
|
return daemon_conn_read_next(conn, daemon->connectd);
|
2017-03-10 13:06:51 +01:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ connectd's input handler is very simple. */
|
2018-11-05 02:16:48 +01:00
|
|
|
static struct io_plan *connectd_req(struct io_conn *conn,
|
|
|
|
const u8 *msg,
|
|
|
|
struct daemon *daemon)
|
2017-03-15 11:36:52 +01:00
|
|
|
{
|
2018-11-05 02:16:48 +01:00
|
|
|
enum connect_gossip_wire_type t = fromwire_peektype(msg);
|
2017-03-15 13:46:29 +01:00
|
|
|
|
2018-11-05 02:16:48 +01:00
|
|
|
switch (t) {
|
|
|
|
case WIRE_GOSSIP_NEW_PEER:
|
|
|
|
return connectd_new_peer(conn, daemon, msg);
|
2018-09-29 08:33:51 +02:00
|
|
|
|
2018-11-05 02:16:48 +01:00
|
|
|
case WIRE_GOSSIP_GET_ADDRS:
|
|
|
|
return connectd_get_address(conn, daemon, msg);
|
2017-03-15 13:46:29 +01:00
|
|
|
|
2018-11-05 02:16:48 +01:00
|
|
|
/* We send these, don't receive them. */
|
|
|
|
case WIRE_GOSSIP_NEW_PEER_REPLY:
|
|
|
|
case WIRE_GOSSIP_GET_ADDRS_REPLY:
|
|
|
|
break;
|
|
|
|
}
|
2017-03-15 11:36:52 +01:00
|
|
|
|
2018-11-05 02:16:48 +01:00
|
|
|
status_broken("Bad msg from connectd: %s",
|
|
|
|
tal_hex(tmpctx, msg));
|
|
|
|
return io_close(conn);
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ This is our twice-weekly timer callback for refreshing our channels. This
|
|
|
|
* was added to the spec because people abandoned their channels without
|
|
|
|
* closing them. */
|
2018-11-05 02:16:48 +01:00
|
|
|
static void gossip_send_keepalive_update(struct daemon *daemon,
|
|
|
|
const struct chan *chan,
|
|
|
|
const struct half_chan *hc)
|
|
|
|
{
|
|
|
|
status_trace("Sending keepalive channel_update for %s",
|
|
|
|
type_to_string(tmpctx, struct short_channel_id,
|
|
|
|
&chan->scid));
|
|
|
|
|
|
|
|
/* As a side-effect, this will create an update which matches the
|
|
|
|
* local_disabled state */
|
|
|
|
update_local_channel(daemon, chan,
|
|
|
|
hc->channel_flags & ROUTING_FLAGS_DIRECTION,
|
|
|
|
chan->local_disabled,
|
|
|
|
hc->delay,
|
|
|
|
hc->htlc_minimum_msat,
|
|
|
|
hc->base_fee,
|
|
|
|
hc->proportional_fee,
|
|
|
|
hc->htlc_maximum_msat,
|
|
|
|
__func__);
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
|
|
|
|
/* BOLT #7:
|
|
|
|
*
|
|
|
|
* An endpoint node:
|
|
|
|
* - if a channel's latest `channel_update`s `timestamp` is older than two weeks
|
|
|
|
* (1209600 seconds):
|
|
|
|
* - MAY prune the channel.
|
|
|
|
* - MAY ignore the channel.
|
|
|
|
*/
|
2018-11-05 02:16:48 +01:00
|
|
|
static void gossip_refresh_network(struct daemon *daemon)
|
|
|
|
{
|
|
|
|
u64 now = time_now().ts.tv_sec;
|
|
|
|
/* Anything below this highwater mark could be pruned if not refreshed */
|
|
|
|
s64 highwater = now - daemon->rstate->prune_timeout / 2;
|
|
|
|
struct node *n;
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Schedule next run now (prune_timeout is 2 weeks) */
|
2018-11-21 23:39:31 +01:00
|
|
|
notleak(new_reltimer(&daemon->timers, daemon,
|
|
|
|
time_from_sec(daemon->rstate->prune_timeout/4),
|
|
|
|
gossip_refresh_network, daemon));
|
2018-11-05 02:16:48 +01:00
|
|
|
|
|
|
|
/* Find myself in the network */
|
|
|
|
n = get_node(daemon->rstate, &daemon->id);
|
|
|
|
if (n) {
|
|
|
|
/* Iterate through all outgoing connection and check whether
|
|
|
|
* it's time to re-announce */
|
|
|
|
for (size_t i = 0; i < tal_count(n->chans); i++) {
|
|
|
|
struct half_chan *hc = half_chan_from(n, n->chans[i]);
|
|
|
|
|
|
|
|
if (!is_halfchan_defined(hc)) {
|
|
|
|
/* Connection is not announced yet, so don't even
|
|
|
|
* try to re-announce it */
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (hc->last_timestamp > highwater) {
|
|
|
|
/* No need to send a keepalive update message */
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!is_halfchan_enabled(hc)) {
|
|
|
|
/* Only send keepalives for active connections */
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
gossip_send_keepalive_update(daemon, n->chans[i], hc);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Now we've refreshed our channels, we can prune without clobbering
|
|
|
|
* them */
|
2018-11-05 02:16:48 +01:00
|
|
|
route_prune(daemon->rstate);
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Disables all channels connected to our node. */
|
2018-11-05 02:16:48 +01:00
|
|
|
static void gossip_disable_local_channels(struct daemon *daemon)
|
|
|
|
{
|
|
|
|
struct node *local_node = get_node(daemon->rstate, &daemon->id);
|
|
|
|
|
|
|
|
/* We don't have a local_node, so we don't have any channels yet
|
|
|
|
* either */
|
|
|
|
if (!local_node)
|
|
|
|
return;
|
|
|
|
|
|
|
|
for (size_t i = 0; i < tal_count(local_node->chans); i++)
|
|
|
|
local_node->chans[i]->local_disabled = true;
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ Parse init message from lightningd: starts the daemon properly.
 *
 * This is the first message we expect on the master connection: it gives us
 * our identity, chain, and timing parameters, so we can create the routing
 * state, reload stored gossip, and start the periodic refresh timer.
 * Returns the io_plan to read the next master message. */
static struct io_plan *gossip_init(struct io_conn *conn,
				   struct daemon *daemon,
				   const u8 *msg)
{
	u32 update_channel_interval;

	if (!fromwire_gossipctl_init(daemon, msg,
				     /* 60,000 ms
				      * (unless --dev-broadcast-interval) */
				     &daemon->broadcast_interval_msec,
				     &daemon->chain_hash,
				     &daemon->id, &daemon->globalfeatures,
				     daemon->rgb,
				     daemon->alias,
				     /* 1 week in seconds
				      * (unless --dev-channel-update-interval) */
				     &update_channel_interval,
				     &daemon->announcable)) {
		master_badmsg(WIRE_GOSSIPCTL_INIT, msg);
	}

	/* Prune time (usually 2 weeks) is twice update time */
	daemon->rstate = new_routing_state(daemon,
					   chainparams_by_chainhash(&daemon->chain_hash),
					   &daemon->id,
					   update_channel_interval * 2);

	/* Load stored gossip messages */
	gossip_store_load(daemon->rstate, daemon->rstate->store);

	/* Now disable all local channels, they can't be connected yet. */
	gossip_disable_local_channels(daemon);

	/* If that announced channels, we can announce ourselves (options
	 * or addresses might have changed!) */
	maybe_send_own_node_announce(daemon);

	/* Start the weekly refresh timer. */
	notleak(new_reltimer(&daemon->timers, daemon,
			     time_from_sec(daemon->rstate->prune_timeout/4),
			     gossip_refresh_network, daemon));

	return daemon_conn_read_next(conn, daemon->master);
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ lightningd can ask for a route between nodes.  Replies with a (possibly
 * empty) array of route_hops over the master connection. */
static struct io_plan *getroute_req(struct io_conn *conn, struct daemon *daemon,
				    const u8 *msg)
{
	struct pubkey source, destination;
	u64 msatoshi;
	u32 final_cltv;
	u16 riskfactor;
	u8 *out;
	struct route_hop *hops;
	double fuzz;
	struct siphash_seed seed;

	/* To choose between variations, we need to know how much we're
	 * sending (eliminates too-small channels, and also affects the fees
	 * we'll pay), how to trade off more locktime vs. more fees, and how
	 * much cltv we need at the final node to give exact values for each
	 * intermediate hop, as well as how much random fuzz to inject to
	 * avoid being too predictable. */
	if (!fromwire_gossip_getroute_request(msg,
					      &source, &destination,
					      &msatoshi, &riskfactor,
					      &final_cltv, &fuzz, &seed))
		master_badmsg(WIRE_GOSSIP_GETROUTE_REQUEST, msg);

	status_trace("Trying to find a route from %s to %s for %"PRIu64" msatoshi",
		     pubkey_to_hexstr(tmpctx, &source),
		     pubkey_to_hexstr(tmpctx, &destination), msatoshi);

	/* routing.c does all the hard work; can return NULL. */
	hops = get_route(tmpctx, daemon->rstate, &source, &destination,
			 msatoshi, riskfactor, final_cltv,
			 fuzz, &seed);

	out = towire_gossip_getroute_reply(NULL, hops);
	daemon_conn_send(daemon->master, take(out));
	return daemon_conn_read_next(conn, daemon->master);
}
|
2017-03-15 11:36:52 +01:00
|
|
|
|
2018-10-19 03:17:49 +02:00
|
|
|
/* Copy a pubkey's raw bytes into a fixed-size array, with a compile-time
 * check that the destination is exactly the right size.  Used to avoid
 * expensive DER encode/decode when talking to lightningd (same arch). */
#define raw_pubkey(arr, id)				\
	do { BUILD_ASSERT(sizeof(arr) == sizeof(*id));	\
		memcpy(arr, id, sizeof(*id));		\
	} while(0)
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ When someone asks lightningd to `listchannels`, gossipd does the work:
|
|
|
|
* marshalling the channel information for all channels into an array of
|
|
|
|
* gossip_getchannels_entry, which lightningd converts to JSON. Each channel
|
|
|
|
* is represented by two half_chan; one in each direction.
|
|
|
|
*
|
|
|
|
* FIXME: I run a lightning node permanently under valgrind, and Christian ran
|
|
|
|
* `listchannels` on it. After about 15 minutes I simply rebooted. There's
|
|
|
|
* been some optimization since then, but blocking gossipd to marshall all the
|
|
|
|
* channels will become in issue in future, I expect. We may even hit the
|
|
|
|
* 2^24 internal message limit.
|
|
|
|
*/
|
2018-03-02 03:27:30 +01:00
|
|
|
static void append_half_channel(struct gossip_getchannels_entry **entries,
|
2018-03-04 03:26:59 +01:00
|
|
|
const struct chan *chan,
|
2018-03-04 03:26:56 +01:00
|
|
|
int idx)
|
2018-03-02 03:27:30 +01:00
|
|
|
{
|
2018-03-04 03:26:59 +01:00
|
|
|
const struct half_chan *c = &chan->half[idx];
|
2018-03-02 03:27:30 +01:00
|
|
|
struct gossip_getchannels_entry *e;
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* If we've never seen a channel_update for this direction... */
|
2018-05-10 16:00:38 +02:00
|
|
|
if (!is_halfchan_defined(c))
|
2018-03-02 09:59:17 +01:00
|
|
|
return;
|
|
|
|
|
2018-09-27 02:19:24 +02:00
|
|
|
e = tal_arr_expand(entries);
|
2018-03-02 03:27:30 +01:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Our 'struct chan' contains two nodes: they are in pubkey_cmp order
|
|
|
|
* (ie. chan->nodes[0] is the lesser pubkey) and this is the same as
|
|
|
|
* the direction bit in `channel_update`s `channel_flags`.
|
|
|
|
*
|
|
|
|
* The halfchans are arranged so that half[0] src == nodes[0], and we
|
|
|
|
* use that here. We also avoid using libsecp256k1 to translate the
|
|
|
|
* pubkeys to DER and back: that proves quite expensive, and we assume
|
|
|
|
* we're on the same architecture as lightningd, so we just send them
|
|
|
|
* raw in this case. */
|
2018-10-19 03:17:49 +02:00
|
|
|
raw_pubkey(e->source, &chan->nodes[idx]->id);
|
|
|
|
raw_pubkey(e->destination, &chan->nodes[!idx]->id);
|
2018-03-05 23:52:26 +01:00
|
|
|
e->satoshis = chan->satoshis;
|
2018-09-20 02:59:46 +02:00
|
|
|
e->channel_flags = c->channel_flags;
|
|
|
|
e->message_flags = c->message_flags;
|
2018-07-26 23:21:37 +02:00
|
|
|
e->local_disabled = chan->local_disabled;
|
2018-05-10 16:00:38 +02:00
|
|
|
e->public = is_chan_public(chan);
|
2018-03-04 03:26:56 +01:00
|
|
|
e->short_channel_id = chan->scid;
|
2018-05-10 16:00:38 +02:00
|
|
|
e->last_update_timestamp = c->last_timestamp;
|
|
|
|
e->base_fee_msat = c->base_fee;
|
|
|
|
e->fee_per_millionth = c->proportional_fee;
|
|
|
|
e->delay = c->delay;
|
2018-03-02 03:27:30 +01:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ Marshal (possibly) both channel directions into entries */
|
2018-03-02 09:59:16 +01:00
|
|
|
static void append_channel(struct gossip_getchannels_entry **entries,
|
2018-03-04 03:26:59 +01:00
|
|
|
const struct chan *chan)
|
2018-03-02 09:59:16 +01:00
|
|
|
{
|
2018-03-04 03:26:56 +01:00
|
|
|
append_half_channel(entries, chan, 0);
|
|
|
|
append_half_channel(entries, chan, 1);
|
2018-03-02 09:59:16 +01:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ This is where lightningd asks for all channels we know about. */
|
2018-10-25 01:43:05 +02:00
|
|
|
static struct io_plan *getchannels_req(struct io_conn *conn,
|
|
|
|
struct daemon *daemon,
|
|
|
|
const u8 *msg)
|
2017-03-22 13:30:09 +01:00
|
|
|
{
|
|
|
|
u8 *out;
|
|
|
|
struct gossip_getchannels_entry *entries;
|
2018-03-04 03:26:59 +01:00
|
|
|
struct chan *chan;
|
2018-01-16 20:44:32 +01:00
|
|
|
struct short_channel_id *scid;
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Note: scid is marked optional in gossip_wire.csv */
|
2018-09-29 08:33:51 +02:00
|
|
|
if (!fromwire_gossip_getchannels_request(msg, msg, &scid))
|
|
|
|
master_badmsg(WIRE_GOSSIP_GETCHANNELS_REQUEST, msg);
|
2017-03-22 13:30:09 +01:00
|
|
|
|
2018-03-02 03:27:30 +01:00
|
|
|
entries = tal_arr(tmpctx, struct gossip_getchannels_entry, 0);
|
2018-11-21 01:36:08 +01:00
|
|
|
/* They can ask about a particular channel by short_channel_id */
|
2018-03-02 09:59:16 +01:00
|
|
|
if (scid) {
|
|
|
|
chan = get_channel(daemon->rstate, scid);
|
|
|
|
if (chan)
|
|
|
|
append_channel(&entries, chan);
|
|
|
|
} else {
|
|
|
|
u64 idx;
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* For the more general case, we just iterate through every
|
|
|
|
* short channel id. */
|
2018-03-04 03:26:59 +01:00
|
|
|
for (chan = uintmap_first(&daemon->rstate->chanmap, &idx);
|
2018-03-02 09:59:16 +01:00
|
|
|
chan;
|
2018-03-04 03:26:59 +01:00
|
|
|
chan = uintmap_after(&daemon->rstate->chanmap, &idx)) {
|
2018-03-02 09:59:16 +01:00
|
|
|
append_channel(&entries, chan);
|
2017-03-22 13:30:09 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-15 07:10:22 +01:00
|
|
|
out = towire_gossip_getchannels_reply(NULL, entries);
|
2018-10-25 01:43:05 +02:00
|
|
|
daemon_conn_send(daemon->master, take(out));
|
|
|
|
return daemon_conn_read_next(conn, daemon->master);
|
2017-03-22 13:30:09 +01:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ Similarly, lightningd asks us for all nodes when it gets `listnodes` */
|
|
|
|
/* We keep pointers into n, assuming it won't change. */
|
2018-09-29 08:33:47 +02:00
|
|
|
static void append_node(const struct gossip_getnodes_entry ***entries,
|
2018-01-16 20:44:32 +01:00
|
|
|
const struct node *n)
|
|
|
|
{
|
2018-09-29 08:33:47 +02:00
|
|
|
struct gossip_getnodes_entry *e;
|
|
|
|
|
|
|
|
*tal_arr_expand(entries) = e
|
|
|
|
= tal(*entries, struct gossip_getnodes_entry);
|
2018-10-19 03:17:49 +02:00
|
|
|
raw_pubkey(e->nodeid, &n->id);
|
2018-09-29 08:33:47 +02:00
|
|
|
e->last_timestamp = n->last_timestamp;
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Timestamp on wire is an unsigned 32 bit: we use a 64-bit signed, so
|
|
|
|
* -1 means "we never received a channel_update". */
|
2018-09-29 08:33:47 +02:00
|
|
|
if (e->last_timestamp < 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
e->globalfeatures = n->globalfeatures;
|
|
|
|
e->addresses = n->addresses;
|
|
|
|
BUILD_ASSERT(ARRAY_SIZE(e->alias) == ARRAY_SIZE(n->alias));
|
|
|
|
BUILD_ASSERT(ARRAY_SIZE(e->color) == ARRAY_SIZE(n->rgb_color));
|
|
|
|
memcpy(e->alias, n->alias, ARRAY_SIZE(e->alias));
|
|
|
|
memcpy(e->color, n->rgb_color, ARRAY_SIZE(e->color));
|
2018-01-16 20:44:32 +01:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Simply routine when they ask for `listnodes` */
|
2018-01-16 20:44:32 +01:00
|
|
|
static struct io_plan *getnodes(struct io_conn *conn, struct daemon *daemon,
|
|
|
|
const u8 *msg)
|
2017-03-12 13:39:23 +01:00
|
|
|
{
|
2017-03-16 05:05:26 +01:00
|
|
|
u8 *out;
|
2017-03-12 13:39:23 +01:00
|
|
|
struct node *n;
|
2018-02-08 02:24:46 +01:00
|
|
|
const struct gossip_getnodes_entry **nodes;
|
2018-07-09 13:17:49 +02:00
|
|
|
struct pubkey *id;
|
2017-03-12 13:39:23 +01:00
|
|
|
|
2018-09-29 08:33:51 +02:00
|
|
|
if (!fromwire_gossip_getnodes_request(tmpctx, msg, &id))
|
|
|
|
master_badmsg(WIRE_GOSSIP_GETNODES_REQUEST, msg);
|
2018-01-16 20:44:32 +01:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Format of reply is the same whether they ask for a specific node
|
|
|
|
* (0 or one responses) or all nodes (0 or more) */
|
2018-02-08 02:24:46 +01:00
|
|
|
nodes = tal_arr(tmpctx, const struct gossip_getnodes_entry *, 0);
|
2018-07-09 13:17:49 +02:00
|
|
|
if (id) {
|
|
|
|
n = get_node(daemon->rstate, id);
|
|
|
|
if (n)
|
2018-09-29 08:33:47 +02:00
|
|
|
append_node(&nodes, n);
|
2018-01-16 20:44:32 +01:00
|
|
|
} else {
|
|
|
|
struct node_map_iter i;
|
|
|
|
n = node_map_first(daemon->rstate->nodes, &i);
|
|
|
|
while (n != NULL) {
|
2018-09-29 08:33:47 +02:00
|
|
|
append_node(&nodes, n);
|
2018-01-16 20:44:32 +01:00
|
|
|
n = node_map_next(daemon->rstate->nodes, &i);
|
|
|
|
}
|
2017-03-12 13:39:23 +01:00
|
|
|
}
|
2018-03-15 07:10:22 +01:00
|
|
|
out = towire_gossip_getnodes_reply(NULL, nodes);
|
2018-10-25 01:43:05 +02:00
|
|
|
daemon_conn_send(daemon->master, take(out));
|
|
|
|
return daemon_conn_read_next(conn, daemon->master);
|
2017-03-12 13:39:23 +01:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ We currently have a JSON command to ping a peer: it ends up here, where
|
|
|
|
* gossipd generates the actual ping and sends it like any other gossip. */
|
2017-04-12 20:20:48 +02:00
|
|
|
static struct io_plan *ping_req(struct io_conn *conn, struct daemon *daemon,
|
|
|
|
const u8 *msg)
|
|
|
|
{
|
gossipd: rewrite to do the handshake internally.
Now the flow is much simpler from a lightningd POV:
1. If we want to connect to a peer, just send gossipd `gossipctl_reach_peer`.
2. Every new peer, gossipd hands up to lightningd, with global/local features
and the peer fd and a gossip fd using `gossip_peer_connected`
3. If lightningd doesn't want it, it just hands the peerfd and global/local
features back to gossipd using `gossipctl_handle_peer`
4. If a peer sends a non-gossip msg (eg `open_channel`) the gossipd sends
it up using `gossip_peer_nongossip`.
5. If lightningd wants to fund a channel, it simply calls `release_channel`.
Notes:
* There's no more "unique_id": we use the peer id.
* For the moment, we don't ask gossipd when we're told to list peers, so
connected peers without a channel don't appear in the JSON getpeers API.
* We add a `gossipctl_peer_addrhint` for the moment, so you can connect to
a specific ip/port, but using other sources is a TODO.
* We now (correctly) only give up on reaching a peer after we exchange init
messages, which changes the test_disconnect case.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2017-10-11 12:09:49 +02:00
|
|
|
struct pubkey id;
|
2017-04-12 20:20:48 +02:00
|
|
|
u16 num_pong_bytes, len;
|
|
|
|
struct peer *peer;
|
|
|
|
u8 *ping;
|
|
|
|
|
2018-02-20 21:59:09 +01:00
|
|
|
if (!fromwire_gossip_ping(msg, &id, &num_pong_bytes, &len))
|
2017-09-12 06:55:52 +02:00
|
|
|
master_badmsg(WIRE_GOSSIP_PING, msg);
|
2017-04-12 20:20:48 +02:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Even if lightningd were to check for valid ids, there's a race
|
|
|
|
* where it might vanish before we read this command; cleaner to
|
|
|
|
* handle it here with 'sent' = false. */
|
gossipd: rewrite to do the handshake internally.
Now the flow is much simpler from a lightningd POV:
1. If we want to connect to a peer, just send gossipd `gossipctl_reach_peer`.
2. Every new peer, gossipd hands up to lightningd, with global/local features
and the peer fd and a gossip fd using `gossip_peer_connected`
3. If lightningd doesn't want it, it just hands the peerfd and global/local
features back to gossipd using `gossipctl_handle_peer`
4. If a peer sends a non-gossip msg (eg `open_channel`) the gossipd sends
it up using `gossip_peer_nongossip`.
5. If lightningd wants to fund a channel, it simply calls `release_channel`.
Notes:
* There's no more "unique_id": we use the peer id.
* For the moment, we don't ask gossipd when we're told to list peers, so
connected peers without a channel don't appear in the JSON getpeers API.
* We add a `gossipctl_peer_addrhint` for the moment, so you can connect to
a specific ip/port, but using other sources is a TODO.
* We now (correctly) only give up on reaching a peer after we exchange init
messages, which changes the test_disconnect case.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2017-10-11 12:09:49 +02:00
|
|
|
peer = find_peer(daemon, &id);
|
|
|
|
if (!peer) {
|
2018-10-25 01:43:05 +02:00
|
|
|
daemon_conn_send(daemon->master,
|
2018-09-11 21:57:11 +02:00
|
|
|
take(towire_gossip_ping_reply(NULL, &id,
|
|
|
|
false, 0)));
|
gossipd: rewrite to do the handshake internally.
Now the flow is much simpler from a lightningd POV:
1. If we want to connect to a peer, just send gossipd `gossipctl_reach_peer`.
2. Every new peer, gossipd hands up to lightningd, with global/local features
and the peer fd and a gossip fd using `gossip_peer_connected`
3. If lightningd doesn't want it, it just hands the peerfd and global/local
features back to gossipd using `gossipctl_handle_peer`
4. If a peer sends a non-gossip msg (eg `open_channel`) the gossipd sends
it up using `gossip_peer_nongossip`.
5. If lightningd wants to fund a channel, it simply calls `release_channel`.
Notes:
* There's no more "unique_id": we use the peer id.
* For the moment, we don't ask gossipd when we're told to list peers, so
connected peers without a channel don't appear in the JSON getpeers API.
* We add a `gossipctl_peer_addrhint` for the moment, so you can connect to
a specific ip/port, but using other sources is a TODO.
* We now (correctly) only give up on reaching a peer after we exchange init
messages, which changes the test_disconnect case.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2017-10-11 12:09:49 +02:00
|
|
|
goto out;
|
|
|
|
}
|
2017-04-12 20:20:48 +02:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* It should never ask for an oversize ping. */
|
2017-04-12 20:20:48 +02:00
|
|
|
ping = make_ping(peer, num_pong_bytes, len);
|
2018-07-28 08:00:16 +02:00
|
|
|
if (tal_count(ping) > 65535)
|
2017-09-12 06:55:52 +02:00
|
|
|
status_failed(STATUS_FAIL_MASTER_IO, "Oversize ping");
|
2017-04-12 20:20:48 +02:00
|
|
|
|
2018-07-24 02:26:43 +02:00
|
|
|
queue_peer_msg(peer, take(ping));
|
2017-04-12 20:20:48 +02:00
|
|
|
status_trace("sending ping expecting %sresponse",
|
|
|
|
num_pong_bytes >= 65532 ? "no " : "");
|
|
|
|
|
|
|
|
/* BOLT #1:
|
|
|
|
*
|
2018-06-17 12:13:44 +02:00
|
|
|
* A node receiving a `ping` message:
|
|
|
|
*...
|
|
|
|
* - if `num_pong_bytes` is less than 65532:
|
|
|
|
* - MUST respond by sending a `pong` message, with `byteslen` equal
|
|
|
|
* to `num_pong_bytes`.
|
|
|
|
* - otherwise (`num_pong_bytes` is **not** less than 65532):
|
|
|
|
* - MUST ignore the `ping`.
|
2017-04-12 20:20:48 +02:00
|
|
|
*/
|
|
|
|
if (num_pong_bytes >= 65532)
|
2018-10-25 01:43:05 +02:00
|
|
|
daemon_conn_send(daemon->master,
|
2018-09-11 21:57:11 +02:00
|
|
|
take(towire_gossip_ping_reply(NULL, &id,
|
|
|
|
true, 0)));
|
2017-04-12 20:20:48 +02:00
|
|
|
else
|
2018-11-21 01:36:08 +01:00
|
|
|
/* We'll respond to lightningd once the pong comes in */
|
2018-07-24 02:26:43 +02:00
|
|
|
peer->num_pings_outstanding++;
|
gossipd: rewrite to do the handshake internally.
Now the flow is much simpler from a lightningd POV:
1. If we want to connect to a peer, just send gossipd `gossipctl_reach_peer`.
2. Every new peer, gossipd hands up to lightningd, with global/local features
and the peer fd and a gossip fd using `gossip_peer_connected`
3. If lightningd doesn't want it, it just hands the peerfd and global/local
features back to gossipd using `gossipctl_handle_peer`
4. If a peer sends a non-gossip msg (eg `open_channel`) the gossipd sends
it up using `gossip_peer_nongossip`.
5. If lightningd wants to fund a channel, it simply calls `release_channel`.
Notes:
* There's no more "unique_id": we use the peer id.
* For the moment, we don't ask gossipd when we're told to list peers, so
connected peers without a channel don't appear in the JSON getpeers API.
* We add a `gossipctl_peer_addrhint` for the moment, so you can connect to
a specific ip/port, but using other sources is a TODO.
* We now (correctly) only give up on reaching a peer after we exchange init
messages, which changes the test_disconnect case.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2017-10-11 12:09:49 +02:00
|
|
|
|
|
|
|
out:
|
2018-10-25 01:43:05 +02:00
|
|
|
return daemon_conn_read_next(conn, daemon->master);
|
2017-04-12 20:20:48 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ For routeboost, we offer payers a hint of what incoming channels might
|
|
|
|
* have capacity for their payment. To do this, lightningd asks for the
|
|
|
|
* information about all channels to this node; but gossipd doesn't know about
|
|
|
|
* current capacities, so lightningd selects which to use. */
|
2018-09-27 07:29:17 +02:00
|
|
|
static struct io_plan *get_incoming_channels(struct io_conn *conn,
|
|
|
|
struct daemon *daemon,
|
|
|
|
const u8 *msg)
|
|
|
|
{
|
|
|
|
struct node *node;
|
|
|
|
struct route_info *r = tal_arr(tmpctx, struct route_info, 0);
|
|
|
|
|
|
|
|
if (!fromwire_gossip_get_incoming_channels(msg))
|
|
|
|
master_badmsg(WIRE_GOSSIP_GET_INCOMING_CHANNELS, msg);
|
|
|
|
|
|
|
|
node = get_node(daemon->rstate, &daemon->rstate->local_id);
|
|
|
|
if (node) {
|
|
|
|
for (size_t i = 0; i < tal_count(node->chans); i++) {
|
|
|
|
const struct chan *c = node->chans[i];
|
|
|
|
const struct half_chan *hc;
|
|
|
|
struct route_info *ri;
|
|
|
|
|
2018-10-15 06:53:55 +02:00
|
|
|
/* Don't leak private channels. */
|
|
|
|
if (!is_chan_public(c))
|
|
|
|
continue;
|
|
|
|
|
2018-09-27 07:29:17 +02:00
|
|
|
hc = &c->half[half_chan_to(node, c)];
|
|
|
|
|
|
|
|
if (!is_halfchan_enabled(hc))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
ri = tal_arr_expand(&r);
|
|
|
|
ri->pubkey = other_node(node, c)->id;
|
|
|
|
ri->short_channel_id = c->scid;
|
|
|
|
ri->fee_base_msat = hc->base_fee;
|
|
|
|
ri->fee_proportional_millionths = hc->proportional_fee;
|
|
|
|
ri->cltv_expiry_delta = hc->delay;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
msg = towire_gossip_get_incoming_channels_reply(NULL, r);
|
2018-10-25 01:43:05 +02:00
|
|
|
daemon_conn_send(daemon->master, take(msg));
|
2018-09-27 07:29:17 +02:00
|
|
|
|
2018-10-25 01:43:05 +02:00
|
|
|
return daemon_conn_read_next(conn, daemon->master);
|
2018-09-27 07:29:17 +02:00
|
|
|
}
|
|
|
|
|
2018-08-14 09:13:00 +02:00
|
|
|
#if DEVELOPER
|
2018-11-21 01:36:08 +01:00
|
|
|
/* FIXME: One day this will be called internally; for now it's just for
|
|
|
|
* testing with dev_query_scids. */
|
2018-06-04 06:22:25 +02:00
|
|
|
static struct io_plan *query_scids_req(struct io_conn *conn,
|
|
|
|
struct daemon *daemon,
|
|
|
|
const u8 *msg)
|
|
|
|
{
|
|
|
|
struct pubkey id;
|
|
|
|
struct short_channel_id *scids;
|
|
|
|
struct peer *peer;
|
|
|
|
u8 *encoded;
|
|
|
|
/* BOLT #7:
|
|
|
|
*
|
|
|
|
* 1. type: 261 (`query_short_channel_ids`) (`gossip_queries`)
|
|
|
|
* 2. data:
|
|
|
|
* * [`32`:`chain_hash`]
|
|
|
|
* * [`2`:`len`]
|
|
|
|
* * [`len`:`encoded_short_ids`]
|
|
|
|
*/
|
|
|
|
const size_t reply_overhead = 32 + 2;
|
|
|
|
const size_t max_encoded_bytes = 65535 - 2 - reply_overhead;
|
|
|
|
|
|
|
|
if (!fromwire_gossip_query_scids(msg, msg, &id, &scids))
|
|
|
|
master_badmsg(WIRE_GOSSIP_QUERY_SCIDS, msg);
|
|
|
|
|
|
|
|
peer = find_peer(daemon, &id);
|
|
|
|
if (!peer) {
|
|
|
|
status_broken("query_scids: unknown peer %s",
|
|
|
|
type_to_string(tmpctx, struct pubkey, &id));
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
2018-07-24 08:18:58 +02:00
|
|
|
if (!peer->gossip_queries_feature) {
|
2018-06-04 06:22:25 +02:00
|
|
|
status_broken("query_scids: no gossip_query support in peer %s",
|
|
|
|
type_to_string(tmpctx, struct pubkey, &id));
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
encoded = encode_short_channel_ids_start(tmpctx);
|
|
|
|
for (size_t i = 0; i < tal_count(scids); i++)
|
|
|
|
encode_add_short_channel_id(&encoded, &scids[i]);
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Because this is a dev command, we simply say this case is
|
|
|
|
* "too hard". */
|
2018-06-04 06:22:25 +02:00
|
|
|
if (!encode_short_channel_ids_end(&encoded, max_encoded_bytes)) {
|
|
|
|
status_broken("query_short_channel_ids: %zu is too many",
|
|
|
|
tal_count(scids));
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
2018-11-21 04:10:03 +01:00
|
|
|
msg = towire_query_short_channel_ids(NULL, &daemon->chain_hash,
|
2018-06-04 06:22:25 +02:00
|
|
|
encoded);
|
|
|
|
queue_peer_msg(peer, take(msg));
|
|
|
|
peer->num_scid_queries_outstanding++;
|
|
|
|
|
|
|
|
status_trace("sending query for %zu scids", tal_count(scids));
|
|
|
|
out:
|
2018-10-25 01:43:05 +02:00
|
|
|
return daemon_conn_read_next(conn, daemon->master);
|
2018-06-04 06:22:25 +02:00
|
|
|
|
|
|
|
fail:
|
2018-10-25 01:43:05 +02:00
|
|
|
daemon_conn_send(daemon->master,
|
2018-06-04 06:22:25 +02:00
|
|
|
take(towire_gossip_scids_reply(NULL, false, false)));
|
|
|
|
goto out;
|
|
|
|
}
|
2018-06-04 06:28:02 +02:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* BOLT #7:
|
|
|
|
*
|
|
|
|
* ### The `gossip_timestamp_filter` Message
|
|
|
|
*...
|
|
|
|
* This message allows a node to constrain future gossip messages to
|
|
|
|
* a specific range. A node which wants any gossip messages would have
|
|
|
|
* to send this, otherwise `gossip_queries` negotiation means no gossip
|
|
|
|
* messages would be received.
|
|
|
|
*
|
|
|
|
* Note that this filter replaces any previous one, so it can be used
|
|
|
|
* multiple times to change the gossip from a peer. */
|
|
|
|
/* This is the entry point for dev_send_timestamp_filter testing. */
|
2018-06-04 06:28:02 +02:00
|
|
|
static struct io_plan *send_timestamp_filter(struct io_conn *conn,
|
|
|
|
struct daemon *daemon,
|
|
|
|
const u8 *msg)
|
|
|
|
{
|
|
|
|
struct pubkey id;
|
|
|
|
u32 first, range;
|
|
|
|
struct peer *peer;
|
|
|
|
|
|
|
|
if (!fromwire_gossip_send_timestamp_filter(msg, &id, &first, &range))
|
|
|
|
master_badmsg(WIRE_GOSSIP_SEND_TIMESTAMP_FILTER, msg);
|
|
|
|
|
|
|
|
peer = find_peer(daemon, &id);
|
|
|
|
if (!peer) {
|
|
|
|
status_broken("send_timestamp_filter: unknown peer %s",
|
|
|
|
type_to_string(tmpctx, struct pubkey, &id));
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2018-07-24 08:18:58 +02:00
|
|
|
if (!peer->gossip_queries_feature) {
|
2018-06-04 06:28:02 +02:00
|
|
|
status_broken("send_timestamp_filter: no gossip_query support in peer %s",
|
|
|
|
type_to_string(tmpctx, struct pubkey, &id));
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2018-11-21 04:10:03 +01:00
|
|
|
msg = towire_gossip_timestamp_filter(NULL, &daemon->chain_hash,
|
2018-06-04 06:28:02 +02:00
|
|
|
first, range);
|
|
|
|
queue_peer_msg(peer, take(msg));
|
|
|
|
out:
|
2018-10-25 01:43:05 +02:00
|
|
|
return daemon_conn_read_next(conn, daemon->master);
|
2018-06-04 06:28:02 +02:00
|
|
|
}
|
2018-06-04 06:28:02 +02:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* FIXME: One day this will be called internally; for now it's just for
|
|
|
|
* testing with dev_query_channel_range. */
|
2018-06-04 06:28:02 +02:00
|
|
|
static struct io_plan *query_channel_range(struct io_conn *conn,
|
|
|
|
struct daemon *daemon,
|
|
|
|
const u8 *msg)
|
|
|
|
{
|
|
|
|
struct pubkey id;
|
|
|
|
u32 first_blocknum, number_of_blocks;
|
|
|
|
struct peer *peer;
|
|
|
|
|
|
|
|
if (!fromwire_gossip_query_channel_range(msg, &id, &first_blocknum,
|
|
|
|
&number_of_blocks))
|
|
|
|
master_badmsg(WIRE_GOSSIP_QUERY_SCIDS, msg);
|
|
|
|
|
|
|
|
peer = find_peer(daemon, &id);
|
|
|
|
if (!peer) {
|
|
|
|
status_broken("query_channel_range: unknown peer %s",
|
|
|
|
type_to_string(tmpctx, struct pubkey, &id));
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
2018-07-24 08:18:58 +02:00
|
|
|
if (!peer->gossip_queries_feature) {
|
2018-06-04 06:28:02 +02:00
|
|
|
status_broken("query_channel_range: no gossip_query support in peer %s",
|
|
|
|
type_to_string(tmpctx, struct pubkey, &id));
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (peer->query_channel_blocks) {
|
|
|
|
status_broken("query_channel_range: previous query active");
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
status_debug("sending query_channel_range for blocks %u+%u",
|
|
|
|
first_blocknum, number_of_blocks);
|
2018-11-21 04:10:03 +01:00
|
|
|
msg = towire_query_channel_range(NULL, &daemon->chain_hash,
|
2018-06-04 06:28:02 +02:00
|
|
|
first_blocknum, number_of_blocks);
|
|
|
|
queue_peer_msg(peer, take(msg));
|
2018-11-13 05:03:49 +01:00
|
|
|
peer->range_first_blocknum = first_blocknum;
|
|
|
|
peer->range_end_blocknum = first_blocknum + number_of_blocks;
|
|
|
|
peer->range_blocks_remaining = number_of_blocks;
|
|
|
|
peer->query_channel_blocks = tal_arrz(peer, bitmap,
|
|
|
|
BITMAP_NWORDS(number_of_blocks));
|
2018-06-04 06:28:02 +02:00
|
|
|
peer->query_channel_scids = tal_arr(peer, struct short_channel_id, 0);
|
|
|
|
|
|
|
|
out:
|
2018-10-25 01:43:05 +02:00
|
|
|
return daemon_conn_read_next(conn, daemon->master);
|
2018-06-04 06:28:02 +02:00
|
|
|
|
|
|
|
fail:
|
2018-10-25 01:43:05 +02:00
|
|
|
daemon_conn_send(daemon->master,
|
2018-06-04 06:28:02 +02:00
|
|
|
take(towire_gossip_query_channel_range_reply(NULL,
|
|
|
|
0, 0,
|
|
|
|
false,
|
|
|
|
NULL)));
|
|
|
|
goto out;
|
|
|
|
}
|
2018-06-04 06:28:02 +02:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* This is a testing hack to allow us to artificially lower the maximum bytes
|
|
|
|
* of short_channel_ids we'll encode, using dev_set_max_scids_encode_size. */
|
2018-06-04 06:28:02 +02:00
|
|
|
static struct io_plan *dev_set_max_scids_encode_size(struct io_conn *conn,
|
|
|
|
struct daemon *daemon,
|
|
|
|
const u8 *msg)
|
|
|
|
{
|
|
|
|
if (!fromwire_gossip_dev_set_max_scids_encode_size(msg,
|
|
|
|
&max_scids_encode_bytes))
|
|
|
|
master_badmsg(WIRE_GOSSIP_DEV_SET_MAX_SCIDS_ENCODE_SIZE, msg);
|
|
|
|
|
2018-07-02 07:23:56 +02:00
|
|
|
status_trace("Set max_scids_encode_bytes to %u", max_scids_encode_bytes);
|
2018-10-25 01:43:05 +02:00
|
|
|
return daemon_conn_read_next(conn, daemon->master);
|
2018-06-04 06:28:02 +02:00
|
|
|
}
|
2018-07-26 23:27:37 +02:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Another testing hack */
|
2018-07-26 23:27:37 +02:00
|
|
|
static struct io_plan *dev_gossip_suppress(struct io_conn *conn,
|
|
|
|
struct daemon *daemon,
|
|
|
|
const u8 *msg)
|
|
|
|
{
|
|
|
|
if (!fromwire_gossip_dev_suppress(msg))
|
|
|
|
master_badmsg(WIRE_GOSSIP_DEV_SUPPRESS, msg);
|
|
|
|
|
|
|
|
status_unusual("Suppressing all gossip");
|
|
|
|
suppress_gossip = true;
|
2018-10-25 01:43:05 +02:00
|
|
|
return daemon_conn_read_next(conn, daemon->master);
|
2018-07-26 23:27:37 +02:00
|
|
|
}
|
2018-06-04 06:22:25 +02:00
|
|
|
#endif /* DEVELOPER */
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ lightningd: so, tell me about this channel, so we can forward to it. */
|
2018-10-15 06:57:22 +02:00
|
|
|
static struct io_plan *get_channel_peer(struct io_conn *conn,
|
|
|
|
struct daemon *daemon, const u8 *msg)
|
2017-04-30 23:49:15 +02:00
|
|
|
{
|
|
|
|
struct short_channel_id scid;
|
2018-03-04 03:26:59 +01:00
|
|
|
struct chan *chan;
|
2018-10-15 06:57:22 +02:00
|
|
|
const struct pubkey *key;
|
|
|
|
int direction;
|
2017-05-05 08:41:44 +02:00
|
|
|
|
2018-10-15 06:57:22 +02:00
|
|
|
if (!fromwire_gossip_get_channel_peer(msg, &scid))
|
|
|
|
master_badmsg(WIRE_GOSSIP_GET_CHANNEL_PEER, msg);
|
2017-05-05 08:41:44 +02:00
|
|
|
|
2018-03-02 09:59:16 +01:00
|
|
|
chan = get_channel(daemon->rstate, &scid);
|
|
|
|
if (!chan) {
|
2017-06-06 01:47:10 +02:00
|
|
|
status_trace("Failed to resolve channel %s",
|
2018-03-15 05:30:38 +01:00
|
|
|
type_to_string(tmpctx, struct short_channel_id, &scid));
|
2018-10-15 06:57:22 +02:00
|
|
|
key = NULL;
|
|
|
|
} else if (local_direction(daemon, chan, &direction)) {
|
|
|
|
key = &chan->nodes[!direction]->id;
|
2017-04-30 23:49:15 +02:00
|
|
|
} else {
|
2018-10-15 06:57:22 +02:00
|
|
|
status_trace("Resolved channel %s was not local",
|
|
|
|
type_to_string(tmpctx, struct short_channel_id,
|
|
|
|
&scid));
|
|
|
|
key = NULL;
|
2017-04-30 23:49:15 +02:00
|
|
|
}
|
2018-10-25 01:43:05 +02:00
|
|
|
daemon_conn_send(daemon->master,
|
2018-10-15 06:57:22 +02:00
|
|
|
take(towire_gossip_get_channel_peer_reply(NULL, key)));
|
2018-10-25 01:43:05 +02:00
|
|
|
return daemon_conn_read_next(conn, daemon->master);
|
2017-04-30 23:49:15 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ We queue incoming channel_announcement pending confirmation from lightningd
|
|
|
|
* that it really is an unspent output. Here's its reply. */
|
2018-01-04 12:40:58 +01:00
|
|
|
static struct io_plan *handle_txout_reply(struct io_conn *conn,
|
|
|
|
struct daemon *daemon, const u8 *msg)
|
|
|
|
{
|
|
|
|
struct short_channel_id scid;
|
|
|
|
u8 *outscript;
|
2018-03-05 23:25:00 +01:00
|
|
|
u64 satoshis;
|
2018-01-04 12:40:58 +01:00
|
|
|
|
2018-03-05 23:25:00 +01:00
|
|
|
if (!fromwire_gossip_get_txout_reply(msg, msg, &scid, &satoshis, &outscript))
|
2018-01-04 12:40:58 +01:00
|
|
|
master_badmsg(WIRE_GOSSIP_GET_TXOUT_REPLY, msg);
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Outscript is NULL if it's not an unspent output */
|
2018-06-04 06:15:25 +02:00
|
|
|
handle_pending_cannouncement(daemon->rstate, &scid, satoshis, outscript);
|
2018-11-21 01:36:08 +01:00
|
|
|
|
|
|
|
/* Anywhere we might have announced a channel, we check if it's time to
|
|
|
|
* announce ourselves (ie. if we just announced our own first channel) */
|
2018-06-04 06:38:39 +02:00
|
|
|
maybe_send_own_node_announce(daemon);
|
2018-01-04 12:40:58 +01:00
|
|
|
|
2018-10-25 01:43:05 +02:00
|
|
|
return daemon_conn_read_next(conn, daemon->master);
|
2018-01-04 12:40:58 +01:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ lightningd tells us when a payment has failed; we mark the channel (or
|
|
|
|
* node) unusable here (maybe temporarily), and unpack and channel_update
|
|
|
|
* contained in the error. */
|
2018-01-18 00:32:36 +01:00
|
|
|
static struct io_plan *handle_routing_failure(struct io_conn *conn,
|
|
|
|
struct daemon *daemon,
|
|
|
|
const u8 *msg)
|
|
|
|
{
|
|
|
|
struct pubkey erring_node;
|
|
|
|
struct short_channel_id erring_channel;
|
|
|
|
u16 failcode;
|
2018-01-21 01:36:41 +01:00
|
|
|
u8 *channel_update;
|
2018-01-18 00:32:36 +01:00
|
|
|
|
2018-01-21 01:36:41 +01:00
|
|
|
if (!fromwire_gossip_routing_failure(msg,
|
2018-02-20 21:59:09 +01:00
|
|
|
msg,
|
2018-01-18 00:32:36 +01:00
|
|
|
&erring_node,
|
|
|
|
&erring_channel,
|
2018-01-21 01:36:41 +01:00
|
|
|
&failcode,
|
|
|
|
&channel_update))
|
2018-01-18 00:32:36 +01:00
|
|
|
master_badmsg(WIRE_GOSSIP_ROUTING_FAILURE, msg);
|
|
|
|
|
|
|
|
routing_failure(daemon->rstate,
|
|
|
|
&erring_node,
|
|
|
|
&erring_channel,
|
2018-01-21 01:36:41 +01:00
|
|
|
(enum onion_type) failcode,
|
|
|
|
channel_update);
|
2018-01-18 00:32:36 +01:00
|
|
|
|
2018-10-25 01:43:05 +02:00
|
|
|
return daemon_conn_read_next(conn, daemon->master);
|
2018-01-18 00:32:36 +01:00
|
|
|
}
|
2018-11-21 01:36:08 +01:00
|
|
|
|
|
|
|
/*~ This allows lightningd to explicitly mark a channel temporarily unroutable.
|
|
|
|
* This is used when we get an unparsable error, and we don't know who to blame;
|
|
|
|
* lightningd uses this to marking routes unroutable at random... */
|
2018-02-06 16:32:06 +01:00
|
|
|
static struct io_plan *
|
|
|
|
handle_mark_channel_unroutable(struct io_conn *conn,
|
|
|
|
struct daemon *daemon,
|
|
|
|
const u8 *msg)
|
|
|
|
{
|
|
|
|
struct short_channel_id channel;
|
|
|
|
|
2018-02-20 21:59:09 +01:00
|
|
|
if (!fromwire_gossip_mark_channel_unroutable(msg, &channel))
|
2018-02-06 16:32:06 +01:00
|
|
|
master_badmsg(WIRE_GOSSIP_MARK_CHANNEL_UNROUTABLE, msg);
|
|
|
|
|
|
|
|
mark_channel_unroutable(daemon->rstate, &channel);
|
|
|
|
|
2018-10-25 01:43:05 +02:00
|
|
|
return daemon_conn_read_next(conn, daemon->master);
|
2018-02-06 16:32:06 +01:00
|
|
|
}
|
2018-01-23 22:13:19 +01:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ This is where lightningd tells us that a channel's funding transaction has
|
|
|
|
* been spent. */
|
2018-03-26 20:10:03 +02:00
|
|
|
static struct io_plan *handle_outpoint_spent(struct io_conn *conn,
|
|
|
|
struct daemon *daemon,
|
|
|
|
const u8 *msg)
|
|
|
|
{
|
2018-03-28 12:14:01 +02:00
|
|
|
struct short_channel_id scid;
|
|
|
|
struct chan *chan;
|
|
|
|
struct routing_state *rstate = daemon->rstate;
|
|
|
|
if (!fromwire_gossip_outpoint_spent(msg, &scid))
|
|
|
|
master_badmsg(WIRE_GOSSIP_ROUTING_FAILURE, msg);
|
|
|
|
|
|
|
|
chan = get_channel(rstate, &scid);
|
|
|
|
if (chan) {
|
|
|
|
status_trace(
|
|
|
|
"Deleting channel %s due to the funding outpoint being "
|
|
|
|
"spent",
|
|
|
|
type_to_string(msg, struct short_channel_id, &scid));
|
|
|
|
/* Freeing is sufficient since everything else is allocated off
|
|
|
|
* of the channel and the destructor takes care of unregistering
|
|
|
|
* the channel */
|
|
|
|
tal_free(chan);
|
2018-11-21 01:36:08 +01:00
|
|
|
/* We put a tombstone marker in the channel store, so we don't
|
|
|
|
* have to replay blockchain spends on restart. */
|
2018-03-28 12:54:09 +02:00
|
|
|
gossip_store_add_channel_delete(rstate->store, &scid);
|
2018-03-28 12:14:01 +02:00
|
|
|
}
|
|
|
|
|
2018-10-25 01:43:05 +02:00
|
|
|
return daemon_conn_read_next(conn, daemon->master);
|
2018-03-26 20:10:03 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ This is sent by lightningd when it kicks off 'closingd': we disable it
|
|
|
|
* in both directions.
|
2018-05-26 15:19:24 +02:00
|
|
|
*
|
|
|
|
* We'll leave it to handle_outpoint_spent to delete the channel from our view
|
|
|
|
* once the close gets confirmed. This avoids having strange states in which the
|
|
|
|
* channel is list in our peer list but won't be returned when listing public
|
|
|
|
* channels. This does not send out updates since that's triggered by the peer
|
|
|
|
* connection closing.
|
|
|
|
*/
|
|
|
|
static struct io_plan *handle_local_channel_close(struct io_conn *conn,
|
|
|
|
struct daemon *daemon,
|
|
|
|
const u8 *msg)
|
|
|
|
{
|
|
|
|
struct short_channel_id scid;
|
|
|
|
struct chan *chan;
|
|
|
|
struct routing_state *rstate = daemon->rstate;
|
|
|
|
if (!fromwire_gossip_local_channel_close(msg, &scid))
|
|
|
|
master_badmsg(WIRE_GOSSIP_ROUTING_FAILURE, msg);
|
|
|
|
|
|
|
|
chan = get_channel(rstate, &scid);
|
2018-05-29 13:36:57 +02:00
|
|
|
if (chan)
|
2018-09-25 07:43:56 +02:00
|
|
|
chan->local_disabled = true;
|
2018-10-25 01:43:05 +02:00
|
|
|
return daemon_conn_read_next(conn, daemon->master);
|
2018-05-26 15:19:24 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ This routine handles all the commands from lightningd: it dispatches on
 * the message type and hands off to the matching handler, each of which
 * returns the next io_plan (normally "read the next command"). */
static struct io_plan *recv_req(struct io_conn *conn,
				const u8 *msg,
				struct daemon *daemon)
{
	/* Peek at the 2-byte type prefix without consuming the message. */
	enum gossip_wire_type t = fromwire_peektype(msg);

	switch (t) {
	case WIRE_GOSSIPCTL_INIT:
		return gossip_init(conn, daemon, msg);

	case WIRE_GOSSIP_GETNODES_REQUEST:
		return getnodes(conn, daemon, msg);

	case WIRE_GOSSIP_GETROUTE_REQUEST:
		return getroute_req(conn, daemon, msg);

	case WIRE_GOSSIP_GETCHANNELS_REQUEST:
		return getchannels_req(conn, daemon, msg);

	case WIRE_GOSSIP_GET_CHANNEL_PEER:
		return get_channel_peer(conn, daemon, msg);

	case WIRE_GOSSIP_GET_TXOUT_REPLY:
		return handle_txout_reply(conn, daemon, msg);

	case WIRE_GOSSIP_ROUTING_FAILURE:
		return handle_routing_failure(conn, daemon, msg);

	case WIRE_GOSSIP_MARK_CHANNEL_UNROUTABLE:
		return handle_mark_channel_unroutable(conn, daemon, msg);

	case WIRE_GOSSIP_OUTPOINT_SPENT:
		return handle_outpoint_spent(conn, daemon, msg);

	case WIRE_GOSSIP_LOCAL_CHANNEL_CLOSE:
		return handle_local_channel_close(conn, daemon, msg);

	case WIRE_GOSSIP_PING:
		return ping_req(conn, daemon, msg);

	case WIRE_GOSSIP_GET_INCOMING_CHANNELS:
		return get_incoming_channels(conn, daemon, msg);

#if DEVELOPER
	/* These are testing/debug-only commands. */
	case WIRE_GOSSIP_QUERY_SCIDS:
		return query_scids_req(conn, daemon, msg);

	case WIRE_GOSSIP_SEND_TIMESTAMP_FILTER:
		return send_timestamp_filter(conn, daemon, msg);

	case WIRE_GOSSIP_QUERY_CHANNEL_RANGE:
		return query_channel_range(conn, daemon, msg);

	case WIRE_GOSSIP_DEV_SET_MAX_SCIDS_ENCODE_SIZE:
		return dev_set_max_scids_encode_size(conn, daemon, msg);
	case WIRE_GOSSIP_DEV_SUPPRESS:
		return dev_gossip_suppress(conn, daemon, msg);
#else
	/* In non-DEVELOPER builds these fall through to status_failed()
	 * below: receiving one indicates a broken master. */
	case WIRE_GOSSIP_QUERY_SCIDS:
	case WIRE_GOSSIP_SEND_TIMESTAMP_FILTER:
	case WIRE_GOSSIP_QUERY_CHANNEL_RANGE:
	case WIRE_GOSSIP_DEV_SET_MAX_SCIDS_ENCODE_SIZE:
	case WIRE_GOSSIP_DEV_SUPPRESS:
		break;
#endif /* !DEVELOPER */

	/* We send these, we don't receive them */
	case WIRE_GOSSIP_GETNODES_REPLY:
	case WIRE_GOSSIP_GETROUTE_REPLY:
	case WIRE_GOSSIP_GETCHANNELS_REPLY:
	case WIRE_GOSSIP_PING_REPLY:
	case WIRE_GOSSIP_SCIDS_REPLY:
	case WIRE_GOSSIP_QUERY_CHANNEL_RANGE_REPLY:
	case WIRE_GOSSIP_GET_CHANNEL_PEER_REPLY:
	case WIRE_GOSSIP_GET_INCOMING_CHANNELS_REPLY:
	case WIRE_GOSSIP_GET_TXOUT:
		break;
	}

	/* Master shouldn't give bad requests. */
	status_failed(STATUS_FAIL_MASTER_IO, "%i: %s",
		      t, tal_hex(tmpctx, msg));
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* This is called when lightningd closes its connection to us.  We simply
 * exit.  (Registered as a tal destructor on daemon->master, so it fires
 * when that connection is freed.) */
static void master_gone(struct daemon_conn *master UNUSED)
{
	daemon_shutdown();
	/* Can't tell master, it's gone. */
	exit(2);
}
|
|
|
|
|
2017-01-10 06:08:33 +01:00
|
|
|
int main(int argc, char *argv[])
{
	setup_locale();

	struct daemon *daemon;

	/* Standard subdaemon boilerplate: status reporting, backtraces,
	 * command-line handling shared by all lightningd subdaemons. */
	subdaemon_setup(argc, argv);

	/* Root allocation: everything else hangs off this. */
	daemon = tal(NULL, struct daemon);
	list_head_init(&daemon->peers);

	/* Note the use of time_mono() here.  That's a monotonic clock, which
	 * is really useful: it can only be used to measure relative events
	 * (there's no correspondence to time-since-Ken-grew-a-beard or
	 * anything), but unlike time_now(), this will never jump backwards by
	 * half a second and leave me wondering how my tests failed CI! */
	timers_init(&daemon->timers, time_mono());

	/* Our daemons always use STDIN for commands from lightningd. */
	daemon->master = daemon_conn_new(daemon, STDIN_FILENO,
					 recv_req, NULL, daemon);
	/* When the master connection dies, master_gone() exits the daemon. */
	tal_add_destructor(daemon->master, master_gone);

	status_setup_async(daemon->master);

	/* connectd is already started, and uses this fd to ask us things. */
	daemon->connectd = daemon_conn_new(daemon, CONNECTD_FD,
					   connectd_req, NULL, daemon);

	/* This loop never exits.  io_loop() only returns if a timer has
	 * expired, or io_break() is called, or all fds are closed.  We don't
	 * use io_break and closing the lightningd fd calls master_gone()
	 * which exits. */
	for (;;) {
		struct timer *expired = NULL;
		io_loop(&daemon->timers, &expired);

		timer_expired(daemon, expired);
	}
}
|
2018-11-21 01:36:08 +01:00
|
|
|
|
|
|
|
/*~ Note that the actual routing stuff is in routing.c; you might want to
|
|
|
|
* check that out later.
|
|
|
|
*
|
|
|
|
* But that's the last of the global daemons. We now move on to the first of
|
|
|
|
* the per-peer daemons: openingd/openingd.c.
|
|
|
|
*/
|