2018-11-21 01:36:08 +01:00
|
|
|
/*~ Welcome to the gossip daemon: keeper of maps!
|
|
|
|
*
|
|
|
|
* This is the last "global" daemon; it has three purposes.
|
|
|
|
*
|
|
|
|
* 1. To determine routes for payments when lightningd asks.
|
|
|
|
* 2. The second purpose is to receive gossip from peers (via their
|
|
|
|
* per-peer daemons) and send it out to them.
|
|
|
|
 * 3. Talk to `connectd` to answer address queries for nodes.
|
|
|
|
*
|
|
|
|
* The gossip protocol itself is fairly simple, but has some twists which
|
|
|
|
* add complexity to this daemon.
|
|
|
|
*/
|
2021-12-04 12:23:56 +01:00
|
|
|
#include "config.h"
|
2018-02-08 02:24:46 +01:00
|
|
|
#include <ccan/cast/cast.h>
|
2017-01-10 06:08:33 +01:00
|
|
|
#include <ccan/tal/str/str.h>
|
2017-08-28 18:05:01 +02:00
|
|
|
#include <common/daemon_conn.h>
|
2021-01-07 19:42:47 +01:00
|
|
|
#include <common/ecdh_hsmd.h>
|
2021-06-10 22:30:19 +02:00
|
|
|
#include <common/lease_rates.h>
|
2018-11-21 23:39:31 +01:00
|
|
|
#include <common/memleak.h>
|
2018-04-25 14:39:38 +02:00
|
|
|
#include <common/pseudorand.h>
|
2017-08-28 18:05:01 +02:00
|
|
|
#include <common/status.h>
|
2018-01-08 11:01:09 +01:00
|
|
|
#include <common/subdaemon.h>
|
2017-08-28 18:04:01 +02:00
|
|
|
#include <common/timeout.h>
|
2017-08-28 18:03:01 +02:00
|
|
|
#include <common/type_to_string.h>
|
gossipd: rewrite to do the handshake internally.
Now the flow is much simpler from a lightningd POV:
1. If we want to connect to a peer, just send gossipd `gossipctl_reach_peer`.
2. Every new peer, gossipd hands up to lightningd, with global/local features
and the peer fd and a gossip fd using `gossip_peer_connected`
3. If lightningd doesn't want it, it just hands the peerfd and global/local
features back to gossipd using `gossipctl_handle_peer`
4. If a peer sends a non-gossip msg (eg `open_channel`) the gossipd sends
it up using `gossip_peer_nongossip`.
5. If lightningd wants to fund a channel, it simply calls `release_channel`.
Notes:
* There's no more "unique_id": we use the peer id.
* For the moment, we don't ask gossipd when we're told to list peers, so
connected peers without a channel don't appear in the JSON getpeers API.
* We add a `gossipctl_peer_addrhint` for the moment, so you can connect to
a specific ip/port, but using other sources is a TODO.
* We now (correctly) only give up on reaching a peer after we exchange init
messages, which changes the test_disconnect case.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2017-10-11 12:09:49 +02:00
|
|
|
#include <common/wire_error.h>
|
2022-02-23 19:11:03 +01:00
|
|
|
#include <common/wireaddr.h>
|
2020-08-25 04:16:22 +02:00
|
|
|
#include <connectd/connectd_gossipd_wiregen.h>
|
2017-01-10 06:08:33 +01:00
|
|
|
#include <errno.h>
|
2020-10-20 05:59:30 +02:00
|
|
|
#include <gossipd/gossip_store_wiregen.h>
|
2019-09-16 12:43:59 +02:00
|
|
|
#include <gossipd/gossipd.h>
|
2020-08-25 04:05:45 +02:00
|
|
|
#include <gossipd/gossipd_peerd_wiregen.h>
|
|
|
|
#include <gossipd/gossipd_wiregen.h>
|
2024-01-31 06:32:25 +01:00
|
|
|
#include <gossipd/gossmap_manage.h>
|
2019-09-22 07:53:42 +02:00
|
|
|
#include <gossipd/queries.h>
|
2021-12-04 12:27:32 +01:00
|
|
|
#include <gossipd/routing.h>
|
2019-10-08 03:15:24 +02:00
|
|
|
#include <gossipd/seeker.h>
|
2021-01-07 19:42:47 +01:00
|
|
|
#include <sodium/crypto_aead_chacha20poly1305.h>
|
2017-01-10 06:08:33 +01:00
|
|
|
|
2023-06-29 21:23:17 +02:00
|
|
|
const struct node_id *peer_node_id(const struct peer *peer)
|
|
|
|
{
|
|
|
|
return &peer->id;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool peer_node_id_eq(const struct peer *peer, const struct node_id *node_id)
|
|
|
|
{
|
|
|
|
return node_id_eq(&peer->id, node_id);
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ Destroy a peer, usually because the per-peer daemon has exited.
|
|
|
|
*
|
|
|
|
* Were you wondering why we call this "destroy_peer" and not "peer_destroy"?
|
|
|
|
* I thought not! But while CCAN modules are required to keep to their own
|
|
|
|
* prefix namespace, leading to unnatural word order, we couldn't stomach that
|
|
|
|
* for our own internal use. We use 'find_foo', 'destroy_foo' and 'new_foo'.
|
|
|
|
*/
|
2017-01-10 06:08:33 +01:00
|
|
|
static void destroy_peer(struct peer *peer)
|
|
|
|
{
|
2023-06-29 21:23:17 +02:00
|
|
|
/* Remove it from the peers table */
|
2024-01-31 04:16:18 +01:00
|
|
|
peer_node_id_map_del(peer->daemon->peers, peer);
|
2023-07-06 09:36:49 +02:00
|
|
|
|
2024-01-31 04:16:18 +01:00
|
|
|
/* Sorry seeker, this one is gone. */
|
2023-07-06 09:36:49 +02:00
|
|
|
seeker_peer_gone(peer->daemon->seeker, peer);
|
gossipd: rewrite to do the handshake internally.
Now the flow is much simpler from a lightningd POV:
1. If we want to connect to a peer, just send gossipd `gossipctl_reach_peer`.
2. Every new peer, gossipd hands up to lightningd, with global/local features
and the peer fd and a gossip fd using `gossip_peer_connected`
3. If lightningd doesn't want it, it just hands the peerfd and global/local
features back to gossipd using `gossipctl_handle_peer`
4. If a peer sends a non-gossip msg (eg `open_channel`) the gossipd sends
it up using `gossip_peer_nongossip`.
5. If lightningd wants to fund a channel, it simply calls `release_channel`.
Notes:
* There's no more "unique_id": we use the peer id.
* For the moment, we don't ask gossipd when we're told to list peers, so
connected peers without a channel don't appear in the JSON getpeers API.
* We add a `gossipctl_peer_addrhint` for the moment, so you can connect to
a specific ip/port, but using other sources is a TODO.
* We now (correctly) only give up on reaching a peer after we exchange init
messages, which changes the test_disconnect case.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2017-10-11 12:09:49 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Search for a peer. */
|
2019-09-16 12:43:59 +02:00
|
|
|
struct peer *find_peer(struct daemon *daemon, const struct node_id *id)
|
gossipd: rewrite to do the handshake internally.
Now the flow is much simpler from a lightningd POV:
1. If we want to connect to a peer, just send gossipd `gossipctl_reach_peer`.
2. Every new peer, gossipd hands up to lightningd, with global/local features
and the peer fd and a gossip fd using `gossip_peer_connected`
3. If lightningd doesn't want it, it just hands the peerfd and global/local
features back to gossipd using `gossipctl_handle_peer`
4. If a peer sends a non-gossip msg (eg `open_channel`) the gossipd sends
it up using `gossip_peer_nongossip`.
5. If lightningd wants to fund a channel, it simply calls `release_channel`.
Notes:
* There's no more "unique_id": we use the peer id.
* For the moment, we don't ask gossipd when we're told to list peers, so
connected peers without a channel don't appear in the JSON getpeers API.
* We add a `gossipctl_peer_addrhint` for the moment, so you can connect to
a specific ip/port, but using other sources is a TODO.
* We now (correctly) only give up on reaching a peer after we exchange init
messages, which changes the test_disconnect case.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2017-10-11 12:09:49 +02:00
|
|
|
{
|
2023-06-29 21:23:17 +02:00
|
|
|
return peer_node_id_map_get(daemon->peers, id);
|
gossipd: rewrite to do the handshake internally.
Now the flow is much simpler from a lightningd POV:
1. If we want to connect to a peer, just send gossipd `gossipctl_reach_peer`.
2. Every new peer, gossipd hands up to lightningd, with global/local features
and the peer fd and a gossip fd using `gossip_peer_connected`
3. If lightningd doesn't want it, it just hands the peerfd and global/local
features back to gossipd using `gossipctl_handle_peer`
4. If a peer sends a non-gossip msg (eg `open_channel`) the gossipd sends
it up using `gossip_peer_nongossip`.
5. If lightningd wants to fund a channel, it simply calls `release_channel`.
Notes:
* There's no more "unique_id": we use the peer id.
* For the moment, we don't ask gossipd when we're told to list peers, so
connected peers without a channel don't appear in the JSON getpeers API.
* We add a `gossipctl_peer_addrhint` for the moment, so you can connect to
a specific ip/port, but using other sources is a TODO.
* We now (correctly) only give up on reaching a peer after we exchange init
messages, which changes the test_disconnect case.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2017-10-11 12:09:49 +02:00
|
|
|
}
|
|
|
|
|
2019-10-08 03:26:24 +02:00
|
|
|
/* Increase a peer's gossip_counter, if peer not NULL */
|
2023-07-06 09:35:54 +02:00
|
|
|
void peer_supplied_good_gossip(struct daemon *daemon,
|
|
|
|
const struct node_id *source_peer,
|
|
|
|
size_t amount)
|
2019-10-08 03:13:24 +02:00
|
|
|
{
|
2023-07-06 09:35:54 +02:00
|
|
|
struct peer *peer;
|
|
|
|
|
|
|
|
if (!source_peer)
|
|
|
|
return;
|
|
|
|
|
|
|
|
peer = find_peer(daemon, source_peer);
|
|
|
|
if (!peer)
|
|
|
|
return;
|
|
|
|
|
|
|
|
peer->gossip_counter += amount;
|
2019-10-08 03:13:24 +02:00
|
|
|
}
|
|
|
|
|
2022-01-24 21:07:52 +01:00
|
|
|
/* Queue a gossip message for the peer: connectd simply forwards it to
|
|
|
|
* the peer. */
|
2024-01-31 05:27:33 +01:00
|
|
|
void queue_peer_msg(struct daemon *daemon,
|
|
|
|
const struct node_id *peer,
|
|
|
|
const u8 *msg TAKES)
|
2018-11-05 02:16:48 +01:00
|
|
|
{
|
2024-01-31 05:27:33 +01:00
|
|
|
u8 *outermsg = towire_gossipd_send_gossip(NULL, peer, msg);
|
|
|
|
daemon_conn_send(daemon->connectd, take(outermsg));
|
2022-01-24 21:07:52 +01:00
|
|
|
|
2022-01-24 21:09:52 +01:00
|
|
|
if (taken(msg))
|
|
|
|
tal_free(msg);
|
2018-11-05 02:16:48 +01:00
|
|
|
}
|
|
|
|
|
2019-06-03 20:15:25 +02:00
|
|
|
/*~ We have a helper for messages from the store. */
|
2019-09-22 07:53:42 +02:00
|
|
|
void queue_peer_from_store(struct peer *peer,
|
|
|
|
const struct broadcastable *bcast)
|
2019-04-11 07:15:22 +02:00
|
|
|
{
|
2024-01-31 05:32:33 +01:00
|
|
|
struct gossip_store *gs = peer->daemon->gs;
|
2024-01-31 05:27:33 +01:00
|
|
|
queue_peer_msg(peer->daemon, &peer->id,
|
|
|
|
take(gossip_store_get(NULL, gs, bcast->index)));
|
2018-11-05 02:16:48 +01:00
|
|
|
}
|
|
|
|
|
2019-04-11 07:15:22 +02:00
|
|
|
/*~ We don't actually keep node_announcements in memory; we keep them in
 * a file called `gossip_store`.  If we need some node details, we reload
 * and reparse.  It's slow, but generally rare.
 *
 * Returns false if the node has no announcement on record, or if what we
 * load fails to parse or doesn't match (either would indicate a corrupt
 * gossip_store).  On success, *features, *wireaddrs and *rates are
 * allocated off ctx; rgb_color and alias are filled in-place. */
static bool get_node_announcement(const tal_t *ctx,
				  struct daemon *daemon,
				  const struct node *n,
				  u8 rgb_color[3],
				  u8 alias[32],
				  u8 **features,
				  struct wireaddr **wireaddrs,
				  struct lease_rates **rates)
{
	const u8 *msg;
	struct node_id id;
	secp256k1_ecdsa_signature signature;
	u32 timestamp;
	u8 *addresses;
	struct tlv_node_ann_tlvs *na_tlvs;

	/* A zero bcast.index means we never stored an announcement. */
	if (!n->bcast.index)
		return false;

	msg = gossip_store_get(tmpctx, daemon->gs, n->bcast.index);

	/* Note: validity of node_id is already checked. */
	if (!fromwire_node_announcement(ctx, msg,
					&signature, features,
					&timestamp,
					&id, rgb_color, alias,
					&addresses,
					&na_tlvs)) {
		status_broken("Bad local node_announcement @%u: %s",
			      n->bcast.index, tal_hex(tmpctx, msg));
		return false;
	}

	/* Sanity-check that the store entry really belongs to this node
	 * and matches the timestamp we indexed it under. */
	if (!node_id_eq(&id, &n->id) || timestamp != n->bcast.timestamp) {
		status_broken("Wrong node_announcement @%u:"
			      " expected %s timestamp %u "
			      " got %s timestamp %u",
			      n->bcast.index,
			      type_to_string(tmpctx, struct node_id, &n->id),
			      timestamp,
			      type_to_string(tmpctx, struct node_id, &id),
			      n->bcast.timestamp);
		return false;
	}

	/* Decode the raw address blob into a wireaddr array on ctx. */
	*wireaddrs = fromwire_wireaddr_array(ctx, addresses);
	/* Move the optional option_will_fund lease rates (may be NULL)
	 * out of the TLV struct onto ctx. */
	*rates = tal_steal(ctx, na_tlvs->option_will_fund);

	/* addresses was only needed to build *wireaddrs. */
	tal_free(addresses);
	return true;
}
|
|
|
|
|
|
|
|
/* Version which also does nodeid lookup */
|
|
|
|
static bool get_node_announcement_by_id(const tal_t *ctx,
|
|
|
|
struct daemon *daemon,
|
|
|
|
const struct node_id *node_id,
|
|
|
|
u8 rgb_color[3],
|
|
|
|
u8 alias[32],
|
|
|
|
u8 **features,
|
2021-05-27 23:35:45 +02:00
|
|
|
struct wireaddr **wireaddrs,
|
|
|
|
struct lease_rates **rates)
|
2019-04-11 07:15:22 +02:00
|
|
|
{
|
|
|
|
struct node *n = get_node(daemon->rstate, node_id);
|
|
|
|
if (!n)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return get_node_announcement(ctx, daemon, n, rgb_color, alias,
|
2021-05-27 23:35:45 +02:00
|
|
|
features, wireaddrs, rates);
|
2019-04-11 07:15:22 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ Routines to handle gossip messages from peer, forwarded by subdaemons.
|
|
|
|
*-----------------------------------------------------------------------
|
|
|
|
*
|
|
|
|
* It's not the subdaemon's fault if they're malformed or invalid; so these
|
|
|
|
* all return an error packet which gets sent back to the subdaemon in that
|
|
|
|
* case.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* The routing code checks that it's basically valid, returning an
|
|
|
|
* error message for the peer or NULL. NULL means it's OK, but the
|
|
|
|
* message might be redundant, in which case scid is also NULL.
|
|
|
|
* Otherwise `scid` gives us the short_channel_id claimed by the
|
|
|
|
 * message, and puts the announcement on an internal 'pending'
|
|
|
|
* queue. We'll send a request to lightningd to look it up, and continue
|
|
|
|
* processing in `handle_txout_reply`. */
|
2021-02-02 06:16:20 +01:00
|
|
|
/* Feed a channel_announcement through the routing code; returns an error
 * to send back to the peer, or NULL on success (including the redundant
 * case, where scid comes back NULL too). */
static const u8 *handle_channel_announcement_msg(struct daemon *daemon,
						 const struct node_id *source_peer,
						 const u8 *msg)
{
	const struct short_channel_id *scid;
	const u8 *err;

	/* If it's OK, tells us the short_channel_id to lookup; it notes
	 * if this is the unknown channel the peer was looking for (in
	 * which case, it frees and NULLs that ptr) */
	err = handle_channel_announcement(daemon->rstate, msg,
					  daemon->current_blockheight,
					  &scid, source_peer);
	if (err)
		return err;
	else if (scid) {
		/* We give them some grace period, in case we don't know about
		 * block yet. */
		/* current_blockheight == 0 here means lightningd hasn't told
		 * us the blockheight yet, so defer rather than ask. */
		if (daemon->current_blockheight == 0
		    || !is_scid_depth_announceable(scid,
						   daemon->current_blockheight)) {
			tal_arr_expand(&daemon->deferred_txouts, *scid);
		} else {
			/* Ask lightningd to look up the funding txout so we
			 * can verify the channel really exists. */
			daemon_conn_send(daemon->master,
					 take(towire_gossipd_get_txout(NULL,
								       scid)));
		}
	}
	return NULL;
}
|
2017-03-11 14:45:54 +01:00
|
|
|
|
2018-11-05 02:21:51 +01:00
|
|
|
static u8 *handle_channel_update_msg(struct peer *peer, const u8 *msg)
|
|
|
|
{
|
2019-06-12 01:28:07 +02:00
|
|
|
struct short_channel_id unknown_scid;
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Hand the channel_update to the routing code */
|
2019-06-12 01:28:07 +02:00
|
|
|
u8 *err;
|
|
|
|
|
|
|
|
unknown_scid.u64 = 0;
|
2023-07-06 09:35:54 +02:00
|
|
|
err = handle_channel_update(peer->daemon->rstate, msg, &peer->id,
|
2021-01-29 01:00:09 +01:00
|
|
|
&unknown_scid, false);
|
2022-01-22 05:49:32 +01:00
|
|
|
if (err)
|
2018-11-05 02:21:51 +01:00
|
|
|
return err;
|
2022-01-22 05:49:32 +01:00
|
|
|
|
|
|
|
/* If it's an unknown channel, ask someone about it */
|
|
|
|
if (unknown_scid.u64 != 0)
|
2024-01-31 05:28:33 +01:00
|
|
|
query_unknown_channel(peer->daemon, &peer->id, unknown_scid);
|
2018-03-18 14:57:15 +01:00
|
|
|
|
|
|
|
return NULL;
|
2017-03-11 14:45:54 +01:00
|
|
|
}
|
|
|
|
|
2019-10-08 03:32:24 +02:00
|
|
|
static u8 *handle_node_announce(struct peer *peer, const u8 *msg)
|
|
|
|
{
|
|
|
|
bool was_unknown = false;
|
|
|
|
u8 *err;
|
|
|
|
|
2023-07-06 09:35:54 +02:00
|
|
|
err = handle_node_announcement(peer->daemon->rstate, msg, &peer->id,
|
2019-10-08 03:32:24 +02:00
|
|
|
&was_unknown);
|
|
|
|
if (was_unknown)
|
2024-01-31 05:28:33 +01:00
|
|
|
query_unknown_node(peer->daemon, &peer->id, NULL);
|
2019-10-08 03:32:24 +02:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2023-08-11 05:05:49 +02:00
|
|
|
/* Statistically, how many peers to we tell about each channel? */
|
|
|
|
#define GOSSIP_SPAM_REDUNDANCY 5
|
|
|
|
|
2023-03-21 22:10:53 +01:00
|
|
|
/* BOLT #7:
|
|
|
|
* - if the `gossip_queries` feature is negotiated:
|
|
|
|
* - MUST NOT relay any gossip messages it did not generate itself,
|
|
|
|
* unless explicitly requested.
|
|
|
|
*/
|
|
|
|
/* i.e. the strong implication is that we spam our own gossip aggressively!
|
|
|
|
* "Look at me!" "Look at me!!!!".
|
|
|
|
*/
|
|
|
|
static void dump_our_gossip(struct daemon *daemon, struct peer *peer)
|
|
|
|
{
|
|
|
|
struct node *me;
|
2023-08-11 05:05:49 +02:00
|
|
|
struct chan_map_iter it;
|
|
|
|
const struct chan *chan, **chans = tal_arr(tmpctx, const struct chan *, 0);
|
|
|
|
size_t num_to_send;
|
2023-03-21 22:10:53 +01:00
|
|
|
|
|
|
|
/* Find ourselves; if no channels, nothing to send */
|
|
|
|
me = get_node(daemon->rstate, &daemon->id);
|
|
|
|
if (!me)
|
|
|
|
return;
|
|
|
|
|
2023-08-11 05:05:49 +02:00
|
|
|
for (chan = first_chan(me, &it); chan; chan = next_chan(me, &it)) {
|
|
|
|
tal_arr_expand(&chans, chan);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Just in case we have many peers and not all are connecting or
|
|
|
|
* some other corner case, send everything to first few. */
|
|
|
|
if (peer_node_id_map_count(daemon->peers) <= GOSSIP_SPAM_REDUNDANCY)
|
|
|
|
num_to_send = tal_count(chans);
|
|
|
|
else {
|
|
|
|
if (tal_count(chans) < GOSSIP_SPAM_REDUNDANCY)
|
|
|
|
num_to_send = tal_count(chans);
|
|
|
|
else {
|
|
|
|
/* Pick victims at random */
|
|
|
|
tal_arr_randomize(chans, const struct chan *);
|
|
|
|
num_to_send = GOSSIP_SPAM_REDUNDANCY;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (size_t i = 0; i < num_to_send; i++) {
|
|
|
|
chan = chans[i];
|
|
|
|
|
2023-07-18 23:06:22 +02:00
|
|
|
/* Send channel_announce */
|
|
|
|
queue_peer_from_store(peer, &chan->bcast);
|
|
|
|
|
2023-07-20 04:41:14 +02:00
|
|
|
/* Send both channel_updates (if they exist): both help people
|
|
|
|
* use our channel, so we care! */
|
|
|
|
for (int dir = 0; dir < 2; dir++) {
|
|
|
|
if (is_halfchan_defined(&chan->half[dir]))
|
|
|
|
queue_peer_from_store(peer, &chan->half[dir].bcast);
|
|
|
|
}
|
2023-03-21 22:10:53 +01:00
|
|
|
}
|
2023-07-20 04:40:52 +02:00
|
|
|
|
|
|
|
/* If we have one, we should send our own node_announcement */
|
|
|
|
if (me->bcast.index)
|
|
|
|
queue_peer_from_store(peer, &me->bcast);
|
2023-03-21 22:10:53 +01:00
|
|
|
}
|
|
|
|
|
2022-01-29 04:33:05 +01:00
|
|
|
/*~ This is where connectd tells us about a new peer we might want to
|
|
|
|
* gossip with. */
|
|
|
|
static void connectd_new_peer(struct daemon *daemon, const u8 *msg)
|
2018-07-24 08:18:58 +02:00
|
|
|
{
|
2022-01-29 04:33:05 +01:00
|
|
|
struct peer *peer = tal(daemon, struct peer);
|
2018-07-24 08:18:58 +02:00
|
|
|
|
2020-08-25 04:16:22 +02:00
|
|
|
if (!fromwire_gossipd_new_peer(msg, &peer->id,
|
2022-01-08 14:29:29 +01:00
|
|
|
&peer->gossip_queries_feature)) {
|
2022-01-29 04:33:05 +01:00
|
|
|
status_failed(STATUS_FAIL_INTERNAL_ERROR,
|
|
|
|
"Bad new_peer msg from connectd: %s",
|
2018-07-24 08:18:58 +02:00
|
|
|
tal_hex(tmpctx, msg));
|
|
|
|
}
|
|
|
|
|
2022-01-29 04:33:05 +01:00
|
|
|
if (find_peer(daemon, &peer->id)) {
|
|
|
|
status_broken("Peer %s already here?",
|
|
|
|
type_to_string(tmpctx, struct node_id, &peer->id));
|
|
|
|
tal_free(find_peer(daemon, &peer->id));
|
2018-07-24 08:18:58 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Populate the rest of the peer info. */
|
2018-07-24 08:18:58 +02:00
|
|
|
peer->daemon = daemon;
|
2019-10-08 03:13:24 +02:00
|
|
|
peer->gossip_counter = 0;
|
2018-07-24 08:18:58 +02:00
|
|
|
peer->scid_queries = NULL;
|
|
|
|
peer->scid_query_idx = 0;
|
|
|
|
peer->scid_query_nodes = NULL;
|
|
|
|
peer->scid_query_nodes_idx = 0;
|
2019-06-12 01:26:07 +02:00
|
|
|
peer->scid_query_outstanding = false;
|
2020-11-09 10:30:01 +01:00
|
|
|
peer->range_replies = NULL;
|
2019-09-30 06:57:12 +02:00
|
|
|
peer->query_channel_range_cb = NULL;
|
2018-07-24 08:18:58 +02:00
|
|
|
|
2023-06-29 21:23:17 +02:00
|
|
|
/* We keep a htable so we can find peer by id */
|
|
|
|
peer_node_id_map_add(daemon->peers, peer);
|
2018-07-24 08:18:58 +02:00
|
|
|
tal_add_destructor(peer, destroy_peer);
|
|
|
|
|
2023-03-21 22:10:53 +01:00
|
|
|
/* Send everything we know about our own channels */
|
|
|
|
dump_our_gossip(daemon, peer);
|
|
|
|
|
2019-10-10 03:28:39 +02:00
|
|
|
/* This sends the initial timestamp filter. */
|
|
|
|
seeker_setup_peer_gossip(daemon->seeker, peer);
|
2022-01-29 04:33:05 +01:00
|
|
|
}
|
2018-07-24 08:18:58 +02:00
|
|
|
|
2022-01-29 04:33:05 +01:00
|
|
|
static void connectd_peer_gone(struct daemon *daemon, const u8 *msg)
|
|
|
|
{
|
|
|
|
struct node_id id;
|
|
|
|
struct peer *peer;
|
|
|
|
|
|
|
|
if (!fromwire_gossipd_peer_gone(msg, &id)) {
|
|
|
|
status_failed(STATUS_FAIL_INTERNAL_ERROR,
|
|
|
|
"Bad peer_gone msg from connectd: %s",
|
|
|
|
tal_hex(tmpctx, msg));
|
|
|
|
}
|
2018-07-24 08:18:58 +02:00
|
|
|
|
2022-01-29 04:33:05 +01:00
|
|
|
peer = find_peer(daemon, &id);
|
|
|
|
if (!peer)
|
|
|
|
status_broken("Peer %s already gone?",
|
|
|
|
type_to_string(tmpctx, struct node_id, &id));
|
|
|
|
tal_free(peer);
|
2018-07-24 08:18:58 +02:00
|
|
|
}
|
|
|
|
|
2022-01-11 02:15:48 +01:00
|
|
|
/*~ lightningd asks us if we know any addresses for a given id. */
|
|
|
|
static struct io_plan *handle_get_address(struct io_conn *conn,
|
|
|
|
struct daemon *daemon,
|
|
|
|
const u8 *msg)
|
2017-03-10 13:06:51 +01:00
|
|
|
{
|
2019-04-08 11:58:32 +02:00
|
|
|
struct node_id id;
|
2019-04-11 07:15:22 +02:00
|
|
|
u8 rgb_color[3];
|
|
|
|
u8 alias[32];
|
|
|
|
u8 *features;
|
|
|
|
struct wireaddr *addrs;
|
2021-05-27 23:35:45 +02:00
|
|
|
struct lease_rates *rates;
|
2018-06-04 06:19:25 +02:00
|
|
|
|
2022-01-11 02:15:48 +01:00
|
|
|
if (!fromwire_gossipd_get_addrs(msg, &id))
|
|
|
|
master_badmsg(WIRE_GOSSIPD_GET_ADDRS, msg);
|
2018-11-05 02:16:48 +01:00
|
|
|
|
2019-04-11 07:15:22 +02:00
|
|
|
if (!get_node_announcement_by_id(tmpctx, daemon, &id,
|
2021-05-27 23:35:45 +02:00
|
|
|
rgb_color, alias, &features, &addrs,
|
|
|
|
&rates))
|
2018-11-05 02:16:48 +01:00
|
|
|
addrs = NULL;
|
|
|
|
|
2022-01-11 02:15:48 +01:00
|
|
|
daemon_conn_send(daemon->master,
|
2020-08-25 04:16:22 +02:00
|
|
|
take(towire_gossipd_get_addrs_reply(NULL, addrs)));
|
2022-01-11 02:15:48 +01:00
|
|
|
return daemon_conn_read_next(conn, daemon->master);
|
2017-03-10 13:06:51 +01:00
|
|
|
}
|
|
|
|
|
2022-01-24 21:07:52 +01:00
|
|
|
/* connectd forwards a gossip-related message from a peer: dispatch it to
 * the right handler, and send any resulting error back to the peer.
 * Unexpected message types are fatal (connectd filters for us). */
static void handle_recv_gossip(struct daemon *daemon, const u8 *outermsg)
{
	struct node_id id;
	u8 *msg;
	const u8 *err;
	struct peer *peer;

	if (!fromwire_gossipd_recv_gossip(outermsg, outermsg, &id, &msg)) {
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "Bad gossipd_recv_gossip msg from connectd: %s",
			      tal_hex(tmpctx, outermsg));
	}

	/* Peer may have vanished between connectd sending and us reading. */
	peer = find_peer(daemon, &id);
	if (!peer) {
		status_broken("connectd sent gossip msg %s from unknown peer %s",
			      peer_wire_name(fromwire_peektype(msg)),
			      type_to_string(tmpctx, struct node_id, &id));
		return;
	}

	/* These are messages relayed from peer */
	switch ((enum peer_wire)fromwire_peektype(msg)) {
	case WIRE_CHANNEL_ANNOUNCEMENT:
		err = handle_channel_announcement_msg(peer->daemon, &id, msg);
		goto handled_msg;
	case WIRE_CHANNEL_UPDATE:
		err = handle_channel_update_msg(peer, msg);
		goto handled_msg;
	case WIRE_NODE_ANNOUNCEMENT:
		err = handle_node_announce(peer, msg);
		goto handled_msg;
	case WIRE_QUERY_CHANNEL_RANGE:
		err = handle_query_channel_range(peer, msg);
		goto handled_msg;
	case WIRE_REPLY_CHANNEL_RANGE:
		err = handle_reply_channel_range(peer, msg);
		goto handled_msg;
	case WIRE_QUERY_SHORT_CHANNEL_IDS:
		err = handle_query_short_channel_ids(peer, msg);
		goto handled_msg;
	case WIRE_REPLY_SHORT_CHANNEL_IDS_END:
		err = handle_reply_short_channel_ids_end(peer, msg);
		goto handled_msg;

	/* These are non-gossip messages (!is_msg_for_gossipd()) */
	/* Enumerated explicitly (no default:) so the compiler warns us
	 * when a new peer_wire message type is added. */
	case WIRE_WARNING:
	case WIRE_INIT:
	case WIRE_ERROR:
	case WIRE_PING:
	case WIRE_PONG:
	case WIRE_OPEN_CHANNEL:
	case WIRE_ACCEPT_CHANNEL:
	case WIRE_FUNDING_CREATED:
	case WIRE_FUNDING_SIGNED:
	case WIRE_CHANNEL_READY:
	case WIRE_SHUTDOWN:
	case WIRE_CLOSING_SIGNED:
	case WIRE_UPDATE_ADD_HTLC:
	case WIRE_UPDATE_FULFILL_HTLC:
	case WIRE_UPDATE_FAIL_HTLC:
	case WIRE_UPDATE_FAIL_MALFORMED_HTLC:
	case WIRE_COMMITMENT_SIGNED:
	case WIRE_REVOKE_AND_ACK:
	case WIRE_UPDATE_FEE:
	case WIRE_UPDATE_BLOCKHEIGHT:
	case WIRE_CHANNEL_REESTABLISH:
	case WIRE_ANNOUNCEMENT_SIGNATURES:
	case WIRE_GOSSIP_TIMESTAMP_FILTER:
	case WIRE_TX_ADD_INPUT:
	case WIRE_TX_REMOVE_INPUT:
	case WIRE_TX_ADD_OUTPUT:
	case WIRE_TX_REMOVE_OUTPUT:
	case WIRE_TX_COMPLETE:
	case WIRE_TX_ABORT:
	case WIRE_TX_SIGNATURES:
	case WIRE_TX_INIT_RBF:
	case WIRE_TX_ACK_RBF:
	case WIRE_OPEN_CHANNEL2:
	case WIRE_ACCEPT_CHANNEL2:
	case WIRE_ONION_MESSAGE:
	case WIRE_PEER_STORAGE:
	case WIRE_YOUR_PEER_STORAGE:
	case WIRE_STFU:
	case WIRE_SPLICE:
	case WIRE_SPLICE_ACK:
	case WIRE_SPLICE_LOCKED:
		break;
	}

	status_failed(STATUS_FAIL_INTERNAL_ERROR,
		      "connectd sent unexpected gossip msg %s for peer %s",
		      peer_wire_name(fromwire_peektype(msg)),
		      type_to_string(tmpctx, struct node_id, &peer->id));

handled_msg:
	if (err)
		queue_peer_msg(peer->daemon, &peer->id, take(err));
	else
		/* Some peer gave us gossip, so we're not at zero. */
		peer->daemon->gossip_store_populated = true;
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ connectd's input handler is very simple: dispatch on message type.
 * Returns the io_plan for reading the next message from connectd. */
static struct io_plan *connectd_req(struct io_conn *conn,
				    const u8 *msg,
				    struct daemon *daemon)
{
	enum connectd_gossipd_wire t = fromwire_peektype(msg);

	switch (t) {
	case WIRE_GOSSIPD_RECV_GOSSIP:
		handle_recv_gossip(daemon, msg);
		goto handled;

	case WIRE_GOSSIPD_NEW_PEER:
		connectd_new_peer(daemon, msg);
		goto handled;

	case WIRE_GOSSIPD_PEER_GONE:
		connectd_peer_gone(daemon, msg);
		goto handled;

	/* We send these, don't receive them. */
	case WIRE_GOSSIPD_SEND_GOSSIP:
		break;
	}

	/* Anything else (including the send-only type above) is fatal. */
	status_failed(STATUS_FAIL_INTERNAL_ERROR,
		      "Bad msg from connectd2: %s", tal_hex(tmpctx, msg));

handled:
	return daemon_conn_read_next(conn, daemon->connectd);
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* BOLT #7:
 *
 * A node:
 *  - if the `timestamp` of the latest `channel_update` in
 *    either direction is older than two weeks (1209600 seconds):
 *    - MAY prune the channel.
 *    - MAY ignore the channel.
 */
/* Self-rescheduling timer: prune stale channels from the routing state.
 * Runs every quarter of the prune interval, so our own refreshed updates
 * have time to land before anything we care about would be pruned. */
static void gossip_refresh_network(struct daemon *daemon)
{
	/* Schedule next run now */
	notleak(new_reltimer(&daemon->timers, daemon,
			     time_from_sec(GOSSIP_PRUNE_INTERVAL(daemon->dev_fast_gossip_prune)/4),
			     gossip_refresh_network, daemon));

	/* Prune: I hope lightningd is keeping up with our own channel
	 * refreshes! */
	route_prune(daemon->rstate);
}
|
|
|
|
|
2024-01-31 05:24:33 +01:00
|
|
|
void tell_lightningd_peer_update(struct daemon *daemon,
|
|
|
|
const struct node_id *source_peer,
|
|
|
|
struct short_channel_id scid,
|
|
|
|
u32 fee_base_msat,
|
|
|
|
u32 fee_ppm,
|
|
|
|
u16 cltv_delta,
|
|
|
|
struct amount_msat htlc_minimum,
|
|
|
|
struct amount_msat htlc_maximum)
|
|
|
|
{
|
|
|
|
struct peer_update remote_update;
|
|
|
|
u8* msg;
|
|
|
|
remote_update.scid = scid;
|
|
|
|
remote_update.fee_base = fee_base_msat;
|
|
|
|
remote_update.fee_ppm = fee_ppm;
|
|
|
|
remote_update.cltv_delta = cltv_delta;
|
|
|
|
remote_update.htlc_minimum_msat = htlc_minimum;
|
|
|
|
remote_update.htlc_maximum_msat = htlc_maximum;
|
|
|
|
msg = towire_gossipd_remote_channel_update(NULL, source_peer, &remote_update);
|
|
|
|
daemon_conn_send(daemon->master, take(msg));
|
|
|
|
}
|
|
|
|
|
2023-07-28 07:36:18 +02:00
|
|
|
/* Send lightningd the channel_updates for the local ends of our own
 * channels, plus our node_announcement, as loaded from the gossip_store.
 * (The init_* wire names suggest this runs at startup — the caller is
 * outside this view; confirm against where it's invoked.) */
static void tell_master_local_cupdates(struct daemon *daemon)
{
	struct chan_map_iter i;
	struct chan *c;
	struct node *me;

	/* No node entry for ourselves means nothing to report. */
	me = get_node(daemon->rstate, &daemon->id);
	if (!me)
		return;

	for (c = first_chan(me, &i); c; c = next_chan(me, &i)) {
		struct half_chan *hc;
		int direction;
		const u8 *cupdate;

		/* Skip channels where we can't tell which side is ours. */
		if (!local_direction(daemon->rstate, c, &direction))
			continue;

		/* Only send if our side actually has a channel_update. */
		hc = &c->half[direction];
		if (!is_halfchan_defined(hc))
			continue;

		cupdate = gossip_store_get(tmpctx,
					   daemon->gs,
					   hc->bcast.index);
		daemon_conn_send(daemon->master,
				 take(towire_gossipd_init_cupdate(NULL,
								  &c->scid,
								  cupdate)));
	}

	/* Tell lightningd about our current node_announcement, if any */
	if (me->bcast.index) {
		const u8 *nannounce;
		nannounce = gossip_store_get(tmpctx,
					     daemon->gs,
					     me->bcast.index);
		daemon_conn_send(daemon->master,
				 take(towire_gossipd_init_nannounce(NULL,
								    nannounce)));
	}
}
|
|
|
|
|
2023-06-29 21:23:17 +02:00
|
|
|
struct peer *first_random_peer(struct daemon *daemon,
|
|
|
|
struct peer_node_id_map_iter *it)
|
2019-06-12 01:29:12 +02:00
|
|
|
{
|
2023-06-29 21:23:17 +02:00
|
|
|
return peer_node_id_map_pick(daemon->peers, pseudorand_u64(), it);
|
|
|
|
}
|
2019-06-12 01:29:12 +02:00
|
|
|
|
2023-06-29 21:23:17 +02:00
|
|
|
struct peer *next_random_peer(struct daemon *daemon,
|
|
|
|
const struct peer *first,
|
|
|
|
struct peer_node_id_map_iter *it)
|
|
|
|
{
|
|
|
|
struct peer *p;
|
2019-10-08 03:10:24 +02:00
|
|
|
|
2023-06-29 21:23:17 +02:00
|
|
|
p = peer_node_id_map_next(daemon->peers, it);
|
|
|
|
if (!p)
|
|
|
|
p = peer_node_id_map_first(daemon->peers, it);
|
2019-10-08 03:10:24 +02:00
|
|
|
|
2023-06-29 21:23:17 +02:00
|
|
|
/* Full circle? */
|
|
|
|
if (p == first)
|
|
|
|
return NULL;
|
|
|
|
return p;
|
2019-06-12 01:29:12 +02:00
|
|
|
}
|
|
|
|
|
2022-03-23 00:01:36 +01:00
|
|
|
/* This is called when lightningd or connectd closes its connection to
 * us. We simply exit. */
/*~ Installed as a tal destructor on both daemon->master and
 * daemon->connectd, so losing either connection tears us down. */
static void master_or_connectd_gone(struct daemon_conn *dc UNUSED)
{
	daemon_shutdown();
	/* Can't tell master, it's gone. */
	exit(2);
}
|
|
|
|
|
2024-01-31 05:29:33 +01:00
|
|
|
struct timeabs gossip_time_now(const struct daemon *daemon)
|
|
|
|
{
|
|
|
|
if (daemon->dev_gossip_time)
|
|
|
|
return *daemon->dev_gossip_time;
|
|
|
|
|
|
|
|
return time_now();
|
|
|
|
}
|
|
|
|
|
2024-01-31 05:30:33 +01:00
|
|
|
/* We don't check this when loading from the gossip_store: that would break
|
|
|
|
* our canned tests, and usually old gossip is better than no gossip */
|
|
|
|
bool timestamp_reasonable(const struct daemon *daemon, u32 timestamp)
|
|
|
|
{
|
|
|
|
u64 now = gossip_time_now(daemon).ts.tv_sec;
|
|
|
|
|
|
|
|
/* More than one day ahead? */
|
|
|
|
if (timestamp > now + 24*60*60)
|
|
|
|
return false;
|
|
|
|
/* More than 2 weeks behind? */
|
|
|
|
if (timestamp < now - GOSSIP_PRUNE_INTERVAL(daemon->dev_fast_gossip_prune))
|
|
|
|
return false;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ Parse init message from lightningd: starts the daemon properly. */
static void gossip_init(struct daemon *daemon, const u8 *msg)
{
	u32 *dev_gossip_time;

	if (!fromwire_gossipd_init(daemon, msg,
				   &chainparams,
				   &daemon->our_features,
				   &daemon->id,
				   &dev_gossip_time,
				   &daemon->dev_fast_gossip,
				   &daemon->dev_fast_gossip_prune)) {
		master_badmsg(WIRE_GOSSIPD_INIT, msg);
	}

	/* Optional dev override: pin gossip_time_now() to a fixed second
	 * (only legal in developer mode). */
	if (dev_gossip_time) {
		assert(daemon->developer);
		daemon->dev_gossip_time = tal(daemon, struct timeabs);
		daemon->dev_gossip_time->ts.tv_sec = *dev_gossip_time;
		daemon->dev_gossip_time->ts.tv_nsec = 0;
		tal_free(dev_gossip_time);
	}

	/* Order matters here: the store must exist before the routing
	 * state and gossmap manager, and before we load from it. */
	daemon->gs = gossip_store_new(daemon);
	daemon->rstate = new_routing_state(daemon, daemon);

	daemon->gm = gossmap_manage_new_gossmap_only(daemon, daemon);

	/* Load stored gossip messages (FIXME: API sucks)*/
	daemon->gossip_store_populated =
		(gossip_store_load(daemon->gs) != 0);

	/* Start the twice-weekly refresh timer (prune interval / 4). */
	notleak(new_reltimer(&daemon->timers, daemon,
			     time_from_sec(GOSSIP_PRUNE_INTERVAL(daemon->dev_fast_gossip_prune) / 4),
			     gossip_refresh_network, daemon));

	/* Fire up the seeker! */
	daemon->seeker = new_seeker(daemon);

	/* connectd is already started, and uses this fd to feed/recv gossip. */
	daemon->connectd = daemon_conn_new(daemon, CONNECTD_FD,
					   connectd_req,
					   maybe_send_query_responses, daemon);
	/* If connectd vanishes, so do we (same destructor as master). */
	tal_add_destructor(daemon->connectd, master_or_connectd_gone);

	/* Tell it about all our local (public) channel_update messages,
	 * and node_announcement, so it doesn't unnecessarily regenerate them. */
	tell_master_local_cupdates(daemon);

	/* OK, we are ready. */
	daemon_conn_send(daemon->master,
			 take(towire_gossipd_init_reply(NULL)));
}
|
|
|
|
|
2021-12-29 04:26:43 +01:00
|
|
|
static void new_blockheight(struct daemon *daemon, const u8 *msg)
|
2019-09-22 04:06:43 +02:00
|
|
|
{
|
2020-08-25 04:05:45 +02:00
|
|
|
if (!fromwire_gossipd_new_blockheight(msg, &daemon->current_blockheight))
|
|
|
|
master_badmsg(WIRE_GOSSIPD_NEW_BLOCKHEIGHT, msg);
|
2019-09-22 04:21:19 +02:00
|
|
|
|
|
|
|
/* Check if we can now send any deferred queries. */
|
|
|
|
for (size_t i = 0; i < tal_count(daemon->deferred_txouts); i++) {
|
|
|
|
const struct short_channel_id *scid
|
|
|
|
= &daemon->deferred_txouts[i];
|
|
|
|
|
|
|
|
if (!is_scid_depth_announceable(scid,
|
|
|
|
daemon->current_blockheight))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/* short_channel_id is deep enough, now ask about it. */
|
|
|
|
daemon_conn_send(daemon->master,
|
2020-08-25 04:05:45 +02:00
|
|
|
take(towire_gossipd_get_txout(NULL, scid)));
|
2019-09-22 04:21:19 +02:00
|
|
|
|
|
|
|
tal_arr_remove(&daemon->deferred_txouts, i);
|
|
|
|
i--;
|
|
|
|
}
|
2022-01-08 14:21:29 +01:00
|
|
|
|
2022-09-14 05:50:32 +02:00
|
|
|
routing_expire_channels(daemon->rstate, daemon->current_blockheight);
|
|
|
|
|
2022-01-08 14:21:29 +01:00
|
|
|
daemon_conn_send(daemon->master,
|
|
|
|
take(towire_gossipd_new_blockheight_reply(NULL)));
|
2019-09-22 04:06:43 +02:00
|
|
|
}
|
|
|
|
|
2021-12-29 04:26:43 +01:00
|
|
|
static void dev_gossip_memleak(struct daemon *daemon, const u8 *msg)
|
2018-11-21 23:41:49 +01:00
|
|
|
{
|
|
|
|
struct htable *memtable;
|
|
|
|
bool found_leak;
|
|
|
|
|
2022-09-16 05:15:03 +02:00
|
|
|
memtable = memleak_start(tmpctx);
|
|
|
|
memleak_ptr(memtable, msg);
|
2018-11-21 23:41:49 +01:00
|
|
|
/* Now delete daemon and those which it has pointers to. */
|
2022-09-16 05:14:39 +02:00
|
|
|
memleak_scan_obj(memtable, daemon);
|
2023-06-29 21:23:17 +02:00
|
|
|
memleak_scan_htable(memtable, &daemon->peers->raw);
|
2024-01-31 04:16:20 +01:00
|
|
|
dev_seeker_memleak(memtable, daemon->seeker);
|
2018-11-21 23:41:49 +01:00
|
|
|
|
2023-10-03 04:28:55 +02:00
|
|
|
found_leak = dump_memleak(memtable, memleak_status_broken, NULL);
|
2018-11-21 23:41:49 +01:00
|
|
|
daemon_conn_send(daemon->master,
|
2020-08-25 04:05:45 +02:00
|
|
|
take(towire_gossipd_dev_memleak_reply(NULL,
|
2018-11-21 23:41:49 +01:00
|
|
|
found_leak)));
|
|
|
|
}
|
2019-04-08 01:52:19 +02:00
|
|
|
|
2021-12-29 04:26:43 +01:00
|
|
|
static void dev_gossip_set_time(struct daemon *daemon, const u8 *msg)
|
2019-09-12 02:24:00 +02:00
|
|
|
{
|
|
|
|
u32 time;
|
|
|
|
|
2020-08-25 04:05:45 +02:00
|
|
|
if (!fromwire_gossipd_dev_set_time(msg, &time))
|
|
|
|
master_badmsg(WIRE_GOSSIPD_DEV_SET_TIME, msg);
|
2024-01-31 05:29:33 +01:00
|
|
|
if (!daemon->dev_gossip_time)
|
|
|
|
daemon->dev_gossip_time = tal(daemon, struct timeabs);
|
|
|
|
daemon->dev_gossip_time->ts.tv_sec = time;
|
|
|
|
daemon->dev_gossip_time->ts.tv_nsec = 0;
|
2019-09-12 02:24:00 +02:00
|
|
|
}
|
2018-06-04 06:22:25 +02:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ We queue incoming channel_announcement pending confirmation from lightningd
|
|
|
|
* that it really is an unspent output. Here's its reply. */
|
2021-12-29 04:26:43 +01:00
|
|
|
static void handle_txout_reply(struct daemon *daemon, const u8 *msg)
|
2018-01-04 12:40:58 +01:00
|
|
|
{
|
|
|
|
struct short_channel_id scid;
|
|
|
|
u8 *outscript;
|
2019-02-21 04:45:55 +01:00
|
|
|
struct amount_sat sat;
|
2019-10-08 03:18:24 +02:00
|
|
|
bool good;
|
2018-01-04 12:40:58 +01:00
|
|
|
|
2020-08-25 04:05:45 +02:00
|
|
|
if (!fromwire_gossipd_get_txout_reply(msg, msg, &scid, &sat, &outscript))
|
|
|
|
master_badmsg(WIRE_GOSSIPD_GET_TXOUT_REPLY, msg);
|
2018-01-04 12:40:58 +01:00
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/* Outscript is NULL if it's not an unspent output */
|
2019-10-08 03:18:24 +02:00
|
|
|
good = handle_pending_cannouncement(daemon, daemon->rstate,
|
|
|
|
&scid, sat, outscript);
|
|
|
|
|
|
|
|
/* If we looking specifically for this, we no longer are. */
|
|
|
|
remove_unknown_scid(daemon->seeker, &scid, good);
|
2018-01-04 12:40:58 +01:00
|
|
|
}
|
|
|
|
|
2021-02-02 06:16:20 +01:00
|
|
|
/*~ lightningd tells us when about a gossip message directly, when told to by
|
|
|
|
* the addgossip RPC call. That's usually used when a plugin gets an update
|
|
|
|
* returned in an payment error. */
|
2021-12-29 04:26:43 +01:00
|
|
|
static void inject_gossip(struct daemon *daemon, const u8 *msg)
|
2021-02-02 06:16:20 +01:00
|
|
|
{
|
|
|
|
u8 *goss;
|
|
|
|
const u8 *errmsg;
|
|
|
|
const char *err;
|
2024-01-31 05:33:12 +01:00
|
|
|
struct amount_sat *known_amount;
|
2021-02-02 06:16:20 +01:00
|
|
|
|
2024-01-31 05:33:12 +01:00
|
|
|
if (!fromwire_gossipd_addgossip(msg, msg, &goss, &known_amount))
|
2021-02-02 06:16:20 +01:00
|
|
|
master_badmsg(WIRE_GOSSIPD_ADDGOSSIP, msg);
|
|
|
|
|
|
|
|
switch (fromwire_peektype(goss)) {
|
|
|
|
case WIRE_CHANNEL_ANNOUNCEMENT:
|
|
|
|
errmsg = handle_channel_announcement_msg(daemon, NULL, goss);
|
|
|
|
break;
|
|
|
|
case WIRE_NODE_ANNOUNCEMENT:
|
|
|
|
errmsg = handle_node_announcement(daemon->rstate, goss,
|
|
|
|
NULL, NULL);
|
|
|
|
break;
|
|
|
|
case WIRE_CHANNEL_UPDATE:
|
|
|
|
errmsg = handle_channel_update(daemon->rstate, goss,
|
|
|
|
NULL, NULL, true);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
err = tal_fmt(tmpctx, "unknown gossip type %i",
|
|
|
|
fromwire_peektype(goss));
|
|
|
|
goto err_extracted;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* The APIs above are designed to send error messages back to peers:
|
|
|
|
* we extract the raw string instead. */
|
|
|
|
if (errmsg) {
|
|
|
|
err = sanitize_error(tmpctx, errmsg, NULL);
|
|
|
|
tal_free(errmsg);
|
|
|
|
} else
|
|
|
|
/* Send empty string if no error. */
|
|
|
|
err = "";
|
|
|
|
|
|
|
|
err_extracted:
|
|
|
|
daemon_conn_send(daemon->master,
|
|
|
|
take(towire_gossipd_addgossip_reply(NULL, err)));
|
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ This is where lightningd tells us that a channel's funding transaction has
|
|
|
|
* been spent. */
|
2022-09-14 05:50:32 +02:00
|
|
|
static void handle_outpoints_spent(struct daemon *daemon, const u8 *msg)
|
2018-03-26 20:10:03 +02:00
|
|
|
{
|
2022-09-14 05:50:32 +02:00
|
|
|
struct short_channel_id *scids;
|
|
|
|
u32 blockheight;
|
2018-03-28 12:14:01 +02:00
|
|
|
|
2022-09-14 05:50:32 +02:00
|
|
|
if (!fromwire_gossipd_outpoints_spent(msg, msg, &blockheight, &scids))
|
|
|
|
master_badmsg(WIRE_GOSSIPD_OUTPOINTS_SPENT, msg);
|
|
|
|
|
|
|
|
for (size_t i = 0; i < tal_count(scids); i++) {
|
2022-09-14 05:50:32 +02:00
|
|
|
struct chan *chan = get_channel(daemon->rstate, &scids[i]);
|
2022-09-14 05:50:32 +02:00
|
|
|
|
|
|
|
if (!chan)
|
|
|
|
continue;
|
|
|
|
|
2022-09-14 05:50:32 +02:00
|
|
|
/* We have a current_blockheight, but it's not necessarily
|
|
|
|
* updated first. */
|
|
|
|
routing_channel_spent(daemon->rstate, blockheight, chan);
|
2018-03-28 12:14:01 +02:00
|
|
|
}
|
2018-03-26 20:10:03 +02:00
|
|
|
}
|
|
|
|
|
2018-11-21 01:36:08 +01:00
|
|
|
/*~ This routine handles all the commands from lightningd. */
static struct io_plan *recv_req(struct io_conn *conn,
				const u8 *msg,
				struct daemon *daemon)
{
	enum gossipd_wire t = fromwire_peektype(msg);

	switch (t) {
	case WIRE_GOSSIPD_INIT:
		gossip_init(daemon, msg);
		goto done;

	case WIRE_GOSSIPD_GET_TXOUT_REPLY:
		handle_txout_reply(daemon, msg);
		goto done;

	case WIRE_GOSSIPD_OUTPOINTS_SPENT:
		handle_outpoints_spent(daemon, msg);
		goto done;

	case WIRE_GOSSIPD_NEW_BLOCKHEIGHT:
		new_blockheight(daemon, msg);
		goto done;

	case WIRE_GOSSIPD_ADDGOSSIP:
		inject_gossip(daemon, msg);
		goto done;

	/* This one replies asynchronously via its own io_plan. */
	case WIRE_GOSSIPD_GET_ADDRS:
		return handle_get_address(conn, daemon, msg);

	/* Dev-only commands: outside developer mode they fall through
	 * to the "bad request" failure below. */
	case WIRE_GOSSIPD_DEV_SET_MAX_SCIDS_ENCODE_SIZE:
		if (daemon->developer) {
			dev_set_max_scids_encode_size(daemon, msg);
			goto done;
		}
		/* fall thru */
	case WIRE_GOSSIPD_DEV_MEMLEAK:
		if (daemon->developer) {
			dev_gossip_memleak(daemon, msg);
			goto done;
		}
		/* fall thru */
	case WIRE_GOSSIPD_DEV_SET_TIME:
		if (daemon->developer) {
			dev_gossip_set_time(daemon, msg);
			goto done;
		}
		/* fall thru */

	/* We send these, we don't receive them */
	case WIRE_GOSSIPD_INIT_CUPDATE:
	case WIRE_GOSSIPD_INIT_NANNOUNCE:
	case WIRE_GOSSIPD_INIT_REPLY:
	case WIRE_GOSSIPD_GET_TXOUT:
	case WIRE_GOSSIPD_DEV_MEMLEAK_REPLY:
	case WIRE_GOSSIPD_ADDGOSSIP_REPLY:
	case WIRE_GOSSIPD_NEW_BLOCKHEIGHT_REPLY:
	case WIRE_GOSSIPD_GET_ADDRS_REPLY:
	case WIRE_GOSSIPD_REMOTE_CHANNEL_UPDATE:
		break;
	}

	/* Master shouldn't give bad requests. */
	status_failed(STATUS_FAIL_MASTER_IO, "%i: %s",
		      t, tal_hex(tmpctx, msg));

done:
	return daemon_conn_read_next(conn, daemon->master);
}
|
|
|
|
|
|
|
|
int main(int argc, char *argv[])
{
	struct daemon *daemon;
	bool developer;

	setup_locale();

	/* Standard subdaemon argument handling; returns the --developer
	 * flag so we can gate dev-only commands. */
	developer = subdaemon_setup(argc, argv);

	daemon = tal(NULL, struct daemon);
	daemon->developer = developer;
	daemon->dev_gossip_time = NULL;
	/* Empty peer map and no deferred txout queries yet. */
	daemon->peers = tal(daemon, struct peer_node_id_map);
	peer_node_id_map_init(daemon->peers);
	daemon->deferred_txouts = tal_arr(daemon, struct short_channel_id, 0);
	daemon->current_blockheight = 0; /* i.e. unknown */

	/* Tell the ecdh() function how to talk to hsmd */
	ecdh_hsmd_setup(HSM_FD, status_failed);

	/* Note the use of time_mono() here. That's a monotonic clock, which
	 * is really useful: it can only be used to measure relative events
	 * (there's no correspondence to time-since-Ken-grew-a-beard or
	 * anything), but unlike time_now(), this will never jump backwards by
	 * half a second and leave me wondering how my tests failed CI! */
	timers_init(&daemon->timers, time_mono());

	/* Our daemons always use STDIN for commands from lightningd. */
	daemon->master = daemon_conn_new(daemon, STDIN_FILENO,
					 recv_req, NULL, daemon);
	tal_add_destructor(daemon->master, master_or_connectd_gone);

	status_setup_async(daemon->master);

	/* This loop never exits. io_loop() only returns if a timer has
	 * expired, or io_break() is called, or all fds are closed. We don't
	 * use io_break and closing the lightningd fd calls master_gone()
	 * which exits. */
	for (;;) {
		struct timer *expired = NULL;
		io_loop(&daemon->timers, &expired);

		timer_expired(expired);
	}
}
|
2018-11-21 01:36:08 +01:00
|
|
|
|
|
|
|
/*~ Note that the actual routing stuff is in routing.c; you might want to
|
|
|
|
* check that out later.
|
|
|
|
*
|
2019-09-01 22:14:50 +02:00
|
|
|
* But that's the last of the global daemons. We now move on to the first of
|
2018-11-21 01:36:08 +01:00
|
|
|
* the per-peer daemons: openingd/openingd.c.
|
|
|
|
*/
|