/* This contains the code which actively seeks out gossip from peers */
#include <bitcoin/short_channel_id.h>
#include <ccan/array_size/array_size.h>
#include <ccan/asort/asort.h>
#include <ccan/list/list.h>
#include <ccan/mem/mem.h>
#include <ccan/tal/tal.h>
#include <common/decode_array.h>
#include <common/pseudorand.h>
#include <common/status.h>
#include <common/timeout.h>
#include <common/type_to_string.h>
#include <gossipd/gossipd.h>
#include <gossipd/queries.h>
#include <gossipd/routing.h>
#include <gossipd/seeker.h>
#include <wire/gen_peer_wire.h>

#define GOSSIP_SEEKER_INTERVAL(seeker) \
	DEV_FAST_GOSSIP((seeker)->daemon->rstate->dev_fast_gossip, 5, 60)

enum seeker_state {
	/* Still streaming gossip from single peer. */
	STARTING_UP,

	/* Probing: checking our startup really is finished. */
	PROBING_SCIDS,

	/* Probing: check that we have node_announcements. */
	PROBING_NANNOUNCES,

	/* Normal running. */
	NORMAL,

	/* Asking a peer for unknown scids. */
	ASKING_FOR_UNKNOWN_SCIDS,

	/* Asking a peer for stale scids. */
	ASKING_FOR_STALE_SCIDS,
};

#if DEVELOPER
bool dev_suppress_gossip;
#endif

/* Gossip we're seeking at the moment. */
struct seeker {
	struct daemon *daemon;

	enum seeker_state state;

	/* Timer which checks on progress every minute */
	struct oneshot *check_timer;

	/* Channels we've heard about, but don't know (by scid). */
	UINTMAP(bool) unknown_scids;

	/* Channels we've heard about newer timestamps for (by scid).  u8 is
	 * query_flags. */
	UINTMAP(u8 *) stale_scids;

	/* Range of scid blocks we've probed. */
	size_t scid_probe_start, scid_probe_end;

	/* During startup, we ask a single peer for gossip. */
	struct peer *random_peer_softref;

	/* This checks progress of our random peer */
	size_t prev_gossip_count;

	/* Array of scids for node announcements. */
	struct short_channel_id *nannounce_scids;
	u8 *nannounce_query_flags;

	/* Are there any node_ids we didn't know?  Implies we're
	 * missing channels. */
	bool unknown_nodes;

	/* Peers we've asked to stream us gossip */
	struct peer *gossiper_softref[5];

	/* A peer that told us about unknown gossip. */
	struct peer *preferred_peer_softref;
};

/* Mutual recursion */
static void seeker_check(struct seeker *seeker);
static void probe_some_random_scids(struct seeker *seeker);

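/* (Re)arm the timer which runs seeker_check() every
 * GOSSIP_SEEKER_INTERVAL seconds. */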
static void begin_check_timer(struct seeker *seeker)
{
	const u32 polltime = GOSSIP_SEEKER_INTERVAL(seeker);

	seeker->check_timer = new_reltimer(&seeker->daemon->timers,
					   seeker,
					   time_from_sec(polltime),
					   seeker_check, seeker);
}

/* Set this peer as our random peer; return false if NULL. */
static bool selected_peer(struct seeker *seeker, struct peer *peer)
{
	if (!peer)
		return false;

	set_softref(seeker, &seeker->random_peer_softref, peer);

	/* Give it some grace in case we immediately hit timer */
	seeker->prev_gossip_count
		= peer->gossip_counter - GOSSIP_SEEKER_INTERVAL(seeker);
	return true;
}

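/* Change state, log the transition, and remember the peer (if any)
 * whose replies we're now waiting on. */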
#define set_state(seeker, state, peer, ...)				\
	set_state_((seeker), (state), (peer), stringify(state), __VA_ARGS__)

static void PRINTF_FMT(5,6)
set_state_(struct seeker *seeker, enum seeker_state state,
	   struct peer *peer,
	   const char *statename, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	status_debug("seeker: state = %s %s%s %s",
		     statename, peer ? "from " : "",
		     peer ? type_to_string(tmpctx, struct node_id, &peer->id)
		     : "",
		     tal_vfmt(tmpctx, fmt, ap));
	va_end(ap);
	seeker->state = state;
	selected_peer(seeker, peer);
}

struct seeker *new_seeker(struct daemon *daemon)
{
	struct seeker *seeker = tal(daemon, struct seeker);

	seeker->daemon = daemon;
	uintmap_init(&seeker->unknown_scids);
	uintmap_init(&seeker->stale_scids);
	seeker->random_peer_softref = NULL;
	for (size_t i = 0; i < ARRAY_SIZE(seeker->gossiper_softref); i++)
		seeker->gossiper_softref[i] = NULL;
	seeker->preferred_peer_softref = NULL;
	seeker->unknown_nodes = false;
	set_state(seeker, STARTING_UP, NULL, "New seeker");
	begin_check_timer(seeker);
	return seeker;
}

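/* Remember a peer which told us something we didn't know: random_seeker()
 * will usually pick it first next time we need answers. */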
static void set_preferred_peer(struct seeker *seeker, struct peer *peer)
{
	if (seeker->preferred_peer_softref
	    && seeker->preferred_peer_softref != peer)
		clear_softref(seeker, &seeker->preferred_peer_softref);
	if (!seeker->preferred_peer_softref)
		set_softref(seeker, &seeker->preferred_peer_softref, peer);
}

/* Get a random peer, but try our preferred peer first, if any.  This
 * biases us to the peer that told us of unexpected gossip. */
static struct peer *random_seeker(struct seeker *seeker,
				  bool (*check_peer)(const struct peer *peer))
{
	struct peer *peer = seeker->preferred_peer_softref;

	/* 80% chance of immediately choosing a peer who reported the missing
	 * stuff: they presumably can tell us more about it.  We don't
	 * *always* choose it because it could be simply spamming us with
	 * invalid announcements to get chosen, and we don't handle that case
	 * well yet. */
	if (peer && check_peer(peer) && pseudorand(5) != 0) {
		clear_softref(seeker, &seeker->preferred_peer_softref);
		return peer;
	}

	return random_peer(seeker->daemon, check_peer);
}

static bool peer_made_progress(struct seeker *seeker)
{
	const struct peer *peer = seeker->random_peer_softref;

	/* Has it made progress (at least one valid update per second)?  If
	 * not, we assume it's finished, and if it hasn't, we'll end up
	 * querying backwards in next steps. */
	if (peer->gossip_counter
	    >= seeker->prev_gossip_count + GOSSIP_SEEKER_INTERVAL(seeker)) {
		seeker->prev_gossip_count = peer->gossip_counter;
		return true;
	}

	return false;
}

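/* Mute a peer's gossip stream by sending a timestamp filter which
 * matches nothing. */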
static void disable_gossip_stream(struct seeker *seeker, struct peer *peer)
{
	u8 *msg;

	status_debug("seeker: disabling gossip from %s",
		     type_to_string(tmpctx, struct node_id, &peer->id));

	/* This is allowed even if they don't understand it (odd) */
	msg = towire_gossip_timestamp_filter(NULL,
					     &seeker->daemon->chain_hash,
					     UINT32_MAX,
					     UINT32_MAX);
	queue_peer_msg(peer, take(msg));
}

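/* Ask a peer to stream gossip at us, starting a little before our most
 * recently seen timestamp to allow for propagation delays. */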
static void enable_gossip_stream(struct seeker *seeker, struct peer *peer)
{
	/* We seek some way back, to take into account propagation time */
	const u32 polltime = GOSSIP_SEEKER_INTERVAL(seeker) * 10;
	u32 start = seeker->daemon->rstate->last_timestamp;
	u8 *msg;

#if DEVELOPER
	if (dev_suppress_gossip)
		return;
#endif

	if (start > polltime)
		start -= polltime;
	else
		start = 0;

	status_debug("seeker: starting gossip from %s",
		     type_to_string(tmpctx, struct node_id, &peer->id));

	/* This is allowed even if they don't understand it (odd) */
	msg = towire_gossip_timestamp_filter(NULL,
					     &seeker->daemon->chain_hash,
					     start,
					     UINT32_MAX);
	queue_peer_msg(peer, take(msg));
}

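/* Steady-state handling for a peer: add it as a streaming gossiper if
 * we have a free slot, otherwise mute its gossip. */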
static void normal_gossip_start(struct seeker *seeker, struct peer *peer)
{
	bool enable_stream = false;

	/* Make this one of our streaming gossipers if we aren't full */
	for (size_t i = 0; i < ARRAY_SIZE(seeker->gossiper_softref); i++) {
		if (seeker->gossiper_softref[i] == NULL) {
			set_softref(seeker, &seeker->gossiper_softref[i], peer);
			enable_stream = true;
			break;
		}
	}

	if (enable_stream)
		enable_gossip_stream(seeker, peer);
	else
		disable_gossip_stream(seeker, peer);
}

/* Turn unknown_scids map into a flat array, removes from map. */
static struct short_channel_id *unknown_scids_remove(const tal_t *ctx,
						     struct seeker *seeker)
{
	struct short_channel_id *scids;
	/* Marshal into an array: we can fit 8000 comfortably. */
	size_t i, max = 8000;
	u64 scid;

	scids = tal_arr(ctx, struct short_channel_id, max);
	i = 0;
	while (uintmap_first(&seeker->unknown_scids, &scid)) {
		scids[i].u64 = scid;
		(void)uintmap_del(&seeker->unknown_scids, scid);
		if (++i == max)
			break;
	}
	tal_resize(&scids, i);
	return scids;
}

/* We have selected this peer to stream us startup gossip */
static void peer_gossip_startup(struct seeker *seeker, struct peer *peer)
{
	status_debug("seeker: startup peer is %s",
		     type_to_string(tmpctx, struct node_id, &peer->id));
	selected_peer(seeker, peer);
	normal_gossip_start(seeker, peer);
}

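/* Predicates for random_seeker(): which peers are able to answer which
 * kind of query right now? */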
static bool peer_has_gossip_queries(const struct peer *peer)
{
	return peer->gossip_queries_feature;
}

static bool peer_can_take_range_query(const struct peer *peer)
{
	return peer->gossip_queries_feature
		&& !peer->query_channel_blocks;
}

static bool peer_can_take_scid_query(const struct peer *peer)
{
	return peer->gossip_queries_feature
		&& !peer->scid_query_outstanding;
}

static void scid_query_done(struct peer *peer, bool complete)
{
	struct seeker *seeker = peer->daemon->seeker;

	/* Peer completed!  OK, start random scid probe in case we're
	 * still missing gossip. */
	probe_some_random_scids(seeker);
}

/* Returns true if there were scids to seek. */
static bool seek_any_unknown_scids(struct seeker *seeker)
{
	struct peer *peer;
	struct short_channel_id *scids;

	/* Nothing we need to know about? */
	if (uintmap_empty(&seeker->unknown_scids))
		return false;

	/* No peers can answer?  Try again later. */
	peer = random_seeker(seeker, peer_can_take_scid_query);
	if (!peer)
		return false;

	scids = unknown_scids_remove(tmpctx, seeker);
	set_state(seeker, ASKING_FOR_UNKNOWN_SCIDS, peer,
		  "Asking for %zu scids", tal_count(scids));
	if (!query_short_channel_ids(seeker->daemon, peer, scids, NULL,
				     scid_query_done))
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "seeker: querying %zu scids is too many?",
			      tal_count(scids));
	return true;
}

/* Turns stale_scids map into two arrays, and removes from the map */
static struct short_channel_id *stale_scids_remove(const tal_t *ctx,
						   struct seeker *seeker,
						   u8 **query_flags)
{
	struct short_channel_id *scids;
	const u8 *qf;
	/* We can fit 7000 comfortably (8 byte scid, 1 byte flag). */
	size_t i, max = 7000;
	u64 scid;

	scids = tal_arr(ctx, struct short_channel_id, max);
	*query_flags = tal_arr(ctx, u8, max);

	i = 0;
	while ((qf = uintmap_first(&seeker->stale_scids, &scid)) != NULL) {
		scids[i].u64 = scid;
		(*query_flags)[i] = *qf;
		uintmap_del(&seeker->stale_scids, scid);
		tal_free(qf);
		i++;
		/* Don't overflow the arrays; any leftovers wait for next time. */
		if (i == max)
			break;
	}
	tal_resize(&scids, i);
	tal_resize(query_flags, i);
	return scids;
}

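/* Returns true if we asked a peer for fresh channel_updates for scids
 * we know are stale. */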
static bool seek_any_stale_scids(struct seeker *seeker)
{
	struct peer *peer;
	struct short_channel_id *scids;
	u8 *query_flags;

	/* Nothing we need to know about? */
	if (uintmap_empty(&seeker->stale_scids))
		return false;

	/* No peers can answer?  Try again later. */
	peer = random_seeker(seeker, peer_can_take_scid_query);
	if (!peer)
		return false;

	/* This is best-effort, so this consumes them as well. */
	scids = stale_scids_remove(tmpctx, seeker, &query_flags);
	set_state(seeker, ASKING_FOR_STALE_SCIDS, peer,
		  "Asking for %zu scids", tal_count(scids));

	if (!query_short_channel_ids(seeker->daemon, peer, scids, query_flags,
				     scid_query_done))
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "seeker: querying %zu scids is too many?",
			      tal_count(scids));
	return true;
}

/* Returns true and sets first_blocknum and number_of_blocks if
 * there's more to find. */
static bool next_block_range(struct seeker *seeker,
			     u32 prev_num_blocks,
			     u32 *first_blocknum, u32 *number_of_blocks)
{
	const u32 current_height = seeker->daemon->current_blockheight;

	/* We always try to get twice as many as last time. */
	*number_of_blocks = prev_num_blocks * 2;

	if (seeker->scid_probe_start > 0) {
		/* Enlarge probe to cover prior blocks, but twice as many. */
		if (*number_of_blocks > seeker->scid_probe_start) {
			*number_of_blocks = seeker->scid_probe_start;
			*first_blocknum = 0;
		} else {
			*first_blocknum
				= seeker->scid_probe_start - *number_of_blocks;
		}
		seeker->scid_probe_start = *first_blocknum;
		return true;
	}

	/* We allow 6 new blocks since we started; they should be empty anyway */
	if (seeker->scid_probe_end + 6 < current_height) {
		if (seeker->scid_probe_end + *number_of_blocks > current_height)
			*number_of_blocks
				= current_height - seeker->scid_probe_end;
		*first_blocknum = seeker->scid_probe_end + 1;
		seeker->scid_probe_end = *first_blocknum + *number_of_blocks - 1;
		return true;
	}

	/* No more to find. */
	return false;
}

static int cmp_scid(const struct short_channel_id *a,
		    const struct short_channel_id *b,
		    void *unused)
{
	if (a->u64 > b->u64)
		return 1;
	else if (a->u64 < b->u64)
		return -1;
	return 0;
}

/* We can't ask for channels by node_id, so probe at random */
static bool get_unannounced_nodes(const tal_t *ctx,
				  struct routing_state *rstate,
				  size_t max,
				  struct short_channel_id **scids,
				  u8 **query_flags)
{
	size_t num = 0;
	u64 offset;
	u64 threshold = pseudorand_u64();

	/* Pick an example short_channel_id at random to query.  As a
	 * side-effect this gets the node. */
	*scids = tal_arr(ctx, struct short_channel_id, max);

	/* FIXME: This is inefficient!  Reuse next_block_range here! */
	for (struct chan *c = uintmap_first(&rstate->chanmap, &offset);
	     c;
	     c = uintmap_after(&rstate->chanmap, &offset)) {
		/* Local-only?  Don't ask. */
		if (!is_chan_public(c))
			continue;

		/* Both ends already announced?  Nothing to learn here. */
		if (c->nodes[0]->bcast.index && c->nodes[1]->bcast.index)
			continue;

		if (num < max) {
			(*scids)[num++] = c->scid;
		} else {
			/* Maybe replace one: approx. reservoir sampling */
			u64 p = pseudorand_u64();
			if (p > threshold) {
				(*scids)[pseudorand(max)] = c->scid;
				threshold = p;
			}
		}
	}

	if (num == 0) {
		*scids = tal_free(*scids);
		return false;
	}

	if (num < max)
		tal_resize(scids, num);

	/* Sort them into order. */
	asort(*scids, num, cmp_scid, NULL);

	/* Now get flags. */
	*query_flags = tal_arr(ctx, u8, num);
	for (size_t i = 0; i < tal_count(*scids); i++) {
		struct chan *c = get_channel(rstate, &(*scids)[i]);

		(*query_flags)[i] = 0;
		if (!c->nodes[0]->bcast.index)
			(*query_flags)[i] |= SCID_QF_NODE1;
		if (!c->nodes[1]->bcast.index)
			(*query_flags)[i] |= SCID_QF_NODE2;
	}
	return true;
}

/* Mutual recursion */
static void peer_gossip_probe_nannounces(struct seeker *seeker);

static void nodeannounce_query_done(struct peer *peer, bool complete)
{
	struct seeker *seeker = peer->daemon->seeker;
	struct routing_state *rstate = seeker->daemon->rstate;
	size_t new_nannounce = 0, num_scids;

	/* We might have given up on them, then they replied. */
	if (seeker->random_peer_softref != peer) {
		status_debug("seeker: belated reply from %s: ignoring",
			     type_to_string(tmpctx, struct node_id, &peer->id));
		return;
	}

	clear_softref(seeker, &seeker->random_peer_softref);

	num_scids = tal_count(seeker->nannounce_scids);
	for (size_t i = 0; i < num_scids; i++) {
		struct chan *c = get_channel(rstate,
					     &seeker->nannounce_scids[i]);
		/* Could have closed since we asked. */
		if (!c)
			continue;
		if ((seeker->nannounce_query_flags[i] & SCID_QF_NODE1)
		    && c->nodes[0]->bcast.index)
			new_nannounce++;
		if ((seeker->nannounce_query_flags[i] & SCID_QF_NODE2)
		    && c->nodes[1]->bcast.index)
			new_nannounce++;
	}

	status_debug("seeker: found %zu new node_announcements in %zu scids",
		     new_nannounce, num_scids);

	seeker->nannounce_scids = tal_free(seeker->nannounce_scids);
	seeker->nannounce_query_flags = tal_free(seeker->nannounce_query_flags);

	if (!new_nannounce) {
		set_state(seeker, NORMAL, NULL,
			  "No new node_announcements in %zu scids", num_scids);
		return;
	}

	/* Since they told us about new announcements, keep asking them. */
	set_preferred_peer(seeker, peer);

	/* Double every time.  We may skip a few, of course, since map
	 * is changing. */
	num_scids *= 2;
	/* Don't try to create a query larger than 64k */
	if (num_scids > 7000)
		num_scids = 7000;

	if (!get_unannounced_nodes(seeker, seeker->daemon->rstate, num_scids,
				   &seeker->nannounce_scids,
				   &seeker->nannounce_query_flags)) {
		/* Nothing unknown at all?  Great, we're done */
		set_state(seeker, NORMAL, NULL, "No unannounced nodes");
		return;
	}

	peer_gossip_probe_nannounces(seeker);
}

/* Pick a peer, ask it for a few node announcements, to check. */
static void peer_gossip_probe_nannounces(struct seeker *seeker)
{
	struct peer *peer;

	peer = random_seeker(seeker, peer_can_take_scid_query);
	set_state(seeker, PROBING_NANNOUNCES, peer,
		  "Probing for %zu scids",
		  tal_count(seeker->nannounce_scids));
	if (!peer)
		return;

	if (!query_short_channel_ids(seeker->daemon, peer,
				     seeker->nannounce_scids,
				     seeker->nannounce_query_flags,
				     nodeannounce_query_done))
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "seeker: querying %zu scids is too many?",
			      tal_count(seeker->nannounce_scids));
}

/* They have an update with this timestamp: do we want it? */
static bool want_update(struct seeker *seeker,
			u32 timestamp, const struct half_chan *hc)
{
	if (!is_halfchan_defined(hc))
		return timestamp != 0;

	if (timestamp <= hc->bcast.timestamp)
		return false;

	return !would_ratelimit_cupdate(seeker->daemon->rstate, hc, timestamp);
}

/* They gave us timestamps.  Do we want updated versions? */
static void check_timestamps(struct seeker *seeker,
			     struct chan *c,
			     const struct channel_update_timestamps *ts,
			     struct peer *peer)
{
	u8 *stale;
	u8 query_flag = 0;

	/* BOLT #7:
	 * * `timestamp_node_id_1` is the timestamp of the `channel_update`
	 *    for `node_id_1`, or 0 if there was no `channel_update` from that
	 *    node.
	 * * `timestamp_node_id_2` is the timestamp of the `channel_update`
	 *    for `node_id_2`, or 0 if there was no `channel_update` from that
	 *    node.
	 */
	if (want_update(seeker, ts->timestamp_node_id_1, &c->half[0]))
		query_flag |= SCID_QF_UPDATE1;
	if (want_update(seeker, ts->timestamp_node_id_2, &c->half[1]))
		query_flag |= SCID_QF_UPDATE2;

	if (!query_flag)
		return;

	/* Add in flags if we're already getting it. */
	stale = uintmap_get(&seeker->stale_scids, c->scid.u64);
	if (!stale) {
		stale = talz(seeker, u8);
		uintmap_add(&seeker->stale_scids, c->scid.u64, stale);
		set_preferred_peer(seeker, peer);
	}
	*stale |= query_flag;
}

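/* Reply handler for our channel range probes: note any scids we don't
 * know, mark stale ones, then either widen the probe or move on to
 * probing for node_announcements. */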
static void process_scid_probe(struct peer *peer,
			       u32 first_blocknum, u32 number_of_blocks,
			       const struct short_channel_id *scids,
			       const struct channel_update_timestamps *ts,
			       bool complete)
{
	struct seeker *seeker = peer->daemon->seeker;
	bool new_unknown_scids = false;

	/* We might have given up on them, then they replied. */
	if (seeker->random_peer_softref != peer)
		return;

	clear_softref(seeker, &seeker->random_peer_softref);

	for (size_t i = 0; i < tal_count(scids); i++) {
		struct chan *c = get_channel(seeker->daemon->rstate, &scids[i]);
		if (c) {
			if (ts)
				check_timestamps(seeker, c, ts+i, peer);
			continue;
		}

		new_unknown_scids |= add_unknown_scid(seeker, &scids[i], peer);
	}

	/* No new unknown scids, or no more to ask?  We give some wiggle
	 * room in case blocks came in since we started. */
	if (new_unknown_scids
	    && next_block_range(seeker, number_of_blocks,
				&first_blocknum, &number_of_blocks)) {
		/* This must return a peer, since we have the current peer! */
		peer = random_seeker(seeker, peer_can_take_range_query);
		assert(peer);
		selected_peer(seeker, peer);

		query_channel_range(seeker->daemon, peer,
				    first_blocknum, number_of_blocks,
				    QUERY_ADD_TIMESTAMPS,
				    process_scid_probe);
		return;
	}

	/* Channel probe finished, try asking for 128 unannounced nodes. */
	if (!get_unannounced_nodes(seeker, seeker->daemon->rstate, 128,
				   &seeker->nannounce_scids,
				   &seeker->nannounce_query_flags)) {
		/* No unknown nodes.  Great! */
		set_state(seeker, NORMAL, NULL, "No unannounced nodes");
		return;
	}

	peer_gossip_probe_nannounces(seeker);
}

/* Pick a peer, ask it for a few scids, to check. */
static void peer_gossip_probe_scids(struct seeker *seeker)
{
	struct peer *peer;

	peer = random_seeker(seeker, peer_can_take_range_query);
	set_state(seeker, PROBING_SCIDS, peer,
		  "Seeking scids %zu - %zu",
		  seeker->scid_probe_start, seeker->scid_probe_end);
	if (!peer)
		return;

	/* This calls process_scid_probe when we get the reply. */
	query_channel_range(seeker->daemon, peer,
			    seeker->scid_probe_start,
			    seeker->scid_probe_end - seeker->scid_probe_start + 1,
			    QUERY_ADD_TIMESTAMPS,
			    process_scid_probe);
}

static void probe_random_scids(struct seeker *seeker, size_t num_blocks)
{
	u32 avail_blocks;

	/* Ignore early blocks (unless we're before, which would be weird) */
	if (seeker->daemon->current_blockheight
	    < chainparams->when_lightning_became_cool)
		avail_blocks = seeker->daemon->current_blockheight;
	else
		avail_blocks = seeker->daemon->current_blockheight
			- chainparams->when_lightning_became_cool;

	if (avail_blocks < num_blocks) {
		seeker->scid_probe_start = 0;
		seeker->scid_probe_end = seeker->daemon->current_blockheight;
	} else {
		seeker->scid_probe_start
			= chainparams->when_lightning_became_cool
			+ pseudorand(avail_blocks - num_blocks);
		seeker->scid_probe_end
			= seeker->scid_probe_start + num_blocks - 1;
	}

	seeker->nannounce_scids = NULL;
	peer_gossip_probe_scids(seeker);
}

/* We usually get a channel per block, so these cover a fair bit of ground */
static void probe_some_random_scids(struct seeker *seeker)
{
	return probe_random_scids(seeker, 64);
}

static void probe_many_random_scids(struct seeker *seeker)
{
	return probe_random_scids(seeker, 1008);
}

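/* STARTING_UP check: once our initial gossip peer stops making
 * progress, let the other peers gossip and probe for missing scids. */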
static void check_firstpeer(struct seeker *seeker)
{
	struct peer *peer = seeker->random_peer_softref, *p;

	/* It might have died, pick another. */
	if (!peer) {
		peer = random_seeker(seeker, peer_has_gossip_queries);
		/* No peer?  Wait for a new one to join. */
		if (!peer) {
			status_debug("seeker: no peers, waiting");
			return;
		}

		peer_gossip_startup(seeker, peer);
		return;
	}

	/* If no progress, we assume it's finished, and if it hasn't,
	 * we'll end up querying backwards in next steps. */
	if (peer_made_progress(seeker))
		return;

	/* Other peers can gossip now. */
	status_debug("seeker: startup peer finished");
	clear_softref(seeker, &seeker->random_peer_softref);
	list_for_each(&seeker->daemon->peers, p, list) {
		if (p == peer)
			continue;

		normal_gossip_start(seeker, p);
	}

	/* Ask a random peer for all channels, in case we're missing */
	seeker->scid_probe_start = chainparams->when_lightning_became_cool;
	seeker->scid_probe_end = seeker->daemon->current_blockheight;
	if (seeker->scid_probe_start > seeker->scid_probe_end)
		seeker->scid_probe_start = 0;
	peer_gossip_probe_scids(seeker);
}

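/* Watchdog for in-flight probes: if the peer died or stopped
 * answering, give up on it and call the restart callback. */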
static void check_probe(struct seeker *seeker,
			void (*restart)(struct seeker *seeker))
{
	struct peer *peer = seeker->random_peer_softref;

	/* It might have died, pick another. */
	if (!peer) {
		restart(seeker);
		return;
	}

	/* Is peer making progress with responses? */
	if (peer_made_progress(seeker))
		return;

	status_debug("Peer %s has only moved gossip %zu->%zu for probe, giving up on it",
		     type_to_string(tmpctx, struct node_id, &peer->id),
		     seeker->prev_gossip_count, peer->gossip_counter);
	clear_softref(seeker, &seeker->random_peer_softref);
	restart(seeker);
}

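/* random_seeker() predicate: true unless this peer is already one of
 * our streaming gossipers. */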
static bool peer_is_not_gossipper(const struct peer *peer)
{
	const struct seeker *seeker = peer->daemon->seeker;

	for (size_t i = 0; i < ARRAY_SIZE(seeker->gossiper_softref); i++) {
		if (seeker->gossiper_softref[i] == peer)
			return false;
	}
	return true;
}

/* FIXME: We should look at gossip performance and replace the underperforming
 * peers in preference. */
static void maybe_rotate_gossipers(struct seeker *seeker)
{
	struct peer *peer;
	size_t i;

	/* If all peers are gossiping, we're done */
	peer = random_seeker(seeker, peer_is_not_gossipper);
	if (!peer)
		return;

	/* If we have a slot free, or ~ 1 per hour */
	for (i = 0; i < ARRAY_SIZE(seeker->gossiper_softref); i++) {
		if (!seeker->gossiper_softref[i]) {
			status_debug("seeker: filling slot %zu with %s",
				     i, type_to_string(tmpctx, struct node_id,
						       &peer->id));
			goto set_gossiper;
		}
		if (pseudorand(ARRAY_SIZE(seeker->gossiper_softref) * 60) == 0) {
			status_debug("seeker: replacing slot %zu with %s",
				     i, type_to_string(tmpctx, struct node_id,
						       &peer->id));
			goto clear_and_set_gossiper;
		}
	}
	return;

clear_and_set_gossiper:
	disable_gossip_stream(seeker, seeker->gossiper_softref[i]);
	clear_softref(seeker, &seeker->gossiper_softref[i]);
set_gossiper:
	set_softref(seeker, &seeker->gossiper_softref[i], peer);
	enable_gossip_stream(seeker, peer);
}

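/* A peer mentioned a node we don't know about, which implies missing
 * channels too: launch a wide random scid probe. */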
static bool seek_any_unknown_nodes(struct seeker *seeker)
{
	if (!seeker->unknown_nodes)
		return false;

	seeker->unknown_nodes = false;
	probe_many_random_scids(seeker);
	return true;
}

/* Periodic timer to see how our gossip is going. */
static void seeker_check(struct seeker *seeker)
{
#if DEVELOPER
	if (dev_suppress_gossip)
		goto out;
#endif

	/* We don't do anything until we're synced. */
	if (seeker->daemon->current_blockheight == 0)
		goto out;

	switch (seeker->state) {
	case STARTING_UP:
		check_firstpeer(seeker);
		break;
	case PROBING_SCIDS:
		check_probe(seeker, peer_gossip_probe_scids);
		break;
	case ASKING_FOR_UNKNOWN_SCIDS:
		check_probe(seeker, probe_many_random_scids);
		break;
	case ASKING_FOR_STALE_SCIDS:
		check_probe(seeker, probe_some_random_scids);
		break;
	case PROBING_NANNOUNCES:
		check_probe(seeker, peer_gossip_probe_nannounces);
		break;
	case NORMAL:
		maybe_rotate_gossipers(seeker);
		if (!seek_any_unknown_scids(seeker)
		    && !seek_any_stale_scids(seeker))
			seek_any_unknown_nodes(seeker);
		break;
	}

out:
	begin_check_timer(seeker);
}

/* We get this when we have a new peer. */
void seeker_setup_peer_gossip(struct seeker *seeker, struct peer *peer)
{
	/* Can't do anything useful with these peers. */
	if (!peer->gossip_queries_feature)
		return;

#if DEVELOPER
	if (dev_suppress_gossip)
		return;
#endif
	/* Don't start gossiping until we're synced. */
	if (seeker->daemon->current_blockheight == 0)
		return;

	switch (seeker->state) {
	case STARTING_UP:
		if (seeker->random_peer_softref == NULL)
			peer_gossip_startup(seeker, peer);
		/* Waiting for seeker_check to release us */
		return;

	/* In these states, we set up peers to stream gossip normally */
	case PROBING_SCIDS:
	case PROBING_NANNOUNCES:
	case NORMAL:
	case ASKING_FOR_UNKNOWN_SCIDS:
	case ASKING_FOR_STALE_SCIDS:
		normal_gossip_start(seeker, peer);
		return;
	}
	abort();
}

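/* Forget an scid we were seeking; returns true if we were tracking it. */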
bool remove_unknown_scid(struct seeker *seeker,
			 const struct short_channel_id *scid,
			 bool found /*FIXME: use this info!*/)
{
	return uintmap_del(&seeker->unknown_scids, scid->u64);
}

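/* Note an scid we've heard about but don't have; returns false if we
 * were already tracking it. */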
bool add_unknown_scid(struct seeker *seeker,
		      const struct short_channel_id *scid,
		      struct peer *peer)
{
	/* Check we're not already getting this one. */
	if (!uintmap_add(&seeker->unknown_scids, scid->u64, true))
		return false;

	set_preferred_peer(seeker, peer);
	return true;
}

/* This peer told us about an update to an unknown channel.  Ask it for a
 * channel_announcement. */
void query_unknown_channel(struct daemon *daemon,
			   struct peer *peer,
			   const struct short_channel_id *id)
{
	/* Too many, or duplicate? */
	if (!add_unknown_scid(daemon->seeker, id, peer))
		return;
}

/* This peer told us about an unknown node.  Start probing it. */
void query_unknown_node(struct seeker *seeker, struct peer *peer)
{
	seeker->unknown_nodes = true;
	set_preferred_peer(seeker, peer);
}
|