#include "config.h"
#include <bitcoin/chainparams.h>
#include <bitcoin/script.h>
#include <ccan/array_size/array_size.h>
#include <ccan/tal/str/str.h>
#include <common/gossip_store.h>
#include <common/memleak.h>
#include <common/pseudorand.h>
#include <common/status.h>
#include <common/timeout.h>
#include <common/type_to_string.h>
#include <common/wire_error.h>
#include <gossipd/gossip_generation.h>
#include <gossipd/gossip_store_wiregen.h>
#include <gossipd/gossipd_wiregen.h>
#include <gossipd/routing.h>

#ifndef SUPERVERBOSE
#define SUPERVERBOSE(...)
#endif

/* 365.25 * 24 * 60 / 10 */
#define BLOCKS_PER_YEAR 52596

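/* (That is: 365.25 days * 24 hours * 60 minutes = 525960 minutes per
 * year; at one block per 10 minutes, 52596 blocks.) */
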
struct pending_node_announce {
        struct routing_state *rstate;
        struct node_id nodeid;
        size_t refcount;
        u8 *node_announcement;
        u32 timestamp;
        u32 index;
        /* If non-NULL this is peer to credit it with */
        struct node_id *source_peer;
};

/* As per the below BOLT #7 quote, we delay forgetting a channel until 12
 * blocks after we see it close. This gives time for splicing (or even other
 * opens) to replace the channel, and broadcast it after 6 blocks. */
struct dying_channel {
        struct short_channel_id scid;
        u32 deadline_blockheight;
        /* Where the dying_channel marker is in the store. */
        struct broadcastable marker;
};

/* We consider a reasonable gossip rate to be 2 per day, with burst of
 * 4 per day. So we use a granularity of one hour. */
#define TOKENS_PER_MSG 12
#define TOKEN_MAX (12 * 4)

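/* Worked example of the rates above: each accepted message costs
 * TOKENS_PER_MSG (12) tokens, and one token accrues per
 * GOSSIP_TOKEN_TIME (an hour, per the comment above), i.e. 24 tokens
 * a day, sustaining 2 messages per day; a full bucket of TOKEN_MAX
 * (48) tokens funds a burst of 4. */
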
static u8 update_tokens(const struct routing_state *rstate,
                        u8 tokens, u32 prev_timestamp, u32 new_timestamp)
{
        u64 num_tokens = tokens;

        assert(new_timestamp >= prev_timestamp);

        num_tokens += ((new_timestamp - prev_timestamp)
                       / GOSSIP_TOKEN_TIME(rstate->dev_fast_gossip));
        if (num_tokens > TOKEN_MAX)
                num_tokens = TOKEN_MAX;
        return num_tokens;
}

static bool ratelimit(const struct routing_state *rstate,
                      u8 *tokens, u32 prev_timestamp, u32 new_timestamp)
{
        *tokens = update_tokens(rstate, *tokens, prev_timestamp, new_timestamp);

        /* Now, if we can afford it, pass this message. */
        if (*tokens >= TOKENS_PER_MSG) {
                *tokens -= TOKENS_PER_MSG;
                return true;
        }
        return false;
}

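/* Sketch of intended use (the callers live elsewhere in gossipd):
 * both struct node and struct half_chan carry a `tokens` bucket,
 * initialized to TOKEN_MAX below, so accepting e.g. a channel_update
 * looks roughly like:
 *
 *      if (!ratelimit(rstate, &hc->tokens,
 *                     hc->bcast.timestamp, new_timestamp))
 *              ... keep for our own graph, but don't rebroadcast ...
 *
 * (hypothetical variable names; the "keep but don't rebroadcast"
 * treatment of rate-limited gossip is discussed in the rgraph
 * vs. bcast note further down.) */
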
static const struct node_id *
pending_node_announce_keyof(const struct pending_node_announce *a)
{
        return &a->nodeid;
}

static bool pending_node_announce_eq(const struct pending_node_announce *pna,
                                     const struct node_id *pc)
{
        return node_id_eq(&pna->nodeid, pc);
}

HTABLE_DEFINE_TYPE(struct pending_node_announce, pending_node_announce_keyof,
                   node_map_hash_key, pending_node_announce_eq,
                   pending_node_map);

/* We keep around announcements for channels until we have an
 * update for them (which gives us their timestamp) */
struct unupdated_channel {
        /* The channel_announcement message */
        const u8 *channel_announce;
        /* The short_channel_id */
        struct short_channel_id scid;
        /* The ids of the nodes */
        struct node_id id[2];
        /* When we added, so we can discard old ones */
        struct timeabs added;
        /* If we loaded from the store, this is where. */
        u32 index;
        /* Channel capacity */
        struct amount_sat sat;
        /* If non-NULL this is peer to credit it with */
        struct node_id *source_peer;
};

static struct unupdated_channel *
get_unupdated_channel(const struct routing_state *rstate,
                      const struct short_channel_id *scid)
{
        return uintmap_get(&rstate->unupdated_chanmap, scid->u64);
}

static void destroy_unupdated_channel(struct unupdated_channel *uc,
                                      struct routing_state *rstate)
{
        uintmap_del(&rstate->unupdated_chanmap, uc->scid.u64);
}

static struct node_map *new_node_map(const tal_t *ctx)
{
        struct node_map *map = tal(ctx, struct node_map);
        node_map_init(map);
        return map;
}

/* We use a simple array (with NULL entries) until we have too many. */
static bool node_uses_chan_map(const struct node *node)
{
        return node->chan_map;
}

/* When simple array fills, use a htable. */
static void convert_node_to_chan_map(struct node *node)
{
        assert(!node_uses_chan_map(node));
        node->chan_map = tal(node, struct chan_map);
        chan_map_init_sized(node->chan_map, ARRAY_SIZE(node->chan_arr) + 1);
        assert(node_uses_chan_map(node));
        for (size_t i = 0; i < ARRAY_SIZE(node->chan_arr); i++) {
                chan_map_add(node->chan_map, node->chan_arr[i]);
                node->chan_arr[i] = NULL;
        }
}

static void add_chan(struct node *node, struct chan *chan)
{
        if (!node_uses_chan_map(node)) {
                for (size_t i = 0; i < ARRAY_SIZE(node->chan_arr); i++) {
                        if (node->chan_arr[i] == NULL) {
                                node->chan_arr[i] = chan;
                                return;
                        }
                }
                convert_node_to_chan_map(node);
        }

        chan_map_add(node->chan_map, chan);
}

static struct chan *next_chan_arr(const struct node *node,
                                  struct chan_map_iter *i)
{
        while (i->i.off < ARRAY_SIZE(node->chan_arr)) {
                if (node->chan_arr[i->i.off])
                        return node->chan_arr[i->i.off];
                i->i.off++;
        }
        return NULL;
}

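/* Design note: the array/htable split above keeps nodes with only a
 * few channels cheap: chan_arr is scanned linearly, and only when it
 * overflows does add_chan fall through to convert_node_to_chan_map.
 * A non-NULL node->chan_map doubles as the "which representation?"
 * flag (node_uses_chan_map). */
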
struct chan *first_chan(const struct node *node, struct chan_map_iter *i)
{
        if (!node_uses_chan_map(node)) {
                i->i.off = 0;
                return next_chan_arr(node, i);
        }

        return chan_map_first(node->chan_map, i);
}

struct chan *next_chan(const struct node *node, struct chan_map_iter *i)
{
        if (!node_uses_chan_map(node)) {
                i->i.off++;
                return next_chan_arr(node, i);
        }

        return chan_map_next(node->chan_map, i);
}

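/* Iteration idiom, matching the chan_map API so callers never care
 * which representation backs the node:
 *
 *      struct chan_map_iter i;
 *      struct chan *c;
 *
 *      for (c = first_chan(node, &i); c; c = next_chan(node, &i))
 *              ...
 */
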
static void destroy_routing_state(struct routing_state *rstate)
{
        /* Since we omitted destructors on these, clean up manually */
        u64 idx;
        for (struct chan *chan = uintmap_first(&rstate->chanmap, &idx);
             chan;
             chan = uintmap_after(&rstate->chanmap, &idx))
                free_chan(rstate, chan);
}

/* We don't check this when loading from the gossip_store: that would break
 * our canned tests, and usually old gossip is better than no gossip */
static bool timestamp_reasonable(struct routing_state *rstate, u32 timestamp)
{
        u64 now = gossip_time_now(rstate).ts.tv_sec;

        /* More than one day ahead? */
        if (timestamp > now + 24*60*60)
                return false;
        /* More than 2 weeks behind? */
        if (timestamp < now - GOSSIP_PRUNE_INTERVAL(rstate->dev_fast_gossip_prune))
                return false;
        return true;
}

#if DEVELOPER
static void memleak_help_routing_tables(struct htable *memtable,
                                        struct routing_state *rstate)
{
        struct node *n;
        struct node_map_iter nit;

        memleak_scan_htable(memtable, &rstate->nodes->raw);
        memleak_scan_htable(memtable, &rstate->pending_node_map->raw);
        memleak_scan_htable(memtable, &rstate->pending_cannouncements->raw);
        memleak_scan_uintmap(memtable, &rstate->unupdated_chanmap);

        for (n = node_map_first(rstate->nodes, &nit);
             n;
             n = node_map_next(rstate->nodes, &nit)) {
                if (node_uses_chan_map(n))
                        memleak_scan_htable(memtable, &n->chan_map->raw);
        }
}
#endif /* DEVELOPER */

/* Once an hour, or at 10000 entries, we expire old ones */
static void txout_failure_age(struct routing_state *rstate)
{
        uintmap_clear(&rstate->txout_failures_old);
        rstate->txout_failures_old = rstate->txout_failures;
        uintmap_init(&rstate->txout_failures);
        rstate->num_txout_failures = 0;

        rstate->txout_failure_timer = new_reltimer(&rstate->daemon->timers,
                                                   rstate, time_from_sec(3600),
                                                   txout_failure_age, rstate);
}

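/* This is generational expiry: a failed-txout entry lives in
 * txout_failures for up to an hour, is demoted to txout_failures_old
 * on rotation, and vanishes on the rotation after that, unless
 * in_txout_failures() re-promotes it first.  add_to_txout_failures()
 * forces an early rotation at 10000 fresh entries. */
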
static void add_to_txout_failures(struct routing_state *rstate,
                                  const struct short_channel_id *scid)
{
        if (uintmap_add(&rstate->txout_failures, scid->u64, true)
            && ++rstate->num_txout_failures == 10000) {
                tal_free(rstate->txout_failure_timer);
                txout_failure_age(rstate);
        }
}

static bool in_txout_failures(struct routing_state *rstate,
                              const struct short_channel_id *scid)
{
        if (uintmap_get(&rstate->txout_failures, scid->u64))
                return true;

        /* If we were going to expire it, we no longer are. */
        if (uintmap_get(&rstate->txout_failures_old, scid->u64)) {
                add_to_txout_failures(rstate, scid);
                return true;
        }
        return false;
}

struct routing_state *new_routing_state(const tal_t *ctx,
                                        struct daemon *daemon,
                                        const u32 *dev_gossip_time TAKES,
                                        bool dev_fast_gossip,
                                        bool dev_fast_gossip_prune)
{
        struct routing_state *rstate = tal(ctx, struct routing_state);
        rstate->daemon = daemon;
        rstate->nodes = new_node_map(rstate);
        rstate->gs = gossip_store_new(rstate);
        rstate->local_channel_announced = false;
        rstate->last_timestamp = 0;
        rstate->dying_channels = tal_arr(rstate, struct dying_channel, 0);

        rstate->pending_cannouncements = tal(rstate, struct pending_cannouncement_map);
        pending_cannouncement_map_init(rstate->pending_cannouncements);

        uintmap_init(&rstate->chanmap);
        uintmap_init(&rstate->unupdated_chanmap);
        rstate->num_txout_failures = 0;
        uintmap_init(&rstate->txout_failures);
        uintmap_init(&rstate->txout_failures_old);
        txout_failure_age(rstate);
        rstate->pending_node_map = tal(ctx, struct pending_node_map);
        pending_node_map_init(rstate->pending_node_map);

#if DEVELOPER
        if (dev_gossip_time) {
                rstate->gossip_time = tal(rstate, struct timeabs);
                rstate->gossip_time->ts.tv_sec = *dev_gossip_time;
                rstate->gossip_time->ts.tv_nsec = 0;
        } else
                rstate->gossip_time = NULL;
        rstate->dev_fast_gossip = dev_fast_gossip;
        rstate->dev_fast_gossip_prune = dev_fast_gossip_prune;
#endif
        tal_add_destructor(rstate, destroy_routing_state);
        memleak_add_helper(rstate, memleak_help_routing_tables);

        if (taken(dev_gossip_time))
                tal_free(dev_gossip_time);

        return rstate;
}

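/* Note the ccan/take convention above: dev_gossip_time is marked
 * TAKES, so if the caller passed it with take() we own it and must
 * free it ourselves once we've copied the value out. */
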
const struct node_id *node_map_keyof_node(const struct node *n)
{
        return &n->id;
}

size_t node_map_hash_key(const struct node_id *pc)
{
        return siphash24(siphash_seed(), pc->k, sizeof(pc->k));
}

bool node_map_node_eq(const struct node *n, const struct node_id *pc)
{
        return node_id_eq(&n->id, pc);
}

static void destroy_node(struct node *node, struct routing_state *rstate)
{
        struct chan_map_iter i;
        struct chan *c;
        node_map_del(rstate->nodes, node);

        /* These remove themselves from chans[]. */
        while ((c = first_chan(node, &i)) != NULL)
                free_chan(rstate, c);
}

struct node *get_node(struct routing_state *rstate,
                      const struct node_id *id)
{
        return node_map_get(rstate->nodes, id);
}

static struct node *new_node(struct routing_state *rstate,
                             const struct node_id *id)
{
        struct node *n;

        assert(!get_node(rstate, id));

        n = tal(rstate, struct node);
        n->id = *id;
        memset(n->chan_arr, 0, sizeof(n->chan_arr));
        n->chan_map = NULL;
        broadcastable_init(&n->bcast);
        broadcastable_init(&n->rgraph);
        n->tokens = TOKEN_MAX;
        node_map_add(rstate->nodes, n);
        tal_add_destructor2(n, destroy_node, rstate);

        return n;
}

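/* Ownership note: nodes are tal-children of rstate, and destroy_node
 * (attached above) unlinks the node from rstate->nodes and frees its
 * remaining channels, so a bare tal_free(node) leaves every index
 * consistent. */
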
static bool is_chan_zombie(struct chan *chan)
{
        if (chan->half[0].zombie || chan->half[1].zombie)
                return true;
        return false;
}

/* We've received a channel_announce for a channel attached to this node:
 * otherwise it's in the map only because it's a peer, or us. */
static bool node_has_public_channels(struct node *node)
{
        struct chan_map_iter i;
        struct chan *c;

        for (c = first_chan(node, &i); c; c = next_chan(node, &i)) {
                if (is_chan_public(c) && !is_chan_zombie(c))
                        return true;
        }
        return false;
}

static bool is_node_zombie(struct node *node)
{
        struct chan_map_iter i;
        struct chan *c;

        for (c = first_chan(node, &i); c; c = next_chan(node, &i)) {
                if (!is_chan_zombie(c))
                        return false;
        }
        return true;
}

/* We can *send* a channel_announce for a channel attached to this node:
 * we only send once we have a channel_update. */
bool node_has_broadcastable_channels(const struct node *node)
{
        struct chan_map_iter i;
        struct chan *c;

        for (c = first_chan(node, &i); c; c = next_chan(node, &i)) {
                if (!is_chan_public(c))
                        continue;
                if (is_chan_zombie(c))
                        continue;
                if (is_halfchan_defined(&c->half[0])
                    || is_halfchan_defined(&c->half[1]))
                        return true;
        }
        return false;
}

static bool node_announce_predates_channels(const struct node *node)
{
        struct chan_map_iter i;
        struct chan *c;

        for (c = first_chan(node, &i); c; c = next_chan(node, &i)) {
                if (!is_chan_public(c))
                        continue;

                /* Zombies don't count! */
                if (is_chan_zombie(c))
                        continue;

                if (c->bcast.index < node->bcast.index)
                        return false;
        }
        return true;
}

/* Move this node's announcement to the tail of the gossip_store, to
 * make everyone send it again. */
static void force_node_announce_rexmit(struct routing_state *rstate,
                                       struct node *node)
{
        const u8 *announce;
        announce = gossip_store_get(tmpctx, rstate->gs, node->bcast.index);

        u32 initial_bcast_index = node->bcast.index;
        gossip_store_delete(rstate->gs,
                            &node->bcast,
                            WIRE_NODE_ANNOUNCEMENT);
        node->bcast.index = gossip_store_add(rstate->gs,
                                             announce,
                                             node->bcast.timestamp,
                                             false,
                                             false,
                                             NULL);
        if (node->rgraph.index == initial_bcast_index) {
                node->rgraph.index = node->bcast.index;
        } else {
                announce = gossip_store_get(tmpctx, rstate->gs, node->rgraph.index);
                gossip_store_delete(rstate->gs,
                                    &node->rgraph,
                                    WIRE_NODE_ANNOUNCEMENT);
                node->rgraph.index = gossip_store_add(rstate->gs,
                                                      announce,
                                                      node->rgraph.timestamp,
                                                      false,
                                                      true,
                                                      NULL);
        }
}

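/* Why two entries?  node->bcast tracks what we broadcast, while
 * node->rgraph can point at a newer announcement kept only for our own
 * routing graph (e.g. one that was rate-limited).  When the two
 * differ, the rgraph copy is re-added separately; the second boolean
 * to gossip_store_add is true only for that copy, presumably
 * preserving its rate-limited status (an assumption here; the flag's
 * meaning is defined by gossip_store_add). */
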
static void remove_chan_from_node(struct routing_state *rstate,
                                  struct node *node, const struct chan *chan)
{
        size_t num_chans;

        if (!node_uses_chan_map(node)) {
                num_chans = 0;
                for (size_t i = 0; i < ARRAY_SIZE(node->chan_arr); i++) {
                        if (node->chan_arr[i] == chan)
                                node->chan_arr[i] = NULL;
                        else if (node->chan_arr[i] != NULL)
                                num_chans++;
                }
        } else {
                if (!chan_map_del(node->chan_map, chan))
                        abort();
                num_chans = chan_map_count(node->chan_map);
        }

        /* Last channel? Simply delete node (and associated announce) */
        if (num_chans == 0) {
                if (node->rgraph.index != node->bcast.index)
                        gossip_store_delete(rstate->gs,
                                            &node->rgraph,
                                            WIRE_NODE_ANNOUNCEMENT);
                gossip_store_delete(rstate->gs,
                                    &node->bcast,
                                    WIRE_NODE_ANNOUNCEMENT);
                tal_free(node);
                return;
        }

        /* Don't bother if there's no node_announcement */
        if (!node->bcast.index)
                return;

        /* Removed only public channel? Remove node announcement. */
        if (!node_has_broadcastable_channels(node)) {
                if (node->rgraph.index != node->bcast.index)
                        gossip_store_delete(rstate->gs,
                                            &node->rgraph,
                                            WIRE_NODE_ANNOUNCEMENT);
                gossip_store_delete(rstate->gs,
                                    &node->bcast,
                                    WIRE_NODE_ANNOUNCEMENT);
                node->rgraph.index = node->bcast.index = 0;
                node->rgraph.timestamp = node->bcast.timestamp = 0;
        } else if (node_announce_predates_channels(node)) {
                /* Node announcement predates all channel announcements?
                 * Move to end (we could, in theory, move to just past the
                 * next channel_announce, but we don't care that much about
                 * spurious retransmissions in this corner case). */
                force_node_announce_rexmit(rstate, node);
        }
}

#if DEVELOPER
/* We make sure that free_chan is called on this chan! */
static void destroy_chan_check(struct chan *chan)
{
        assert(chan->sat.satoshis == (unsigned long)chan); /* Raw: dev-hack */
}
#endif

static void free_chans_from_node(struct routing_state *rstate, struct chan *chan)
{
        remove_chan_from_node(rstate, chan->nodes[0], chan);
        remove_chan_from_node(rstate, chan->nodes[1], chan);

#if DEVELOPER
        chan->sat.satoshis = (unsigned long)chan; /* Raw: dev-hack */
#endif
}

/* We used to make this a tal_add_destructor2, but that costs 40 bytes per
 * chan, and we only ever explicitly free it anyway. */
void free_chan(struct routing_state *rstate, struct chan *chan)
{
        free_chans_from_node(rstate, chan);
        uintmap_del(&rstate->chanmap, chan->scid.u64);

        tal_free(chan);
}

static void init_half_chan(struct routing_state *rstate,
                           struct chan *chan,
                           int channel_idx)
{
        struct half_chan *c = &chan->half[channel_idx];

        broadcastable_init(&c->bcast);
        broadcastable_init(&c->rgraph);
        c->tokens = TOKEN_MAX;
        c->zombie = false;
}

static void bad_gossip_order(const u8 *msg,
                             const struct node_id *source_peer,
                             const char *details)
{
        status_peer_debug(source_peer,
                          "Bad gossip order: %s before announcement %s",
                          peer_wire_name(fromwire_peektype(msg)),
                          details);
}

struct chan *new_chan(struct routing_state *rstate,
                      const struct short_channel_id *scid,
                      const struct node_id *id1,
                      const struct node_id *id2,
                      struct amount_sat satoshis)
{
        struct chan *chan = tal(rstate, struct chan);
        int n1idx = node_id_idx(id1, id2);
        struct node *n1, *n2;

#if DEVELOPER
        tal_add_destructor(chan, destroy_chan_check);
#endif
        /* We should never add a channel twice */
        assert(!uintmap_get(&rstate->chanmap, scid->u64));

        /* Create nodes on demand */
        n1 = get_node(rstate, id1);
        if (!n1)
                n1 = new_node(rstate, id1);
        n2 = get_node(rstate, id2);
        if (!n2)
                n2 = new_node(rstate, id2);

        chan->scid = *scid;
        chan->nodes[n1idx] = n1;
        chan->nodes[!n1idx] = n2;
        broadcastable_init(&chan->bcast);
        /* This is how we indicate it's not public yet. */
        chan->bcast.timestamp = 0;
        chan->sat = satoshis;

        add_chan(n2, chan);
        add_chan(n1, chan);

        /* Populate with (inactive) connections */
        init_half_chan(rstate, chan, n1idx);
        init_half_chan(rstate, chan, !n1idx);

        uintmap_add(&rstate->chanmap, scid->u64, chan);

        return chan;
}

/* Checks that key is valid, and signed this hash */
static bool check_signed_hash_nodeid(const struct sha256_double *hash,
                                     const secp256k1_ecdsa_signature *signature,
                                     const struct node_id *id)
{
        struct pubkey key;

        return pubkey_from_node_id(&key, id)
                && check_signed_hash(hash, signature, &key);
}

/* Verify the signature of a channel_update message */
static u8 *check_channel_update(const tal_t *ctx,
                                const struct node_id *node_id,
                                const secp256k1_ecdsa_signature *node_sig,
                                const u8 *update)
{
        /* 2 byte msg type + 64 byte signatures */
        int offset = 66;
        struct sha256_double hash;
        sha256_double(&hash, update + offset, tal_count(update) - offset);

        if (!check_signed_hash_nodeid(&hash, node_sig, node_id))
                return towire_warningfmt(ctx, NULL,
                                         "Bad signature for %s hash %s"
                                         " on channel_update %s",
                                         type_to_string(tmpctx,
                                                        secp256k1_ecdsa_signature,
                                                        node_sig),
                                         type_to_string(tmpctx,
                                                        struct sha256_double,
                                                        &hash),
                                         tal_hex(tmpctx, update));
        return NULL;
}

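/* Wire-format reminder for these two checks: a channel_update is a
 * 2-byte type followed by one 64-byte signature, so the signed data
 * starts at offset 66; a channel_announcement below carries four
 * 64-byte signatures, hence offset 258. */
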
static u8 *check_channel_announcement(const tal_t *ctx,
        const struct node_id *node1_id, const struct node_id *node2_id,
        const struct pubkey *bitcoin1_key, const struct pubkey *bitcoin2_key,
        const secp256k1_ecdsa_signature *node1_sig,
        const secp256k1_ecdsa_signature *node2_sig,
        const secp256k1_ecdsa_signature *bitcoin1_sig,
        const secp256k1_ecdsa_signature *bitcoin2_sig, const u8 *announcement)
{
        /* 2 byte msg type + 256 byte signatures */
        int offset = 258;
        struct sha256_double hash;
        sha256_double(&hash, announcement + offset,
                      tal_count(announcement) - offset);

        if (!check_signed_hash_nodeid(&hash, node1_sig, node1_id)) {
                return towire_warningfmt(ctx, NULL,
                                         "Bad node_signature_1 %s hash %s"
                                         " on channel_announcement %s",
                                         type_to_string(tmpctx,
                                                        secp256k1_ecdsa_signature,
                                                        node1_sig),
                                         type_to_string(tmpctx,
                                                        struct sha256_double,
                                                        &hash),
                                         tal_hex(tmpctx, announcement));
        }
        if (!check_signed_hash_nodeid(&hash, node2_sig, node2_id)) {
                return towire_warningfmt(ctx, NULL,
                                         "Bad node_signature_2 %s hash %s"
                                         " on channel_announcement %s",
                                         type_to_string(tmpctx,
                                                        secp256k1_ecdsa_signature,
                                                        node2_sig),
                                         type_to_string(tmpctx,
                                                        struct sha256_double,
                                                        &hash),
                                         tal_hex(tmpctx, announcement));
        }
        if (!check_signed_hash(&hash, bitcoin1_sig, bitcoin1_key)) {
                return towire_warningfmt(ctx, NULL,
                                         "Bad bitcoin_signature_1 %s hash %s"
                                         " on channel_announcement %s",
                                         type_to_string(tmpctx,
                                                        secp256k1_ecdsa_signature,
                                                        bitcoin1_sig),
                                         type_to_string(tmpctx,
                                                        struct sha256_double,
                                                        &hash),
                                         tal_hex(tmpctx, announcement));
        }
        if (!check_signed_hash(&hash, bitcoin2_sig, bitcoin2_key)) {
                return towire_warningfmt(ctx, NULL,
                                         "Bad bitcoin_signature_2 %s hash %s"
                                         " on channel_announcement %s",
                                         type_to_string(tmpctx,
                                                        secp256k1_ecdsa_signature,
                                                        bitcoin2_sig),
                                         type_to_string(tmpctx,
                                                        struct sha256_double,
                                                        &hash),
                                         tal_hex(tmpctx, announcement));
        }
        return NULL;
}

/* We allow node announcements for this node if it doesn't otherwise exist, so
 * we can process them once it does exist (a channel_announce is being
 * validated right now).
 *
 * If we attach one, remove it on destruction of @ctx.
 */
static void del_pending_node_announcement(const tal_t *ctx UNUSED,
                                          struct pending_node_announce *pna)
{
        if (--pna->refcount == 0) {
                pending_node_map_del(pna->rstate->pending_node_map, pna);
                tal_free(pna);
        }
}

static void catch_node_announcement(const tal_t *ctx,
                                    struct routing_state *rstate,
                                    struct node_id *nodeid)
{
        struct pending_node_announce *pna;
        struct node *node;

        /* No need if we already know about the node. We might, however, only
         * know about it because it's a peer (maybe with private or
         * not-yet-announced channels), so check for that too. */
        node = get_node(rstate, nodeid);
        if (node && node_has_public_channels(node))
                return;

        /* We can have multiple channels announced at the same time for nodes;
         * but we can only have one of these in the map. */
        pna = pending_node_map_get(rstate->pending_node_map, nodeid);
        if (!pna) {
                pna = tal(rstate, struct pending_node_announce);
                pna->rstate = rstate;
                pna->nodeid = *nodeid;
                pna->node_announcement = NULL;
                pna->timestamp = 0;
                pna->index = 0;
                pna->refcount = 0;
                pna->source_peer = NULL;
                pending_node_map_add(rstate->pending_node_map, pna);
        }
        pna->refcount++;
        tal_add_destructor2(ctx, del_pending_node_announcement, pna);
}

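/* The refcount above exists because several pending
 * channel_announcements can mention the same node: each call to
 * catch_node_announcement ties one reference to its caller's ctx, and
 * only when the last such ctx is freed does
 * del_pending_node_announcement drop the map entry. */
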
static void process_pending_node_announcement(struct routing_state *rstate,
                                              struct node_id *nodeid)
{
        struct pending_node_announce *pna = pending_node_map_get(rstate->pending_node_map, nodeid);
        if (!pna)
                return;

        if (pna->node_announcement) {
                SUPERVERBOSE(
                    "Processing deferred node_announcement for node %s",
                    type_to_string(pna, struct node_id, nodeid));

                /* Can fail if timestamp is now too old */
                if (!routing_add_node_announcement(rstate,
                                                   pna->node_announcement,
                                                   pna->index,
                                                   pna->source_peer, NULL,
                                                   false))
                        status_unusual("pending node_announcement %s too old?",
                                       tal_hex(tmpctx, pna->node_announcement));
                /* Never send this again. */
                pna->node_announcement = tal_free(pna->node_announcement);
        }

        /* We don't need to catch any more node_announcements, since we've
         * accepted the public channel now. But other pending announcements
         * may still hold a reference they use in
         * del_pending_node_announcement, so simply delete it from the map. */
        pending_node_map_del(rstate->pending_node_map, notleak(pna));
}

static struct pending_cannouncement *
find_pending_cannouncement(struct routing_state *rstate,
                           const struct short_channel_id *scid)
{
        struct pending_cannouncement *pann;

        pann = pending_cannouncement_map_get(rstate->pending_cannouncements, scid);

        return pann;
}

static void destroy_pending_cannouncement(struct pending_cannouncement *pending,
                                          struct routing_state *rstate)
{
        pending_cannouncement_map_del(rstate->pending_cannouncements, pending);
}

static void add_channel_announce_to_broadcast(struct routing_state *rstate,
                                              struct chan *chan,
                                              const u8 *channel_announce,
                                              u32 timestamp,
                                              u32 index)
{
        u8 *addendum = towire_gossip_store_channel_amount(tmpctx, chan->sat);
        bool is_local = local_direction(rstate, chan, NULL);

        chan->bcast.timestamp = timestamp;
        /* 0, unless we're loading from store */
        if (index)
                chan->bcast.index = index;
        else
                chan->bcast.index = gossip_store_add(rstate->gs,
                                                     channel_announce,
                                                     chan->bcast.timestamp,
                                                     false,
                                                     false,
                                                     addendum);
        rstate->local_channel_announced |= is_local;
}

static void delete_chan_messages_from_store(struct routing_state *rstate,
                                            struct chan *chan)
{
        int update_type, announcement_type;

        if (is_chan_public(chan)) {
                update_type = WIRE_CHANNEL_UPDATE;
                announcement_type = WIRE_CHANNEL_ANNOUNCEMENT;
        } else {
                update_type = WIRE_GOSSIP_STORE_PRIVATE_UPDATE;
                announcement_type = WIRE_GOSSIP_STORE_PRIVATE_CHANNEL;
        }

        /* If these aren't in the store, these are noops. */
        gossip_store_delete(rstate->gs,
                            &chan->bcast, announcement_type);
        if (chan->half[0].rgraph.index != chan->half[0].bcast.index)
                gossip_store_delete(rstate->gs,
                                    &chan->half[0].rgraph, update_type);
        gossip_store_delete(rstate->gs,
                            &chan->half[0].bcast, update_type);
        if (chan->half[1].rgraph.index != chan->half[1].bcast.index)
                gossip_store_delete(rstate->gs,
                                    &chan->half[1].rgraph, update_type);
        gossip_store_delete(rstate->gs,
                            &chan->half[1].bcast, update_type);
}

static void remove_channel_from_store(struct routing_state *rstate,
                                      struct chan *chan)
{
        /* Put in tombstone marker. Zombie channels will have one already. */
        if (!is_chan_zombie(chan))
                gossip_store_mark_channel_deleted(rstate->gs, &chan->scid);

        /* Now delete old entries. */
        delete_chan_messages_from_store(rstate, chan);
}

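/* Ordering note: remove_channel_from_store writes the tombstone before
 * the individual messages are deleted, presumably so anything
 * following the store sees the deletion marker before the channel's
 * messages disappear. */
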
2018-04-11 01:03:35 +02:00
|
|
|
bool routing_add_channel_announcement(struct routing_state *rstate,
|
2019-02-21 04:45:55 +01:00
|
|
|
const u8 *msg TAKES,
|
2019-04-10 09:31:29 +02:00
|
|
|
struct amount_sat sat,
|
2019-10-08 03:13:24 +02:00
|
|
|
u32 index,
|
2023-07-06 09:35:54 +02:00
|
|
|
const struct node_id *source_peer)
|
2018-03-22 15:11:24 +01:00
|
|
|
{
|
gossipd: fix gossmap race.
When upgrading a channel from private to public, we would delete the
private channel then add the public one. However, this is visible in
the gossmap! In particular, the node may be removed from the gossmap
and then re-added, so it may temporarily vanish!
I was seeing an occasional assertion inside node_factory.line_graph:
```
@pytest.mark.developer
def test_reconnect_remote_sends_no_sigs(node_factory):
"""We re-announce, even when remote node doesn't send its announcement_signatures on reconnect.
"""
> l1, l2 = node_factory.line_graph(2, wait_for_announce=True, opts={'may_reconnect': True})
tests/test_connection.py:870:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
contrib/pyln-testing/pyln/testing/utils.py:1467: in line_graph
self.join_nodes(nodes, fundchannel, fundamount, wait_for_announce, announce_channels)
contrib/pyln-testing/pyln/testing/utils.py:1460: in join_nodes
wait_for(lambda: 'alias' in only_one(end.rpc.listnodes(n.info['id'])['nodes']))
contrib/pyln-testing/pyln/testing/utils.py:88: in wait_for
while not success():
contrib/pyln-testing/pyln/testing/utils.py:1460: in <lambda>
wait_for(lambda: 'alias' in only_one(end.rpc.listnodes(n.info['id'])['nodes']))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
arr = []
def only_one(arr):
"""Many JSON RPC calls return an array; often we only expect a single entry
"""
> assert len(arr) == 1
E AssertionError
```
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2022-06-26 06:38:01 +02:00
|
|
|
struct chan *oldchan;
|
2018-03-22 15:11:24 +01:00
|
|
|
secp256k1_ecdsa_signature node_signature_1, node_signature_2;
|
|
|
|
secp256k1_ecdsa_signature bitcoin_signature_1, bitcoin_signature_2;
|
|
|
|
u8 *features;
|
|
|
|
struct bitcoin_blkid chain_hash;
|
|
|
|
struct short_channel_id scid;
|
2019-04-08 11:58:32 +02:00
|
|
|
struct node_id node_id_1;
|
|
|
|
struct node_id node_id_2;
|
2018-03-22 15:11:24 +01:00
|
|
|
struct pubkey bitcoin_key_1;
|
|
|
|
struct pubkey bitcoin_key_2;
|
2019-04-11 07:15:13 +02:00
|
|
|
struct unupdated_channel *uc;
|
|
|
|
const u8 *private_updates[2] = { NULL, NULL };
|
2018-04-11 01:03:35 +02:00
|
|
|
|
2019-04-10 09:31:18 +02:00
|
|
|
/* Make sure we own msg, even if we don't save it. */
|
|
|
|
if (taken(msg))
|
|
|
|
tal_steal(tmpctx, msg);
|
|
|
|
|
2018-05-17 07:08:11 +02:00
|
|
|
if (!fromwire_channel_announcement(
|
|
|
|
tmpctx, msg, &node_signature_1, &node_signature_2,
|
|
|
|
&bitcoin_signature_1, &bitcoin_signature_2, &features, &chain_hash,
|
2019-04-08 11:58:44 +02:00
|
|
|
&scid, &node_id_1, &node_id_2, &bitcoin_key_1, &bitcoin_key_2))
|
2018-05-17 07:08:11 +02:00
|
|
|
return false;
|
|
|
|
|
2018-03-22 15:11:24 +01:00
|
|
|
/* The channel may already exist if it was non-public from
|
|
|
|
* local_add_channel(); normally we don't accept new
|
|
|
|
* channel_announcements. See handle_channel_announcement. */
|
2022-06-26 06:38:01 +02:00
|
|
|
oldchan = get_channel(rstate, &scid);
|
2018-03-22 15:11:24 +01:00
|
|
|
|
2019-04-11 07:15:13 +02:00
|
|
|
/* private updates will exist in the store before the announce: we
|
|
|
|
* can't index those for broadcast since they would predate it, so we
|
2019-06-03 20:24:25 +02:00
|
|
|
* add fresh ones. */
|
2022-06-26 06:38:01 +02:00
|
|
|
if (oldchan) {
|
2019-06-03 20:24:25 +02:00
|
|
|
/* If this was in the gossip_store, gossip_store is bad! */
|
|
|
|
if (index) {
|
|
|
|
status_broken("gossip_store channel_announce"
|
|
|
|
" %u replaces %u!",
|
2022-06-26 06:38:01 +02:00
|
|
|
index, oldchan->bcast.index);
|
2019-06-03 20:24:25 +02:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-04-11 07:16:30 +02:00
|
|
|
/* Reload any private updates */
|
2022-06-26 06:38:01 +02:00
|
|
|
if (oldchan->half[0].bcast.index)
|
2019-04-11 07:16:30 +02:00
|
|
|
private_updates[0]
|
2019-06-03 20:05:25 +02:00
|
|
|
= gossip_store_get_private_update(NULL,
|
2019-06-03 20:22:25 +02:00
|
|
|
rstate->gs,
|
2022-06-26 06:38:01 +02:00
|
|
|
oldchan->half[0].bcast.index);
|
|
|
|
if (oldchan->half[1].bcast.index)
|
2019-04-11 07:16:30 +02:00
|
|
|
private_updates[1]
|
2019-06-03 20:05:25 +02:00
|
|
|
= gossip_store_get_private_update(NULL,
|
2019-06-03 20:22:25 +02:00
|
|
|
rstate->gs,
|
2022-06-26 06:38:01 +02:00
|
|
|
oldchan->half[1].bcast.index);
|
2019-04-11 07:15:13 +02:00
|
|
|
|
2022-06-26 06:38:01 +02:00
|
|
|
/* We don't delete it from store until *after* we've put the
|
|
|
|
* other one in! */
|
|
|
|
uintmap_del(&rstate->chanmap, oldchan->scid.u64);
|
2019-06-03 20:09:25 +02:00
|
|
|
}
|
2019-04-11 07:15:13 +02:00
|
|
|
|
|
|
|
uc = tal(rstate, struct unupdated_channel);
|
2020-02-27 03:17:01 +01:00
|
|
|
uc->channel_announce = tal_dup_talarr(uc, u8, msg);
|
2019-09-12 02:22:12 +02:00
|
|
|
uc->added = gossip_time_now(rstate);
|
2019-04-11 07:15:13 +02:00
|
|
|
uc->index = index;
|
|
|
|
uc->sat = sat;
|
|
|
|
uc->scid = scid;
|
|
|
|
uc->id[0] = node_id_1;
|
|
|
|
uc->id[1] = node_id_2;
|
2023-07-06 09:35:54 +02:00
|
|
|
uc->source_peer = tal_dup_or_null(uc, struct node_id, source_peer);
|
2019-04-11 07:15:13 +02:00
|
|
|
uintmap_add(&rstate->unupdated_chanmap, scid.u64, uc);
|
|
|
|
tal_add_destructor2(uc, destroy_unupdated_channel, rstate);
|
|
|
|
|
|
|
|
/* If a node_announcement comes along, save it for once we're updated */
|
|
|
|
catch_node_announcement(uc, rstate, &node_id_1);
|
|
|
|
catch_node_announcement(uc, rstate, &node_id_2);
|
|
|
|
|
|
|
|
/* If we had private updates, they'll immediately create the channel. */
|
|
|
|
if (private_updates[0])
|
2019-10-08 03:13:24 +02:00
|
|
|
routing_add_channel_update(rstate, take(private_updates[0]), 0,
|
2023-07-06 09:35:54 +02:00
|
|
|
source_peer, false, false, false);
|
2019-04-11 07:15:13 +02:00
|
|
|
if (private_updates[1])
|
2019-10-08 03:13:24 +02:00
|
|
|
routing_add_channel_update(rstate, take(private_updates[1]), 0,
|
2023-07-06 09:35:54 +02:00
|
|
|
source_peer, false, false, false);
|
2019-04-10 09:31:29 +02:00
|
|
|
|
2022-06-26 06:38:01 +02:00
|
|
|
/* Now we can finish cleanup of gossip store, so there's no window where
|
|
|
|
* the channel (or its nodes) vanishes. */
|
|
|
|
if (oldchan) {
|
|
|
|
/* Mark private messages deleted, but don't tombstone the channel! */
|
|
|
|
delete_chan_messages_from_store(rstate, oldchan);
|
|
|
|
free_chans_from_node(rstate, oldchan);
|
|
|
|
tal_free(oldchan);
|
|
|
|
}
|
|
|
|
|
2018-04-11 01:03:35 +02:00
|
|
|
return true;
|
2018-03-22 15:11:24 +01:00
|
|
|
}
|
|
|
|
|
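/* A sketch of the ordering invariant the function above preserves
 * (inferred from the gossmap-race fix, not a spec guarantee): when a
 * private channel goes public, the new records are written *before*
 * the old ones are deleted, so a concurrent gossmap reader never sees
 * the channel (or its nodes) disappear:
 *
 *   uintmap_del(&rstate->chanmap, oldchan->scid.u64); // hide lookups
 *   ...append public channel_announcement + updates to the store...
 *   delete_chan_messages_from_store(rstate, oldchan); // only now
 */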
2018-03-08 05:10:31 +01:00
|
|
|
u8 *handle_channel_announcement(struct routing_state *rstate,
|
|
|
|
const u8 *announce TAKES,
|
2019-09-22 04:21:19 +02:00
|
|
|
u32 current_blockheight,
|
2019-10-08 03:13:24 +02:00
|
|
|
const struct short_channel_id **scid,
|
2023-07-06 09:35:54 +02:00
|
|
|
const struct node_id *source_peer TAKES)
|
2017-02-01 15:09:26 +01:00
|
|
|
{
|
2018-01-04 12:40:58 +01:00
|
|
|
struct pending_cannouncement *pending;
|
2017-12-18 07:44:10 +01:00
|
|
|
struct bitcoin_blkid chain_hash;
|
2022-03-31 11:10:50 +02:00
|
|
|
u8 *features, *warn;
|
2018-01-04 12:40:58 +01:00
|
|
|
secp256k1_ecdsa_signature node_signature_1, node_signature_2;
|
|
|
|
secp256k1_ecdsa_signature bitcoin_signature_1, bitcoin_signature_2;
|
2018-03-04 03:26:59 +01:00
|
|
|
struct chan *chan;
|
2017-02-01 15:09:26 +01:00
|
|
|
|
2018-01-04 12:40:58 +01:00
|
|
|
pending = tal(rstate, struct pending_cannouncement);
|
2023-07-06 09:35:54 +02:00
|
|
|
pending->source_peer = tal_dup_or_null(pending, struct node_id, source_peer);
|
2018-01-12 18:55:29 +01:00
|
|
|
pending->updates[0] = NULL;
|
|
|
|
pending->updates[1] = NULL;
|
2023-07-06 09:35:54 +02:00
|
|
|
pending->update_source_peer[0] = pending->update_source_peer[1] = NULL;
|
2020-02-27 03:17:01 +01:00
|
|
|
pending->announce = tal_dup_talarr(pending, u8, announce);
|
2018-02-02 14:55:54 +01:00
|
|
|
pending->update_timestamps[0] = pending->update_timestamps[1] = 0;
|
2018-01-04 12:40:58 +01:00
|
|
|
|
2018-02-20 21:59:09 +01:00
|
|
|
if (!fromwire_channel_announcement(pending, pending->announce,
|
2018-01-04 12:40:58 +01:00
|
|
|
&node_signature_1,
|
|
|
|
&node_signature_2,
|
2017-02-01 15:09:26 +01:00
|
|
|
&bitcoin_signature_1,
|
|
|
|
&bitcoin_signature_2,
|
2017-08-03 03:45:04 +02:00
|
|
|
&features,
|
2017-08-22 07:25:01 +02:00
|
|
|
&chain_hash,
|
2018-01-04 12:40:58 +01:00
|
|
|
&pending->short_channel_id,
|
2019-04-08 11:58:44 +02:00
|
|
|
&pending->node_id_1,
|
|
|
|
&pending->node_id_2,
|
2018-01-04 12:40:58 +01:00
|
|
|
&pending->bitcoin_key_1,
|
|
|
|
&pending->bitcoin_key_2)) {
|
2022-03-31 11:10:50 +02:00
|
|
|
warn = towire_warningfmt(rstate, NULL,
|
2021-02-03 03:51:41 +01:00
|
|
|
"Malformed channel_announcement %s",
|
|
|
|
tal_hex(pending, pending->announce));
|
2018-03-13 00:06:00 +01:00
|
|
|
goto malformed;
|
2017-02-01 15:09:26 +01:00
|
|
|
}
|
|
|
|
|
2020-11-25 01:18:43 +01:00
|
|
|
/* We don't use features */
|
|
|
|
tal_free(features);
|
|
|
|
|
2019-09-22 04:21:19 +02:00
|
|
|
/* If we know the blockheight, and it's in the future, reject
|
|
|
|
* out-of-hand. Remember, it should be 6 deep before they tell us
|
|
|
|
* anyway. */
|
|
|
|
if (current_blockheight != 0
|
|
|
|
&& short_channel_id_blocknum(&pending->short_channel_id) > current_blockheight) {
|
2023-07-06 09:35:54 +02:00
|
|
|
status_peer_debug(pending->source_peer,
|
2019-11-18 01:26:17 +01:00
|
|
|
"Ignoring future channel_announcment for %s"
|
|
|
|
" (current block %u)",
|
|
|
|
type_to_string(tmpctx, struct short_channel_id,
|
|
|
|
&pending->short_channel_id),
|
|
|
|
current_blockheight);
|
2019-09-22 04:21:19 +02:00
|
|
|
goto ignored;
|
|
|
|
}
|
|
|
|
|
2019-03-27 15:40:25 +01:00
|
|
|
/* If a prior txout lookup failed there is little point in trying
|
2019-11-06 03:16:09 +01:00
|
|
|
* again. Just drop the announcement and walk away whistling. */
|
|
|
|
if (in_txout_failures(rstate, &pending->short_channel_id)) {
|
2019-03-27 15:40:25 +01:00
|
|
|
SUPERVERBOSE(
|
|
|
|
"Ignoring channel_announcement of %s due to a prior txout "
|
|
|
|
"query failure. The channel was likely closed on-chain.",
|
|
|
|
type_to_string(tmpctx, struct short_channel_id,
|
|
|
|
&pending->short_channel_id));
|
|
|
|
goto ignored;
|
|
|
|
}
|
|
|
|
|
2018-01-30 20:27:14 +01:00
|
|
|
/* Check if we know the channel already (no matter in what
|
|
|
|
* state); if so, we stop here. */
|
2018-03-01 10:22:28 +01:00
|
|
|
chan = get_channel(rstate, &pending->short_channel_id);
|
2018-05-10 16:00:38 +02:00
|
|
|
if (chan != NULL && is_chan_public(chan)) {
|
2018-03-02 09:59:13 +01:00
|
|
|
SUPERVERBOSE("%s: %s already has public channel",
|
|
|
|
__func__,
|
2018-03-15 05:30:38 +01:00
|
|
|
type_to_string(tmpctx, struct short_channel_id,
|
2018-03-02 09:59:13 +01:00
|
|
|
&pending->short_channel_id));
|
2018-03-13 00:06:00 +01:00
|
|
|
goto ignored;
|
2018-01-30 20:27:14 +01:00
|
|
|
}
|
2019-04-11 07:15:13 +02:00
|
|
|
if (get_unupdated_channel(rstate, &pending->short_channel_id)) {
|
|
|
|
SUPERVERBOSE("%s: %s already has unupdated channel",
|
|
|
|
__func__,
|
|
|
|
type_to_string(tmpctx, struct short_channel_id,
|
|
|
|
&pending->short_channel_id));
|
|
|
|
goto ignored;
|
|
|
|
}
|
2018-03-02 09:59:13 +01:00
|
|
|
|
|
|
|
/* We don't replace previous ones, since we might validate that and
|
|
|
|
* think this one is OK! */
|
|
|
|
if (find_pending_cannouncement(rstate, &pending->short_channel_id)) {
|
|
|
|
SUPERVERBOSE("%s: %s already has pending cannouncement",
|
|
|
|
__func__,
|
2018-03-15 05:30:38 +01:00
|
|
|
type_to_string(tmpctx, struct short_channel_id,
|
2018-03-02 09:59:13 +01:00
|
|
|
&pending->short_channel_id));
|
2018-03-13 00:06:00 +01:00
|
|
|
goto ignored;
|
2018-03-02 09:59:13 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* FIXME: Handle duplicates as per BOLT #7 */
|
2018-01-30 20:27:14 +01:00
|
|
|
|
2017-08-22 07:25:01 +02:00
|
|
|
/* BOLT #7:
|
2019-01-14 03:22:05 +01:00
|
|
|
* The receiving node:
|
2018-06-17 12:13:44 +02:00
|
|
|
*...
|
|
|
|
* - if the specified `chain_hash` is unknown to the receiver:
|
|
|
|
* - MUST ignore the message.
|
2017-08-22 07:25:01 +02:00
|
|
|
*/
|
2019-11-20 02:54:47 +01:00
|
|
|
if (!bitcoin_blkid_eq(&chain_hash, &chainparams->genesis_blockhash)) {
|
2023-07-06 09:35:54 +02:00
|
|
|
status_peer_debug(pending->source_peer,
|
2017-12-21 10:53:37 +01:00
|
|
|
"Received channel_announcement %s for unknown chain %s",
|
2018-02-01 03:41:19 +01:00
|
|
|
type_to_string(pending, struct short_channel_id,
|
|
|
|
&pending->short_channel_id),
|
2018-01-04 12:40:58 +01:00
|
|
|
type_to_string(pending, struct bitcoin_blkid, &chain_hash));
|
2018-03-13 00:06:00 +01:00
|
|
|
goto ignored;
|
2017-08-22 07:25:01 +02:00
|
|
|
}
|
|
|
|
|
2019-04-08 11:58:44 +02:00
|
|
|
/* Note that if node_id_1 or node_id_2 are malformed, it's caught here */
|
2022-03-31 11:10:50 +02:00
|
|
|
warn = check_channel_announcement(rstate,
|
2018-03-08 05:10:31 +01:00
|
|
|
&pending->node_id_1,
|
|
|
|
&pending->node_id_2,
|
|
|
|
&pending->bitcoin_key_1,
|
|
|
|
&pending->bitcoin_key_2,
|
|
|
|
&node_signature_1,
|
|
|
|
&node_signature_2,
|
|
|
|
&bitcoin_signature_1,
|
|
|
|
&bitcoin_signature_2,
|
|
|
|
pending->announce);
|
2022-03-31 11:10:50 +02:00
|
|
|
if (warn) {
|
2018-03-08 05:10:31 +01:00
|
|
|
/* BOLT #7:
|
|
|
|
*
|
|
|
|
* - if `bitcoin_signature_1`, `bitcoin_signature_2`,
|
|
|
|
* `node_signature_1` OR `node_signature_2` are invalid OR NOT
|
|
|
|
* correct:
|
2022-03-31 11:10:50 +02:00
|
|
|
* - SHOULD send a `warning`.
|
|
|
|
* - MAY close the connection.
|
|
|
|
* - MUST ignore the message.
|
2018-03-08 05:10:31 +01:00
|
|
|
*/
|
2018-03-13 00:06:00 +01:00
|
|
|
goto malformed;
|
2018-01-04 12:40:58 +01:00
|
|
|
}
|
2017-12-21 10:53:37 +01:00
|
|
|
|
2019-09-22 04:21:34 +02:00
|
|
|
/* Don't add an infinite number of pending announcements. If we're
|
|
|
|
* catching up with the bitcoin chain, though, they can definitely
|
|
|
|
* pile up. */
|
2023-01-03 05:46:52 +01:00
|
|
|
if (pending_cannouncement_map_count(rstate->pending_cannouncements)
|
2019-09-22 04:21:34 +02:00
|
|
|
> 100000) {
|
|
|
|
static bool warned = false;
|
|
|
|
if (!warned) {
|
2023-07-06 09:35:54 +02:00
|
|
|
status_peer_unusual(pending->source_peer,
|
2019-11-18 01:26:17 +01:00
|
|
|
"Flooded by channel_announcements:"
|
|
|
|
" ignoring some");
|
2019-09-22 04:21:34 +02:00
|
|
|
warned = true;
|
|
|
|
}
|
|
|
|
goto ignored;
|
|
|
|
}
|
|
|
|
|
2023-07-06 09:35:54 +02:00
|
|
|
status_peer_debug(pending->source_peer,
|
2019-11-18 01:26:17 +01:00
|
|
|
"Received channel_announcement for channel %s",
|
|
|
|
type_to_string(tmpctx, struct short_channel_id,
|
|
|
|
&pending->short_channel_id));
|
2017-11-24 15:47:14 +01:00
|
|
|
|
2018-02-02 19:49:12 +01:00
|
|
|
/* Add both endpoints to the pending_node_map so we can stash
|
|
|
|
* node_announcements while we wait for the txout check */
|
2019-04-10 09:31:29 +02:00
|
|
|
catch_node_announcement(pending, rstate, &pending->node_id_1);
|
|
|
|
catch_node_announcement(pending, rstate, &pending->node_id_2);
|
2018-02-02 19:49:12 +01:00
|
|
|
|
2023-01-03 05:46:52 +01:00
|
|
|
pending_cannouncement_map_add(rstate->pending_cannouncements, pending);
|
2018-03-02 09:59:13 +01:00
|
|
|
tal_add_destructor2(pending, destroy_pending_cannouncement, rstate);
|
|
|
|
|
2018-03-13 00:06:00 +01:00
|
|
|
/* Success */
|
2019-02-10 22:18:17 +01:00
|
|
|
// MSC: Cppcheck 1.86 gets this false positive
|
|
|
|
// cppcheck-suppress autoVariables
|
2018-03-08 05:10:31 +01:00
|
|
|
*scid = &pending->short_channel_id;
|
|
|
|
return NULL;
|
2018-03-13 00:06:00 +01:00
|
|
|
|
|
|
|
malformed:
|
|
|
|
tal_free(pending);
|
|
|
|
*scid = NULL;
|
2022-03-31 11:10:50 +02:00
|
|
|
return warn;
|
2018-03-13 00:06:00 +01:00
|
|
|
|
|
|
|
ignored:
|
|
|
|
tal_free(pending);
|
|
|
|
*scid = NULL;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
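/* A minimal caller sketch (hypothetical helper names, for
 * illustration only): NULL means the announcement was accepted or
 * silently ignored; a non-NULL return is a warning to relay to the
 * offending peer.
 *
 *   const struct short_channel_id *scid;
 *   u8 *warn = handle_channel_announcement(rstate, msg, blockheight,
 *                                          &scid, peer_id);
 *   if (warn)
 *       send_warning_to_peer(peer, take(warn)); // hypothetical
 *   else if (scid)
 *       lookup_funding_txout(*scid);            // hypothetical
 */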
2019-10-08 03:13:24 +02:00
|
|
|
static void process_pending_channel_update(struct daemon *daemon,
|
|
|
|
struct routing_state *rstate,
|
2018-03-13 00:06:00 +01:00
|
|
|
const struct short_channel_id *scid,
|
2019-10-08 03:13:24 +02:00
|
|
|
const u8 *cupdate,
|
2023-07-06 09:35:54 +02:00
|
|
|
const struct node_id *source_peer)
|
2018-03-13 00:06:00 +01:00
|
|
|
{
|
|
|
|
u8 *err;
|
|
|
|
|
|
|
|
if (!cupdate)
|
|
|
|
return;
|
|
|
|
|
2023-07-06 09:35:54 +02:00
|
|
|
err = handle_channel_update(rstate, cupdate, source_peer, NULL, false);
|
2018-03-13 00:06:00 +01:00
|
|
|
if (err) {
|
2019-10-08 03:14:24 +02:00
|
|
|
/* FIXME: We could send this error back to peer if != NULL */
|
2023-07-06 09:35:54 +02:00
|
|
|
status_peer_debug(source_peer,
|
2019-11-18 01:26:17 +01:00
|
|
|
"Pending channel_update for %s: %s",
|
|
|
|
type_to_string(tmpctx, struct short_channel_id,
|
|
|
|
scid),
|
|
|
|
sanitize_error(tmpctx, err, NULL));
|
2018-03-13 00:06:00 +01:00
|
|
|
tal_free(err);
|
|
|
|
}
|
2018-01-04 12:40:58 +01:00
|
|
|
}
|
2017-12-02 23:38:43 +01:00
|
|
|
|
2019-10-08 03:13:24 +02:00
|
|
|
bool handle_pending_cannouncement(struct daemon *daemon,
|
|
|
|
struct routing_state *rstate,
|
2018-01-04 12:40:58 +01:00
|
|
|
const struct short_channel_id *scid,
|
2019-02-21 04:45:55 +01:00
|
|
|
struct amount_sat sat,
|
2018-01-04 12:40:58 +01:00
|
|
|
const u8 *outscript)
|
|
|
|
{
|
|
|
|
const u8 *s;
|
|
|
|
struct pending_cannouncement *pending;
|
2018-01-31 17:53:50 +01:00
|
|
|
|
2018-03-02 09:59:13 +01:00
|
|
|
pending = find_pending_cannouncement(rstate, scid);
|
2018-02-27 21:00:45 +01:00
|
|
|
if (!pending)
|
2019-06-12 01:27:07 +02:00
|
|
|
return false;
|
2018-01-04 12:40:58 +01:00
|
|
|
|
|
|
|
/* BOLT #7:
|
|
|
|
*
|
2019-01-14 03:22:05 +01:00
|
|
|
* The receiving node:
|
2018-06-17 12:13:44 +02:00
|
|
|
*...
|
|
|
|
* - if the `short_channel_id`'s output... is spent:
|
|
|
|
* - MUST ignore the message.
|
2018-01-04 12:40:58 +01:00
|
|
|
*/
|
2018-07-28 08:00:16 +02:00
|
|
|
if (tal_count(outscript) == 0) {
|
2023-07-06 09:35:54 +02:00
|
|
|
status_peer_debug(pending->source_peer,
|
2019-11-18 01:26:17 +01:00
|
|
|
"channel_announcement: no unspent txout %s",
|
|
|
|
type_to_string(pending,
|
|
|
|
struct short_channel_id,
|
|
|
|
scid));
|
2018-01-04 12:40:58 +01:00
|
|
|
tal_free(pending);
|
2019-09-27 02:04:34 +02:00
|
|
|
add_to_txout_failures(rstate, scid);
|
2019-06-12 01:27:07 +02:00
|
|
|
return false;
|
2018-01-04 12:40:58 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* BOLT #7:
|
|
|
|
*
|
2019-01-14 03:22:05 +01:00
|
|
|
* The receiving node:
|
2018-06-17 12:13:44 +02:00
|
|
|
*...
|
|
|
|
* - if the `short_channel_id`'s output does NOT correspond to a P2WSH
|
|
|
|
* (using `bitcoin_key_1` and `bitcoin_key_2`, as specified in
|
|
|
|
* [BOLT #3](03-transactions.md#funding-transaction-output)) ...
|
|
|
|
* - MUST ignore the message.
|
2018-01-04 12:40:58 +01:00
|
|
|
*/
|
|
|
|
s = scriptpubkey_p2wsh(pending,
|
|
|
|
bitcoin_redeem_2of2(pending,
|
|
|
|
&pending->bitcoin_key_1,
|
|
|
|
&pending->bitcoin_key_2));
|
|
|
|
|
|
|
|
if (!scripteq(s, outscript)) {
|
2023-07-06 09:35:54 +02:00
|
|
|
status_peer_debug(pending->source_peer,
|
2019-11-18 01:26:17 +01:00
|
|
|
"channel_announcement: txout %s expected %s, got %s",
|
|
|
|
type_to_string(
|
|
|
|
pending, struct short_channel_id,
|
|
|
|
scid),
|
|
|
|
tal_hex(tmpctx, s),
|
|
|
|
tal_hex(tmpctx, outscript));
|
2018-01-04 12:40:58 +01:00
|
|
|
tal_free(pending);
|
2019-06-12 01:27:07 +02:00
|
|
|
return false;
|
2017-04-03 05:58:03 +02:00
|
|
|
}
|
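/* For reference, the script compared above is the BOLT #3 funding
 * output: a P2WSH (OP_0 <sha256(witness_script)>) wrapping the 2-of-2
 * witness script
 *   OP_2 <funding_pubkey1> <funding_pubkey2> OP_2 OP_CHECKMULTISIG
 * with the lexicographically lesser compressed pubkey first (key
 * ordering is assumed to be handled inside bitcoin_redeem_2of2). */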
|
|
|
|
2019-04-10 09:31:29 +02:00
|
|
|
/* Remove pending now, so below functions don't see it. */
|
2023-01-03 05:46:52 +01:00
|
|
|
pending_cannouncement_map_del(rstate->pending_cannouncements, pending);
|
2019-04-10 09:31:29 +02:00
|
|
|
tal_del_destructor2(pending, destroy_pending_cannouncement, rstate);
|
|
|
|
|
2019-09-22 03:29:01 +02:00
|
|
|
/* Can fail if channel_announcement too old */
|
2019-10-08 03:13:24 +02:00
|
|
|
if (!routing_add_channel_announcement(rstate, pending->announce, sat, 0,
|
2023-07-06 09:35:54 +02:00
|
|
|
pending->source_peer))
|
|
|
|
status_peer_unusual(pending->source_peer,
|
2019-11-18 01:26:17 +01:00
|
|
|
"Could not add channel_announcement %s: too old?",
|
|
|
|
tal_hex(tmpctx, pending->announce));
|
2019-09-22 03:29:01 +02:00
|
|
|
else {
|
|
|
|
/* Did we have an update waiting? If so, apply now. */
|
2019-10-08 03:13:24 +02:00
|
|
|
process_pending_channel_update(daemon, rstate, scid, pending->updates[0],
|
2023-07-06 09:35:54 +02:00
|
|
|
pending->update_source_peer[0]);
|
2019-10-08 03:13:24 +02:00
|
|
|
process_pending_channel_update(daemon, rstate, scid, pending->updates[1],
|
2023-07-06 09:35:54 +02:00
|
|
|
pending->update_source_peer[1]);
|
2019-09-22 03:29:01 +02:00
|
|
|
}
|
2018-01-04 12:40:58 +01:00
|
|
|
|
2018-01-04 12:40:58 +01:00
|
|
|
tal_free(pending);
|
2019-06-12 01:27:07 +02:00
|
|
|
return true;
|
2017-02-01 15:09:26 +01:00
|
|
|
}
|
|
|
|
|
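/* Lifecycle of a pending announcement, as a sketch of the flow above:
 *   1. handle_channel_announcement(): parse, verify signatures, stash
 *      a struct pending_cannouncement and hand back the scid;
 *   2. the funding txout is fetched out-of-band (via lightningd);
 *   3. handle_pending_cannouncement(): check the script and amount,
 *      then routing_add_channel_announcement() and replay any
 *      channel_updates queued in the meantime. */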
2018-03-02 09:59:13 +01:00
|
|
|
static void update_pending(struct pending_cannouncement *pending,
|
2018-02-27 20:58:20 +01:00
|
|
|
u32 timestamp, const u8 *update,
|
2019-10-15 01:59:44 +02:00
|
|
|
const u8 direction,
|
2023-07-06 09:35:54 +02:00
|
|
|
const struct node_id *source_peer TAKES)
|
2018-01-04 12:40:58 +01:00
|
|
|
{
|
2019-01-15 05:11:27 +01:00
|
|
|
SUPERVERBOSE("Deferring update for pending channel %s/%d",
|
2018-03-15 05:30:38 +01:00
|
|
|
type_to_string(tmpctx, struct short_channel_id,
|
2018-03-02 09:59:13 +01:00
|
|
|
&pending->short_channel_id), direction);
|
2018-01-04 12:40:58 +01:00
|
|
|
|
2018-05-21 06:36:55 +02:00
|
|
|
if (pending->update_timestamps[direction] < timestamp) {
|
2018-02-02 14:55:54 +01:00
|
|
|
if (pending->updates[direction]) {
|
2023-07-06 09:35:54 +02:00
|
|
|
status_peer_debug(source_peer,
|
2019-11-18 01:26:17 +01:00
|
|
|
"Replacing existing update");
|
2018-02-02 14:55:54 +01:00
|
|
|
tal_free(pending->updates[direction]);
|
|
|
|
}
|
2020-02-27 03:17:01 +01:00
|
|
|
pending->updates[direction]
|
|
|
|
= tal_dup_talarr(pending, u8, update);
|
2018-02-02 14:55:54 +01:00
|
|
|
pending->update_timestamps[direction] = timestamp;
|
2023-07-06 09:35:54 +02:00
|
|
|
tal_free(pending->update_source_peer[direction]);
|
|
|
|
pending->update_source_peer[direction]
|
|
|
|
= tal_dup_or_null(pending, struct node_id, source_peer);
|
|
|
|
} else {
|
|
|
|
/* Don't leak if we don't update! */
|
|
|
|
if (taken(source_peer))
|
|
|
|
tal_free(source_peer);
|
2018-01-12 18:55:29 +01:00
|
|
|
}
|
2018-01-04 12:40:58 +01:00
|
|
|
}
|
|
|
|
|
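/* A note on the TAKES convention used above (ccan/take): a caller may
 * pass take(ptr) to transfer ownership, and the callee must then free
 * or steal the pointer on every path, as the else-branch does.
 * Sketch:
 *
 *   update_pending(pending, ts, update, dir, take(peer_id));
 *   // peer_id is now owned (or already freed) by update_pending()
 */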
2022-10-18 00:12:23 +02:00
|
|
|
static void delete_spam_update(struct routing_state *rstate,
|
|
|
|
struct half_chan *hc,
|
|
|
|
bool update_is_public)
|
|
|
|
{
|
|
|
|
/* Spam updates will have a unique rgraph index */
|
|
|
|
if (hc->rgraph.index == hc->bcast.index)
|
|
|
|
return;
|
|
|
|
gossip_store_delete(rstate->gs, &hc->rgraph,
|
|
|
|
update_is_public
|
|
|
|
? WIRE_CHANNEL_UPDATE
|
|
|
|
: WIRE_GOSSIP_STORE_PRIVATE_UPDATE);
|
|
|
|
hc->rgraph.index = hc->bcast.index;
|
|
|
|
hc->rgraph.timestamp = hc->bcast.timestamp;
|
|
|
|
}
|
|
|
|
|
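/* Invariant behind delete_spam_update(), as understood from its use:
 *   hc->bcast  - index/timestamp of the broadcastable store entry
 *   hc->rgraph - entry feeding our own routing graph; differs from
 *                bcast only when a newer, rate-limited ("spam")
 *                update is kept for local routing
 * so rgraph.index != bcast.index implies exactly one extra spam
 * entry to delete. */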
2018-04-11 01:03:35 +02:00
|
|
|
bool routing_add_channel_update(struct routing_state *rstate,
|
2019-04-10 09:31:29 +02:00
|
|
|
const u8 *update TAKES,
|
2019-10-08 03:13:24 +02:00
|
|
|
u32 index,
|
2023-07-06 09:35:54 +02:00
|
|
|
const struct node_id *source_peer,
|
2022-07-11 00:13:09 +02:00
|
|
|
bool ignore_timestamp,
|
2023-02-28 20:04:00 +01:00
|
|
|
bool force_spam_flag,
|
|
|
|
bool force_zombie_flag)
|
2018-03-22 15:11:24 +01:00
|
|
|
{
|
|
|
|
secp256k1_ecdsa_signature signature;
|
|
|
|
struct short_channel_id short_channel_id;
|
|
|
|
u32 timestamp;
|
2018-09-20 02:59:46 +02:00
|
|
|
u8 message_flags, channel_flags;
|
2018-03-22 15:11:24 +01:00
|
|
|
u16 expiry;
|
2019-02-21 04:45:55 +01:00
|
|
|
struct amount_msat htlc_minimum, htlc_maximum;
|
2018-03-22 15:11:24 +01:00
|
|
|
u32 fee_base_msat;
|
|
|
|
u32 fee_proportional_millionths;
|
|
|
|
struct bitcoin_blkid chain_hash;
|
|
|
|
struct chan *chan;
|
2019-04-10 09:31:29 +02:00
|
|
|
struct half_chan *hc;
|
2019-04-11 07:15:13 +02:00
|
|
|
struct unupdated_channel *uc;
|
2018-03-22 15:11:24 +01:00
|
|
|
u8 direction;
|
2019-04-11 07:15:13 +02:00
|
|
|
struct amount_sat sat;
|
2022-05-04 15:48:59 +02:00
|
|
|
bool spam;
|
2022-12-16 19:38:23 +01:00
|
|
|
bool zombie;
|
2018-03-22 15:11:24 +01:00
|
|
|
|
2019-04-10 09:31:18 +02:00
|
|
|
/* Make sure we own msg, even if we don't save it. */
|
|
|
|
if (taken(update))
|
|
|
|
tal_steal(tmpctx, update);
|
|
|
|
|
2022-09-14 05:50:31 +02:00
|
|
|
if (!fromwire_channel_update(
|
2018-10-04 22:34:53 +02:00
|
|
|
update, &signature, &chain_hash,
|
|
|
|
&short_channel_id, ×tamp,
|
|
|
|
&message_flags, &channel_flags,
|
2019-02-21 04:45:55 +01:00
|
|
|
&expiry, &htlc_minimum, &fee_base_msat,
|
2018-10-04 22:34:53 +02:00
|
|
|
&fee_proportional_millionths,
|
2019-02-21 04:45:55 +01:00
|
|
|
&htlc_maximum))
|
2018-04-11 01:03:35 +02:00
|
|
|
return false;
|
2019-04-10 09:31:29 +02:00
|
|
|
|
|
|
|
direction = channel_flags & 0x1;
|
2018-03-22 15:11:24 +01:00
|
|
|
chan = get_channel(rstate, &short_channel_id);
|
2019-04-11 07:15:13 +02:00
|
|
|
|
|
|
|
if (chan) {
|
|
|
|
uc = NULL;
|
|
|
|
sat = chan->sat;
|
2022-12-16 19:38:23 +01:00
|
|
|
zombie = is_chan_zombie(chan);
|
2019-04-11 07:15:13 +02:00
|
|
|
} else {
|
|
|
|
/* Maybe announcement was waiting for this update? */
|
|
|
|
uc = get_unupdated_channel(rstate, &short_channel_id);
|
|
|
|
if (!uc) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
sat = uc->sat;
|
2023-02-28 20:04:00 +01:00
|
|
|
/* When loading zombies from the store. */
|
|
|
|
zombie = force_zombie_flag;
|
2019-04-11 07:15:13 +02:00
|
|
|
}
|
2018-04-11 01:03:35 +02:00
|
|
|
|
2022-09-14 05:50:31 +02:00
|
|
|
/* Reject update if the `htlc_maximum_msat` is greater
|
|
|
|
* than the total available channel satoshis */
|
|
|
|
if (amount_msat_greater_sat(htlc_maximum, sat))
|
|
|
|
return false;
|
2018-10-05 21:49:09 +02:00
|
|
|
|
2019-09-18 03:04:56 +02:00
|
|
|
/* Check timestamp is sane (unless from store). */
|
|
|
|
if (!index && !timestamp_reasonable(rstate, timestamp)) {
|
2020-08-10 15:19:46 +02:00
|
|
|
SUPERVERBOSE("Ignoring update timestamp %u for %s/%u",
|
|
|
|
timestamp,
|
|
|
|
type_to_string(tmpctx, struct short_channel_id,
|
|
|
|
&short_channel_id),
|
|
|
|
direction);
|
2019-09-18 03:04:56 +02:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-04-11 07:15:13 +02:00
|
|
|
/* OK, we're going to accept this, so create chan if it doesn't exist */
|
|
|
|
if (uc) {
|
|
|
|
assert(!chan);
|
|
|
|
chan = new_chan(rstate, &short_channel_id,
|
2021-06-14 23:07:39 +02:00
|
|
|
&uc->id[0], &uc->id[1], sat);
|
2023-02-28 20:04:00 +01:00
|
|
|
/* Assign zombie flag if loading zombie from store */
|
|
|
|
if (force_zombie_flag)
|
|
|
|
chan->half[direction].zombie = true;
|
2019-04-11 07:15:13 +02:00
|
|
|
}
|
|
|
|
|
2019-04-10 09:31:29 +02:00
|
|
|
/* Discard older updates */
|
|
|
|
hc = &chan->half[direction];
|
2019-06-03 20:24:25 +02:00
|
|
|
|
2021-01-29 01:00:09 +01:00
|
|
|
if (is_halfchan_defined(hc) && !ignore_timestamp) {
|
2022-07-11 00:13:09 +02:00
|
|
|
/* The gossip_store should contain a single broadcastable entry
|
|
|
|
* and potentially one rate-limited entry. Any more is a bug */
|
|
|
|
if (index) {
|
|
|
|
if (!force_spam_flag) {
|
|
|
|
status_broken("gossip_store broadcastable "
|
|
|
|
"channel_update %u replaces %u!",
|
|
|
|
index, hc->bcast.index);
|
|
|
|
return false;
|
|
|
|
} else if (hc->bcast.index != hc->rgraph.index){
|
|
|
|
status_broken("gossip_store rate-limited "
|
|
|
|
"channel_update %u replaces %u!",
|
2022-07-13 00:31:43 +02:00
|
|
|
index, hc->rgraph.index);
|
2022-07-11 00:13:09 +02:00
|
|
|
return false;
|
|
|
|
}
|
2019-09-16 12:44:00 +02:00
|
|
|
}
|
2019-06-03 20:24:25 +02:00
|
|
|
|
2022-05-07 01:24:18 +02:00
|
|
|
if (timestamp <= hc->rgraph.timestamp) {
|
2019-09-16 12:44:00 +02:00
|
|
|
SUPERVERBOSE("Ignoring outdated update.");
|
|
|
|
/* Ignoring != failing */
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Allow redundant updates once every 7 days */
|
2019-09-26 04:00:20 +02:00
|
|
|
if (timestamp < hc->bcast.timestamp + GOSSIP_PRUNE_INTERVAL(rstate->dev_fast_gossip_prune) / 2
|
2019-09-16 12:44:00 +02:00
|
|
|
&& !cupdate_different(rstate->gs, hc, update)) {
|
2020-08-10 15:19:46 +02:00
|
|
|
SUPERVERBOSE("Ignoring redundant update for %s/%u"
|
|
|
|
" (last %u, now %u)",
|
|
|
|
type_to_string(tmpctx,
|
|
|
|
struct short_channel_id,
|
|
|
|
&short_channel_id),
|
|
|
|
direction, hc->bcast.timestamp, timestamp);
|
2019-09-16 12:44:00 +02:00
|
|
|
/* Ignoring != failing */
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2022-10-18 00:12:23 +02:00
|
|
|
/* Make sure it's not spamming us (private channel
|
|
|
|
* updates are never considered spam) */
|
|
|
|
if (is_chan_public(chan)
|
|
|
|
&& !ratelimit(rstate,
|
|
|
|
&hc->tokens, hc->bcast.timestamp, timestamp)) {
|
2023-07-06 09:35:54 +02:00
|
|
|
status_peer_debug(source_peer,
|
2022-05-04 15:48:59 +02:00
|
|
|
"Spammy update for %s/%u flagged"
|
2019-11-18 01:26:17 +01:00
|
|
|
" (last %u, now %u)",
|
|
|
|
type_to_string(tmpctx,
|
|
|
|
struct short_channel_id,
|
|
|
|
&short_channel_id),
|
|
|
|
direction,
|
|
|
|
hc->bcast.timestamp, timestamp);
|
2022-05-04 15:48:59 +02:00
|
|
|
spam = true;
|
|
|
|
} else {
|
|
|
|
spam = false;
|
2019-09-16 12:44:00 +02:00
|
|
|
}
|
2022-05-04 15:48:59 +02:00
|
|
|
} else {
|
|
|
|
spam = false;
|
2019-04-10 09:31:29 +02:00
|
|
|
}
|
2022-07-11 00:13:09 +02:00
|
|
|
if (force_spam_flag)
|
|
|
|
spam = true;
|
2022-10-18 00:12:23 +02:00
|
|
|
|
|
|
|
/* Delete any prior entries (noop if they don't exist) */
|
|
|
|
delete_spam_update(rstate, hc, is_chan_public(chan));
|
|
|
|
if (!spam)
|
2022-05-07 01:24:18 +02:00
|
|
|
gossip_store_delete(rstate->gs, &hc->bcast,
|
|
|
|
is_chan_public(chan)
|
|
|
|
? WIRE_CHANNEL_UPDATE
|
|
|
|
: WIRE_GOSSIP_STORE_PRIVATE_UPDATE);
|
2022-10-18 00:12:23 +02:00
|
|
|
|
|
|
|
/* Update timestamp(s) */
|
|
|
|
hc->rgraph.timestamp = timestamp;
|
|
|
|
if (!spam)
|
|
|
|
hc->bcast.timestamp = timestamp;
|
2019-04-10 09:31:29 +02:00
|
|
|
|
2019-04-11 07:15:13 +02:00
|
|
|
/* BOLT #7:
|
|
|
|
* - MUST consider the `timestamp` of the `channel_announcement` to be
|
|
|
|
* the `timestamp` of a corresponding `channel_update`.
|
|
|
|
* - MUST consider whether to send the `channel_announcement` after
|
|
|
|
* receiving the first corresponding `channel_update`.
|
|
|
|
*/
|
|
|
|
if (uc) {
|
2019-04-11 07:15:22 +02:00
|
|
|
add_channel_announce_to_broadcast(rstate, chan,
|
|
|
|
uc->channel_announce,
|
|
|
|
timestamp,
|
2019-04-11 07:15:13 +02:00
|
|
|
uc->index);
|
2019-04-11 07:15:22 +02:00
|
|
|
} else if (!is_chan_public(chan)) {
|
2019-04-11 07:15:13 +02:00
|
|
|
/* For private channels, we get updates without an announce: don't
|
|
|
|
* broadcast them! But save local ones to store anyway. */
|
2021-12-29 04:26:43 +01:00
|
|
|
assert(local_direction(rstate, chan, NULL));
|
2019-06-03 20:23:25 +02:00
|
|
|
/* Don't save if we're loading from store */
|
2019-04-11 07:15:22 +02:00
|
|
|
if (!index) {
|
2019-06-03 20:05:25 +02:00
|
|
|
hc->bcast.index
|
2019-06-03 20:22:25 +02:00
|
|
|
= gossip_store_add_private_update(rstate->gs,
|
2019-06-03 20:05:25 +02:00
|
|
|
update);
|
2022-05-07 01:24:18 +02:00
|
|
|
/* No need to separately track spam for private
|
|
|
|
* channels. */
|
|
|
|
hc->rgraph.index = hc->bcast.index;
|
|
|
|
} else {
|
2019-04-11 07:15:13 +02:00
|
|
|
hc->bcast.index = index;
|
2022-05-07 01:24:18 +02:00
|
|
|
hc->rgraph.index = index;
|
|
|
|
}
|
2018-05-10 09:50:03 +02:00
|
|
|
return true;
|
2019-04-08 01:52:19 +02:00
|
|
|
}
|
2018-05-10 09:50:03 +02:00
|
|
|
|
2022-12-16 19:38:23 +01:00
|
|
|
/* Handle resurrection of zombie channels if the other side of the
|
|
|
|
* zombie channel has a recent timestamp. */
|
|
|
|
if (zombie && timestamp_reasonable(rstate,
|
2023-02-10 00:52:07 +01:00
|
|
|
chan->half[!direction].bcast.timestamp) &&
|
2023-02-28 19:55:45 +01:00
|
|
|
chan->half[!direction].bcast.index && !index) {
|
2023-07-06 09:35:54 +02:00
|
|
|
status_peer_debug(source_peer,
|
2022-12-16 19:38:23 +01:00
|
|
|
"Resurrecting zombie channel %s.",
|
|
|
|
type_to_string(tmpctx,
|
|
|
|
struct short_channel_id,
|
|
|
|
&chan->scid));
|
|
|
|
const u8 *zombie_announcement = NULL;
|
|
|
|
const u8 *zombie_addendum = NULL;
|
|
|
|
const u8 *zombie_update[2] = {NULL, NULL};
|
|
|
|
/* Resurrection is a careful process. First delete the zombie-
|
|
|
|
* flagged channel_announcement which has already been
|
|
|
|
* tombstoned, and re-add to the store without zombie flag. */
|
|
|
|
zombie_announcement = gossip_store_get(tmpctx, rstate->gs,
|
|
|
|
chan->bcast.index);
|
|
|
|
u32 offset = tal_count(zombie_announcement) +
|
|
|
|
sizeof(struct gossip_hdr);
|
|
|
|
/* The channel_announcement addendum records the channel's size (its capacity). */
|
|
|
|
zombie_addendum = gossip_store_get(tmpctx, rstate->gs,
|
|
|
|
chan->bcast.index + offset);
|
|
|
|
gossip_store_delete(rstate->gs, &chan->bcast,
|
|
|
|
is_chan_public(chan)
|
|
|
|
? WIRE_CHANNEL_ANNOUNCEMENT
|
|
|
|
: WIRE_GOSSIP_STORE_PRIVATE_CHANNEL);
|
|
|
|
chan->bcast.index =
|
|
|
|
gossip_store_add(rstate->gs, zombie_announcement,
|
|
|
|
chan->bcast.timestamp,
|
|
|
|
false, false, zombie_addendum);
|
|
|
|
/* Deletion of the old addendum is optional. */
|
|
|
|
/* This opposing channel_update has been stashed away. Now that
|
|
|
|
* there are two valid updates, this one gets restored. */
|
|
|
|
/* FIXME: Handle spam case probably needs a helper f'n */
|
|
|
|
zombie_update[0] = gossip_store_get(tmpctx, rstate->gs,
|
|
|
|
chan->half[!direction].bcast.index);
|
2023-03-03 15:30:40 +01:00
|
|
|
if (chan->half[!direction].bcast.index != chan->half[!direction].rgraph.index) {
|
2022-12-16 19:38:23 +01:00
|
|
|
/* Don't forget the spam channel_update */
|
|
|
|
zombie_update[1] = gossip_store_get(tmpctx, rstate->gs,
|
|
|
|
chan->half[!direction].rgraph.index);
|
2023-03-03 15:30:40 +01:00
|
|
|
gossip_store_delete(rstate->gs, &chan->half[!direction].rgraph,
|
|
|
|
is_chan_public(chan)
|
|
|
|
? WIRE_CHANNEL_UPDATE
|
|
|
|
: WIRE_GOSSIP_STORE_PRIVATE_UPDATE);
|
|
|
|
}
|
2022-12-16 19:38:23 +01:00
|
|
|
gossip_store_delete(rstate->gs, &chan->half[!direction].bcast,
|
|
|
|
is_chan_public(chan)
|
|
|
|
? WIRE_CHANNEL_UPDATE
|
|
|
|
: WIRE_GOSSIP_STORE_PRIVATE_UPDATE);
|
|
|
|
chan->half[!direction].bcast.index =
|
|
|
|
gossip_store_add(rstate->gs, zombie_update[0],
|
|
|
|
chan->half[!direction].bcast.timestamp,
|
|
|
|
false, false, NULL);
|
|
|
|
if (zombie_update[1])
|
|
|
|
chan->half[!direction].rgraph.index =
|
|
|
|
gossip_store_add(rstate->gs, zombie_update[1],
|
|
|
|
chan->half[!direction].rgraph.timestamp,
|
|
|
|
false, true, NULL);
|
|
|
|
else
|
|
|
|
chan->half[!direction].rgraph.index = chan->half[!direction].bcast.index;
|
|
|
|
|
|
|
|
/* It's a miracle! */
|
|
|
|
chan->half[0].zombie = false;
|
|
|
|
chan->half[1].zombie = false;
|
|
|
|
zombie = false;
|
|
|
|
}
|
|
|
|
|
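/* Resurrection above, step by step: (1) copy the zombie-flagged
 * channel_announcement (and its addendum) out of the store, delete
 * it, and append it again without the zombie flag; (2) likewise for
 * the stashed opposite-direction update, plus its rate-limited
 * sibling if one exists; (3) clear both half[].zombie flags so the
 * triggering update is stored normally below. */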
2019-04-11 07:15:13 +02:00
|
|
|
/* If we're loading from store, this means we don't re-add to store. */
|
2022-05-07 01:24:18 +02:00
|
|
|
if (index) {
|
|
|
|
if (!spam)
|
|
|
|
hc->bcast.index = index;
|
|
|
|
hc->rgraph.index = index;
|
|
|
|
} else {
|
|
|
|
hc->rgraph.index
|
|
|
|
= gossip_store_add(rstate->gs, update, timestamp,
|
2022-12-16 19:38:23 +01:00
|
|
|
zombie, spam, NULL);
|
2019-10-08 03:30:24 +02:00
|
|
|
if (hc->bcast.timestamp > rstate->last_timestamp
|
|
|
|
&& hc->bcast.timestamp < time_now().ts.tv_sec)
|
|
|
|
rstate->last_timestamp = hc->bcast.timestamp;
|
2022-05-07 01:24:18 +02:00
|
|
|
if (!spam)
|
|
|
|
hc->bcast.index = hc->rgraph.index;
|
2019-10-08 03:30:24 +02:00
|
|
|
|
2023-07-06 09:35:54 +02:00
|
|
|
peer_supplied_good_gossip(rstate->daemon, source_peer, 1);
|
2019-10-08 03:13:24 +02:00
|
|
|
}
|
2019-04-11 07:15:13 +02:00
|
|
|
|
|
|
|
if (uc) {
|
|
|
|
/* If we were waiting for these nodes to appear (or gain a
|
|
|
|
public channel), process node_announcements now */
|
|
|
|
process_pending_node_announcement(rstate, &chan->nodes[0]->id);
|
|
|
|
process_pending_node_announcement(rstate, &chan->nodes[1]->id);
|
|
|
|
tal_free(uc);
|
|
|
|
}
|
2020-08-10 15:19:46 +02:00
|
|
|
|
2023-07-06 09:35:54 +02:00
|
|
|
status_peer_debug(source_peer,
|
2021-01-29 01:00:09 +01:00
|
|
|
"Received %schannel_update for channel %s/%d now %s",
|
|
|
|
ignore_timestamp ? "(forced) " : "",
|
2020-08-10 15:19:46 +02:00
|
|
|
type_to_string(tmpctx, struct short_channel_id,
|
|
|
|
&short_channel_id),
|
|
|
|
channel_flags & 0x01,
|
|
|
|
channel_flags & ROUTING_FLAGS_DISABLED ? "DISABLED" : "ACTIVE");
|
|
|
|
|
2018-04-11 01:03:35 +02:00
|
|
|
return true;
|
2018-03-22 15:11:24 +01:00
|
|
|
}
|
|
|
|
|
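/* The `index` parameter above distinguishes store replay from live
 * gossip; a sketch of the two call shapes (names illustrative):
 *
 *   routing_add_channel_update(rstate, take(update), 0, peer,
 *                              false, false, false); // fresh gossip
 *   routing_add_channel_update(rstate, msg, store_offset, NULL,
 *                              false, spam, zombie); // store replay
 *
 * A non-zero index means the record already sits in the gossip_store
 * at that offset, so it is not re-appended and timestamp checks are
 * relaxed. */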
2019-10-08 03:28:24 +02:00
|
|
|
bool would_ratelimit_cupdate(struct routing_state *rstate,
|
|
|
|
const struct half_chan *hc,
|
|
|
|
u32 timestamp)
|
|
|
|
{
|
|
|
|
return update_tokens(rstate, hc->tokens, hc->bcast.timestamp, timestamp)
|
|
|
|
>= TOKENS_PER_MSG;
|
|
|
|
}
|
|
|
|
|
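/* Worked example for would_ratelimit_cupdate(), assuming the default
 * non-dev refill rate of one token per hour:
 *
 *   tokens=0, bcast.timestamp 6h old  -> update_tokens() = 6
 *     -> 6 >= TOKENS_PER_MSG (12) is false: would be rate-limited
 *   tokens=0, bcast.timestamp 12h old -> update_tokens() = 12
 *     -> true: enough budget for one more update
 */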
2019-04-10 09:31:29 +02:00
|
|
|
static const struct node_id *get_channel_owner(struct routing_state *rstate,
|
|
|
|
const struct short_channel_id *scid,
|
|
|
|
int direction)
|
|
|
|
{
|
|
|
|
struct chan *chan = get_channel(rstate, scid);
|
2019-04-11 07:15:13 +02:00
|
|
|
struct unupdated_channel *uc;
|
2019-04-10 09:31:29 +02:00
|
|
|
|
|
|
|
if (chan)
|
|
|
|
return &chan->nodes[direction]->id;
|
2019-04-11 07:15:13 +02:00
|
|
|
|
|
|
|
/* Might be unupdated channel */
|
|
|
|
uc = get_unupdated_channel(rstate, scid);
|
|
|
|
if (uc)
|
|
|
|
return &uc->id[direction];
|
2019-04-10 09:31:29 +02:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2018-09-25 07:43:56 +02:00
|
|
|
u8 *handle_channel_update(struct routing_state *rstate, const u8 *update TAKES,
|
2023-07-06 09:35:54 +02:00
|
|
|
const struct node_id *source_peer,
|
2021-01-29 01:00:09 +01:00
|
|
|
struct short_channel_id *unknown_scid,
|
|
|
|
bool force)
|
2017-02-01 15:09:26 +01:00
|
|
|
{
|
|
|
|
u8 *serialized;
|
2019-04-10 09:31:29 +02:00
|
|
|
const struct node_id *owner;
|
2017-02-01 15:09:26 +01:00
|
|
|
secp256k1_ecdsa_signature signature;
|
2017-03-02 13:21:49 +01:00
|
|
|
struct short_channel_id short_channel_id;
|
2017-02-01 15:09:26 +01:00
|
|
|
u32 timestamp;
|
2018-09-20 02:59:46 +02:00
|
|
|
u8 message_flags, channel_flags;
|
2017-02-01 15:09:26 +01:00
|
|
|
u16 expiry;
|
2022-09-14 05:50:31 +02:00
|
|
|
struct amount_msat htlc_minimum, htlc_maximum;
|
2017-02-01 15:09:26 +01:00
|
|
|
u32 fee_base_msat;
|
|
|
|
u32 fee_proportional_millionths;
|
2017-12-18 07:44:10 +01:00
|
|
|
struct bitcoin_blkid chain_hash;
|
2018-01-12 18:55:29 +01:00
|
|
|
u8 direction;
|
2019-04-10 09:31:29 +02:00
|
|
|
struct pending_cannouncement *pending;
|
2022-03-31 11:10:50 +02:00
|
|
|
u8 *warn;
|
2017-02-01 15:09:26 +01:00
|
|
|
|
2022-01-22 05:49:33 +01:00
|
|
|
serialized = tal_dup_talarr(tmpctx, u8, update);
|
2018-02-20 21:59:09 +01:00
|
|
|
if (!fromwire_channel_update(serialized, &signature,
|
2017-08-22 07:25:01 +02:00
|
|
|
&chain_hash, &short_channel_id,
|
2018-09-20 02:59:46 +02:00
|
|
|
×tamp, &message_flags,
|
|
|
|
&channel_flags, &expiry,
|
2019-02-21 04:45:55 +01:00
|
|
|
&htlc_minimum, &fee_base_msat,
|
2022-09-14 05:50:31 +02:00
|
|
|
&fee_proportional_millionths,
|
|
|
|
&htlc_maximum)) {
|
2023-01-13 14:49:43 +01:00
|
|
|
/* FIXME: We removed a warning about the
|
|
|
|
* channel_update being malformed since the warning
|
|
|
|
* could cause lnd to disconnect (seems they treat
|
|
|
|
* channel-unrelated warnings as fatal?). This was
|
|
|
|
* caused by lnd not enforcing the `htlc_maximum`,
|
|
|
|
* thus the parsing would fail. We can re-add the
|
|
|
|
* warning once we can assume `htlc_maximum`
|
|
|
|
* is always set. */
|
|
|
|
return NULL;
|
2017-02-01 15:09:26 +01:00
|
|
|
}
|
2018-09-20 02:59:46 +02:00
|
|
|
direction = channel_flags & 0x1;
|
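/* channel_flags bit layout per BOLT #7, for reference:
 *   bit 0 (0x1): direction - 0 if this update describes the
 *                node_id_1 -> node_id_2 direction, 1 otherwise
 *   bit 1 (0x2): ROUTING_FLAGS_DISABLED - channel is disabled
 */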
2017-02-01 15:09:26 +01:00
|
|
|
|
2017-08-22 07:25:01 +02:00
|
|
|
/* BOLT #7:
|
|
|
|
*
|
2019-01-14 03:22:05 +01:00
|
|
|
* The receiving node:
|
2018-06-17 12:13:44 +02:00
|
|
|
*...
|
|
|
|
* - if the specified `chain_hash` value is unknown (meaning it isn't
|
|
|
|
* active on the specified chain):
|
|
|
|
* - MUST ignore the channel update.
|
|
|
|
*/
|
2019-11-20 02:54:47 +01:00
|
|
|
if (!bitcoin_blkid_eq(&chain_hash, &chainparams->genesis_blockhash)) {
|
2023-07-06 09:35:54 +02:00
|
|
|
status_peer_debug(source_peer,
|
2019-11-18 01:26:17 +01:00
|
|
|
"Received channel_update for unknown chain %s",
|
|
|
|
type_to_string(tmpctx, struct bitcoin_blkid,
|
|
|
|
&chain_hash));
|
2018-03-08 05:10:33 +01:00
|
|
|
return NULL;
|
2017-08-22 07:25:01 +02:00
|
|
|
}
|
2017-02-01 15:09:26 +01:00
|
|
|
|
2019-03-27 15:40:25 +01:00
|
|
|
/* If we dropped the matching announcement for this channel due to the
|
|
|
|
* txout query failing, don't report failure; it's just too noisy on
|
|
|
|
* mainnet */
|
2019-09-27 02:04:34 +02:00
|
|
|
if (in_txout_failures(rstate, &short_channel_id))
|
2019-03-27 15:40:25 +01:00
|
|
|
return NULL;
|
|
|
|
|
2019-04-10 09:31:29 +02:00
|
|
|
/* If we have an unvalidated channel, just queue on that */
|
|
|
|
pending = find_pending_cannouncement(rstate, &short_channel_id);
|
|
|
|
if (pending) {
|
2023-07-06 09:35:54 +02:00
|
|
|
status_peer_debug(source_peer,
|
2019-11-18 01:26:17 +01:00
|
|
|
"Updated pending announce with update %s/%u",
|
|
|
|
type_to_string(tmpctx,
|
|
|
|
struct short_channel_id,
|
|
|
|
&short_channel_id),
|
|
|
|
direction);
|
2023-07-06 09:35:54 +02:00
|
|
|
update_pending(pending, timestamp, serialized, direction, source_peer);
|
2018-06-04 06:25:25 +02:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2019-04-10 09:31:29 +02:00
|
|
|
owner = get_channel_owner(rstate, &short_channel_id, direction);
|
|
|
|
if (!owner) {
|
2019-06-12 01:27:07 +02:00
|
|
|
if (unknown_scid)
|
|
|
|
*unknown_scid = short_channel_id;
|
2019-04-10 09:31:29 +02:00
|
|
|
bad_gossip_order(serialized,
|
2023-07-06 09:35:54 +02:00
|
|
|
source_peer,
|
2019-04-10 09:31:29 +02:00
|
|
|
tal_fmt(tmpctx, "%s/%u",
|
|
|
|
type_to_string(tmpctx,
|
|
|
|
struct short_channel_id,
|
|
|
|
&short_channel_id),
|
|
|
|
direction));
|
2018-03-08 05:10:33 +01:00
|
|
|
return NULL;
|
2018-02-27 20:58:20 +01:00
|
|
|
}
|
|
|
|
|
2022-03-31 11:10:50 +02:00
|
|
|
warn = check_channel_update(rstate, owner, &signature, serialized);
|
|
|
|
if (warn) {
|
2018-03-08 05:10:33 +01:00
|
|
|
/* BOLT #7:
|
|
|
|
*
|
|
|
|
* - if `signature` is not a valid signature, using `node_id`
|
|
|
|
* of the double-SHA256 of the entire message following the
|
|
|
|
* `signature` field (including unknown fields following
|
|
|
|
* `fee_proportional_millionths`):
|
2022-03-31 11:10:50 +02:00
|
|
|
* - SHOULD send a `warning` and close the connection.
|
2018-03-08 05:10:33 +01:00
|
|
|
* - MUST NOT process the message further.
|
|
|
|
*/
|
2022-03-31 11:10:50 +02:00
|
|
|
return warn;
|
2017-02-01 15:09:26 +01:00
|
|
|
}
|
|
|
|
|
2023-07-06 09:35:54 +02:00
|
|
|
routing_add_channel_update(rstate, take(serialized), 0, source_peer, force,
|
2023-02-28 20:04:00 +01:00
|
|
|
false, false);
|
2018-03-08 05:10:33 +01:00
|
|
|
return NULL;
|
2017-02-01 15:09:26 +01:00
|
|
|
}

bool routing_add_node_announcement(struct routing_state *rstate,
				   const u8 *msg TAKES,
				   u32 index,
				   const struct node_id *source_peer TAKES,
				   bool *was_unknown,
				   bool force_spam_flag)
{
	struct node *node;
	secp256k1_ecdsa_signature signature;
	u32 timestamp;
	struct node_id node_id;
	u8 rgb_color[3];
	u8 alias[32];
	u8 *features, *addresses;
	struct tlv_node_ann_tlvs *na_tlv;
	bool spam;

	if (was_unknown)
		*was_unknown = false;

	/* Make sure we own msg, even if we don't save it. */
	if (taken(msg))
		tal_steal(tmpctx, msg);

	/* Note: validity of node_id is already checked. */
	if (!fromwire_node_announcement(tmpctx, msg,
					&signature, &features, &timestamp,
					&node_id, rgb_color, alias,
					&addresses,
					&na_tlv)) {
		return false;
	}

	node = get_node(rstate, &node_id);

	if (node == NULL || !node_has_broadcastable_channels(node)) {
		struct pending_node_announce *pna;
		/* BOLT #7:
		 *
		 * - if `node_id` is NOT previously known from a
		 *   `channel_announcement` message, OR if `timestamp` is NOT
		 *   greater than the last-received `node_announcement` from
		 *   this `node_id`:
		 *    - SHOULD ignore the message.
		 */
		/* Check if we are currently verifying the txout for a
		 * matching channel */
		pna = pending_node_map_get(rstate->pending_node_map,
					   &node_id);
		if (!pna) {
			if (was_unknown)
				*was_unknown = true;
			/* Don't complain if it's a zombie node! */
			if (!node || !is_node_zombie(node)) {
				bad_gossip_order(msg, source_peer,
						 type_to_string(tmpctx,
								struct node_id,
								&node_id));
			}
			return false;
		} else if (timestamp <= pna->timestamp)
			/* Ignore old ones: they're OK (unless from store). */
			return index == 0;

		SUPERVERBOSE("Deferring node_announcement for node %s",
			     type_to_string(tmpctx, struct node_id, &node_id));
		pna->timestamp = timestamp;
		pna->index = index;
		tal_free(pna->node_announcement);
		tal_free(pna->source_peer);
		pna->node_announcement = tal_dup_talarr(pna, u8, msg);
		pna->source_peer = tal_dup_or_null(pna, struct node_id, source_peer);
		return true;
	}

	if (node->bcast.index) {
		bool only_tlv_diff;
		u32 redundant_time;

		/* The gossip_store should contain a single broadcastable entry
		 * and potentially one rate-limited entry.  Any more is a bug. */
		if (index) {
			if (!force_spam_flag) {
				status_broken("gossip_store broadcastable "
					      "node_announcement %u replaces %u!",
					      index, node->bcast.index);
				return false;
			} else if (node->bcast.index != node->rgraph.index) {
				status_broken("gossip_store rate-limited "
					      "node_announcement %u replaces %u!",
					      index, node->rgraph.index);
				return false;
			}
		}

		if (node->rgraph.timestamp >= timestamp) {
			SUPERVERBOSE("Ignoring node announcement, it's outdated.");
			/* OK unless we're loading from store */
			return index == 0;
		}
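
		/* With the usual two-week prune interval (1209600s), this
		 * works out to 1209600 / 14 = 86400s, i.e. one redundant
		 * refresh allowed per day. */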
		/* Allow redundant updates once a day (faster in
		 * dev-fast-gossip-prune mode) */
		redundant_time = GOSSIP_PRUNE_INTERVAL(rstate->dev_fast_gossip_prune) / 14;
		if (timestamp < node->bcast.timestamp + redundant_time
		    && !nannounce_different(rstate->gs, node, msg,
					    &only_tlv_diff)) {
			SUPERVERBOSE(
			    "Ignoring redundant nannounce for %s"
			    " (last %u, now %u)",
			    type_to_string(tmpctx, struct node_id, &node_id),
			    node->bcast.timestamp, timestamp);
			/* Ignoring != failing */
			return true;
		}

		/* Make sure it's not spamming us. */
		if (!ratelimit(rstate,
			       &node->tokens, node->bcast.timestamp, timestamp)) {
			status_peer_debug(source_peer,
					  "Spammy nannounce for %s flagged"
					  " (last %u, now %u)",
					  type_to_string(tmpctx,
							 struct node_id,
							 &node_id),
					  node->bcast.timestamp, timestamp);
			spam = true;
		} else {
			spam = false;
		}
	} else {
		spam = false;
	}
	if (force_spam_flag)
		spam = true;
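
	/* Two cursors into the store: `rgraph` always tracks the newest
	 * announcement (used for the routing graph), while `bcast` only
	 * advances for non-spam messages, i.e. the ones we rebroadcast. */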
	/* Routing graph always references the latest message. */
	node->rgraph.timestamp = timestamp;
	if (!spam) {
		node->bcast.timestamp = timestamp;
		/* Remove prior spam update if one exists. */
		if (node->rgraph.index != node->bcast.index) {
			gossip_store_delete(rstate->gs, &node->rgraph,
					    WIRE_NODE_ANNOUNCEMENT);
		}
		/* Harmless if it was never added */
		gossip_store_delete(rstate->gs, &node->bcast,
				    WIRE_NODE_ANNOUNCEMENT);
	/* Remove prior spam update. */
	} else if (node->rgraph.index != node->bcast.index) {
		gossip_store_delete(rstate->gs, &node->rgraph,
				    WIRE_NODE_ANNOUNCEMENT);
	}

	/* Don't add to the store if it was loaded from the store. */
	if (index) {
		node->rgraph.index = index;
		if (!spam)
			node->bcast.index = index;
	} else {
		node->rgraph.index
			= gossip_store_add(rstate->gs, msg, timestamp,
					   false, spam, NULL);
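		/* Track the newest gossip timestamp we've stored, ignoring
		 * implausible future-dated ones. */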
		if (node->bcast.timestamp > rstate->last_timestamp
		    && node->bcast.timestamp < time_now().ts.tv_sec)
			rstate->last_timestamp = node->bcast.timestamp;
		if (!spam)
			node->bcast.index = node->rgraph.index;

		peer_supplied_good_gossip(rstate->daemon, source_peer, 1);
	}

	/* Only log this if *not* loading from store. */
	if (!index)
		status_peer_debug(source_peer,
				  "Received node_announcement for node %s",
				  type_to_string(tmpctx, struct node_id,
						 &node_id));

	return true;
}

u8 *handle_node_announcement(struct routing_state *rstate, const u8 *node_ann,
			     const struct node_id *source_peer TAKES,
			     bool *was_unknown)
{
	u8 *serialized;
	struct sha256_double hash;
	secp256k1_ecdsa_signature signature;
	u32 timestamp;
	struct node_id node_id;
	u8 rgb_color[3];
	u8 alias[32];
	u8 *features, *addresses;
	struct wireaddr *wireaddrs;
	size_t len = tal_count(node_ann);
	struct tlv_node_ann_tlvs *na_tlv;

	if (was_unknown)
		*was_unknown = false;

	serialized = tal_dup_arr(tmpctx, u8, node_ann, len, 0);
	if (!fromwire_node_announcement(tmpctx, serialized,
					&signature, &features, &timestamp,
					&node_id, rgb_color, alias,
					&addresses,
					&na_tlv)) {
		/* BOLT #7:
		 *
		 * - if `node_id` is NOT a valid compressed public key:
		 *    - SHOULD send a `warning`.
		 *    - MAY close the connection.
		 *    - MUST NOT process the message further.
		 */
		/* FIXME: We removed a warning about the
		 * node_announcement being malformed since the warning
		 * could cause lnd to disconnect (seems they treat
		 * channel-unrelated warnings as fatal?).
		 */
		return NULL;
	}
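
	/* The message is 2 type bytes followed by a 64-byte signature, so
	 * the signed region (everything after `signature`) starts at
	 * offset 66. */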
	sha256_double(&hash, serialized + 66, tal_count(serialized) - 66);
	/* If node_id is invalid, it fails here */
	if (!check_signed_hash_nodeid(&hash, &signature, &node_id)) {
		/* BOLT #7:
		 *
		 * - if `signature` is not a valid signature, using
		 *   `node_id` of the double-SHA256 of the entire
		 *   message following the `signature` field
		 *   (including unknown fields following
		 *   `fee_proportional_millionths`):
		 *    - SHOULD send a `warning` and close the connection.
		 *    - MUST NOT process the message further.
		 */
		u8 *warn = towire_warningfmt(rstate, NULL,
					     "Bad signature for %s hash %s"
					     " on node_announcement %s",
					     type_to_string(tmpctx,
							    secp256k1_ecdsa_signature,
							    &signature),
					     type_to_string(tmpctx,
							    struct sha256_double,
							    &hash),
					     tal_hex(tmpctx, node_ann));
		return warn;
	}

	wireaddrs = fromwire_wireaddr_array(tmpctx, addresses);
	if (!wireaddrs) {
		/* BOLT #7:
		 *
		 * - if `addrlen` is insufficient to hold the address
		 *   descriptors of the known types:
		 *    - SHOULD send a `warning`.
		 *    - MAY close the connection.
		 */
		u8 *warn = towire_warningfmt(rstate, NULL,
					     "Malformed wireaddrs %s in %s.",
					     tal_hex(tmpctx, wireaddrs),
					     tal_hex(tmpctx, node_ann));
		return warn;
	}

	/* May still fail, if we don't know the node. */
	routing_add_node_announcement(rstate, serialized, 0, source_peer,
				      was_unknown, false);
	return NULL;
}

void route_prune(struct routing_state *rstate)
{
	u64 now = gossip_time_now(rstate).ts.tv_sec;
	/* Anything below this highwater mark ought to be pruned */
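	/* Normally GOSSIP_PRUNE_INTERVAL is two weeks (1209600s), so the
	 * highwater is now - 1209600; dev-fast-gossip-prune shrinks it for
	 * tests. */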
	const s64 highwater = now - GOSSIP_PRUNE_INTERVAL(rstate->dev_fast_gossip_prune);
	struct chan **pruned = tal_arr(tmpctx, struct chan *, 0);
	u64 idx;

	/* Now iterate through all channels and see if they are still alive */
	for (struct chan *chan = uintmap_first(&rstate->chanmap, &idx);
	     chan;
	     chan = uintmap_after(&rstate->chanmap, &idx)) {
		/* Local-only? Don't prune. */
		if (!is_chan_public(chan))
			continue;
		/* These have been pruned already */
		if (is_chan_zombie(chan))
			continue;

		/* BOLT #7:
		 * - if the `timestamp` of the latest `channel_update` in
		 *   either direction is older than two weeks (1209600 seconds):
		 *    - MAY prune the channel.
		 */
		/* This is a fancy way of saying "both ends must refresh!" */
		if (!is_halfchan_defined(&chan->half[0])
		    || chan->half[0].bcast.timestamp < highwater
		    || !is_halfchan_defined(&chan->half[1])
		    || chan->half[1].bcast.timestamp < highwater) {
			status_debug(
			    "Pruning channel %s from network view (ages %"PRIu64" and %"PRIu64"s)",
			    type_to_string(tmpctx, struct short_channel_id,
					   &chan->scid),
			    is_halfchan_defined(&chan->half[0])
				? now - chan->half[0].bcast.timestamp : 0,
			    is_halfchan_defined(&chan->half[1])
				? now - chan->half[1].bcast.timestamp : 0);

			/* This may perturb iteration so do outside loop. */
			tal_arr_expand(&pruned, chan);
		}
	}
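
	/* Freeing an unupdated_channel drops it from unupdated_chanmap via
	 * its destructor, so deleting the current entry while walking with
	 * uintmap_after is safe here. */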
	/* Look for channels we had an announcement for, but no update. */
	for (struct unupdated_channel *uc
	     = uintmap_first(&rstate->unupdated_chanmap, &idx);
	     uc;
	     uc = uintmap_after(&rstate->unupdated_chanmap, &idx)) {
		if (uc->added.ts.tv_sec < highwater) {
			tal_free(uc);
		}
	}

	/* Now free all the chans and maybe even nodes. */
	for (size_t i = 0; i < tal_count(pruned); i++) {
		remove_channel_from_store(rstate, pruned[i]);
		free_chan(rstate, pruned[i]);
	}
}

bool routing_add_private_channel(struct routing_state *rstate,
				 const struct node_id *id,
				 struct amount_sat capacity,
				 const u8 *chan_ann, u64 index)
{
	struct short_channel_id scid;
	struct node_id node_id[2];
	struct pubkey ignorekey;
	struct chan *chan;
	u8 *features;
	secp256k1_ecdsa_signature ignoresig;
	struct bitcoin_blkid chain_hash;

	if (!fromwire_channel_announcement(tmpctx, chan_ann,
					   &ignoresig,
					   &ignoresig,
					   &ignoresig,
					   &ignoresig,
					   &features,
					   &chain_hash,
					   &scid,
					   &node_id[0],
					   &node_id[1],
					   &ignorekey,
					   &ignorekey))
		return false;

	/* Happens on channeld restart. */
	if (get_channel(rstate, &scid))
		return true;
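
	/* BOLT #7 orders the endpoints: `node_id_1` is the numerically
	 * lesser of the two keys.  Recreate that ordering for us and the
	 * peer, so we can verify the announcement names exactly those two. */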
	/* Make sure this id (if any) was allowed to create this */
	if (id) {
		struct node_id expected[2];
		int cmp = node_id_cmp(&rstate->daemon->id, id);

		if (cmp < 0) {
			expected[0] = rstate->daemon->id;
			expected[1] = *id;
		} else if (cmp > 0) {
			expected[0] = *id;
			expected[1] = rstate->daemon->id;
		} else {
			/* lightningd sets id, so this is fatal */
			status_failed(STATUS_FAIL_MASTER_IO,
				      "private_channel peer was us?");
		}

		if (!node_id_eq(&node_id[0], &expected[0])
		    || !node_id_eq(&node_id[1], &expected[1])) {
			status_peer_broken(id, "private channel %s<->%s invalid",
					   type_to_string(tmpctx, struct node_id,
							  &node_id[0]),
					   type_to_string(tmpctx, struct node_id,
							  &node_id[1]));
			return false;
		}
	}

	/* Create new (unannounced) channel */
	chan = new_chan(rstate, &scid, &node_id[0], &node_id[1], capacity);
	if (!index) {
		u8 *msg = towire_gossip_store_private_channel(tmpctx,
							      capacity,
							      chan_ann);
		index = gossip_store_add(rstate->gs, msg, 0, false, false,
					 NULL);
	}
	chan->bcast.index = index;
	return true;
}

struct timeabs gossip_time_now(const struct routing_state *rstate)
{
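	/* DEVELOPER builds let tests pin gossip time for reproducibility;
	 * otherwise we use the wall clock. */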
#if DEVELOPER
	if (rstate->gossip_time)
		return *rstate->gossip_time;
#endif
	return time_now();
}

const char *unfinalized_entries(const tal_t *ctx, struct routing_state *rstate)
{
	struct unupdated_channel *uc;
	u64 index;
	struct pending_node_announce *pna;
	struct pending_node_map_iter it;

	uc = uintmap_first(&rstate->unupdated_chanmap, &index);
	if (uc)
		return tal_fmt(ctx, "Unupdated channel_announcement at %u",
			       uc->index);

	pna = pending_node_map_first(rstate->pending_node_map, &it);
	if (pna)
		return tal_fmt(ctx, "Waiting node_announcement at %u",
			       pna->index);

	return NULL;
}

/* Gossip store was corrupt, forget anything we loaded. */
void remove_all_gossip(struct routing_state *rstate)
{
	struct node *n;
	struct node_map_iter nit;
	struct chan *c;
	struct unupdated_channel *uc;
	u64 index;
	struct pending_cannouncement *pca;
	struct pending_cannouncement_map_iter pit;
	struct pending_node_map_iter pnait;

	/* We don't want them to try to delete from store, so do this
	 * manually. */
	while ((n = node_map_first(rstate->nodes, &nit)) != NULL) {
		tal_del_destructor2(n, destroy_node, rstate);
		node_map_del(rstate->nodes, n);
		tal_free(n);
	}

	/* Now free all the channels. */
	while ((c = uintmap_first(&rstate->chanmap, &index)) != NULL) {
		uintmap_del(&rstate->chanmap, index);
#if DEVELOPER
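		/* Poison the capacity with the pointer value so any stale
		 * reference to this freed channel stands out in dev builds. */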
		c->sat = amount_sat((unsigned long)c);
#endif
		tal_free(c);
	}

	while ((uc = uintmap_first(&rstate->unupdated_chanmap, &index)) != NULL)
		tal_free(uc);

	while ((pca = pending_cannouncement_map_first(rstate->pending_cannouncements, &pit)) != NULL)
		tal_free(pca);

	/* Freeing unupdated chanmaps should empty this */
	assert(pending_node_map_first(rstate->pending_node_map, &pnait) == NULL);
}

static void channel_spent(struct routing_state *rstate,
			  struct chan *chan STEALS)
{
	status_debug("Deleting channel %s due to the funding outpoint being "
		     "spent",
		     type_to_string(tmpctx, struct short_channel_id,
				    &chan->scid));
	/* Suppress any now-obsolete updates/announcements */
	add_to_txout_failures(rstate, &chan->scid);
	remove_channel_from_store(rstate, chan);
	/* Freeing is sufficient since everything else is allocated off
	 * of the channel and this takes care of unregistering
	 * the channel */
	free_chan(rstate, chan);
}

void routing_expire_channels(struct routing_state *rstate, u32 blockheight)
{
	struct chan *chan;

	for (size_t i = 0; i < tal_count(rstate->dying_channels); i++) {
		struct dying_channel *d = rstate->dying_channels + i;

		if (blockheight < d->deadline_blockheight)
			continue;
		chan = get_channel(rstate, &d->scid);
		if (chan)
			channel_spent(rstate, chan);
		/* Delete dying marker itself */
		gossip_store_delete(rstate->gs,
				    &d->marker, WIRE_GOSSIP_STORE_CHAN_DYING);
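		/* tal_arr_remove shifts later entries down one slot, so
		 * step i back to revisit the entry now at this index. */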
		tal_arr_remove(&rstate->dying_channels, i);
		i--;
	}
}

void remember_chan_dying(struct routing_state *rstate,
			 const struct short_channel_id *scid,
			 u32 deadline_blockheight,
			 u64 index)
{
	struct dying_channel d;
	d.scid = *scid;
	d.deadline_blockheight = deadline_blockheight;
	d.marker.index = index;
	tal_arr_expand(&rstate->dying_channels, d);
}

void routing_channel_spent(struct routing_state *rstate,
			   u32 current_blockheight,
			   struct chan *chan)
{
	u64 index;
	u32 deadline;
	u8 *msg;

	/* FIXME: We should note that the delay is not necessary (or even
	 * sensible) for local channels! */
	if (local_direction(rstate, chan, NULL)) {
		channel_spent(rstate, chan);
		return;
	}

	/* BOLT #7:
	 * - once its funding output has been spent OR reorganized out:
	 *   - SHOULD forget a channel after a 12-block delay.
	 */
	deadline = current_blockheight + 12;

	/* Save to gossip_store in case we restart */
	msg = towire_gossip_store_chan_dying(tmpctx, &chan->scid, deadline);
	index = gossip_store_add(rstate->gs, msg, 0, false, false, NULL);

	/* Remember locally so we can kill it in 12 blocks */
	status_debug("channel %s closing soon due"
		     " to the funding outpoint being spent",
		     type_to_string(msg, struct short_channel_id, &chan->scid));
	remember_chan_dying(rstate, &chan->scid, deadline, index);
}