mirror of
https://github.com/ElementsProject/lightning.git
synced 2025-02-20 13:54:36 +01:00
gossipd: remove spam handling.
We weakened this progressively over time, and gossip v1.5 makes spam impossible by protocol, so we can wait until then. Removing this code simplifies things a great deal! Signed-off-by: Rusty Russell <rusty@rustcorp.com.au> Changelog-Removed: Protocol: we no longer ratelimit gossip messages by channel, making our code far simpler.
This commit is contained in:
parent
e7ceffd565
commit
07cd4a809b
13 changed files with 53 additions and 421 deletions
|
@ -34,11 +34,6 @@ struct gossip_rcvd_filter;
|
|||
*/
|
||||
#define GOSSIP_STORE_PUSH_BIT 0x4000U
|
||||
|
||||
/**
|
||||
* Bit of flags used to define a rate-limited record (do not rebroadcast)
|
||||
*/
|
||||
#define GOSSIP_STORE_RATELIMIT_BIT 0x2000U
|
||||
|
||||
/**
|
||||
* Bit of flags used to mark a channel announcement closed (not deleted for 12 blocks)
|
||||
*/
|
||||
|
|
|
@ -112,7 +112,6 @@ static bool public_msg_type(enum peer_wire type)
|
|||
u8 *gossip_store_next(const tal_t *ctx,
|
||||
int *gossip_store_fd,
|
||||
u32 timestamp_min, u32 timestamp_max,
|
||||
bool with_spam,
|
||||
size_t *off, size_t *end)
|
||||
{
|
||||
u8 *msg = NULL;
|
||||
|
@ -122,7 +121,6 @@ u8 *gossip_store_next(const tal_t *ctx,
|
|||
struct gossip_hdr hdr;
|
||||
u16 msglen, flags;
|
||||
u32 checksum, timestamp;
|
||||
bool ratelimited;
|
||||
int type, r;
|
||||
|
||||
r = pread(*gossip_store_fd, &hdr, sizeof(hdr), *off);
|
||||
|
@ -131,7 +129,6 @@ u8 *gossip_store_next(const tal_t *ctx,
|
|||
|
||||
msglen = be16_to_cpu(hdr.len);
|
||||
flags = be16_to_cpu(hdr.flags);
|
||||
ratelimited = (flags & GOSSIP_STORE_RATELIMIT_BIT);
|
||||
|
||||
/* Skip any deleted/dying entries. */
|
||||
if (flags & (GOSSIP_STORE_DELETED_BIT|GOSSIP_STORE_DYING_BIT)) {
|
||||
|
@ -172,8 +169,6 @@ u8 *gossip_store_next(const tal_t *ctx,
|
|||
/* Ignore gossipd internal messages. */
|
||||
} else if (!public_msg_type(type)) {
|
||||
msg = tal_free(msg);
|
||||
} else if (!with_spam && ratelimited) {
|
||||
msg = tal_free(msg);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -13,7 +13,6 @@
|
|||
u8 *gossip_store_next(const tal_t *ctx,
|
||||
int *gossip_store_fd,
|
||||
u32 timestamp_min, u32 timestamp_max,
|
||||
bool with_spam,
|
||||
size_t *off, size_t *end);
|
||||
|
||||
/**
|
||||
|
|
|
@ -509,7 +509,6 @@ again:
|
|||
msg = gossip_store_next(ctx, &peer->daemon->gossip_store_fd,
|
||||
peer->gs.timestamp_min,
|
||||
peer->gs.timestamp_max,
|
||||
false,
|
||||
&peer->gs.off,
|
||||
&peer->daemon->gossip_store_end);
|
||||
/* Don't send back gossip they sent to us! */
|
||||
|
|
|
@ -16,7 +16,6 @@ GOSSIP_STORE_MAJOR_VERSION = (0 << 5)
|
|||
GOSSIP_STORE_MAJOR_VERSION_MASK = 0xE0
|
||||
GOSSIP_STORE_LEN_DELETED_BIT = 0x8000
|
||||
GOSSIP_STORE_LEN_PUSH_BIT = 0x4000
|
||||
GOSSIP_STORE_LEN_RATELIMIT_BIT = 0x2000
|
||||
|
||||
# These duplicate constants in lightning/gossipd/gossip_store_wiregen.h
|
||||
WIRE_GOSSIP_STORE_PRIVATE_CHANNEL = 4104
|
||||
|
|
|
@ -31,10 +31,6 @@ class GossmapStats(object):
|
|||
return hc0 + hc1
|
||||
|
||||
# Now a bunch of predefined specific filter methods
|
||||
def filter_nodes_ratelimited(self, nodes: Optional[Iterable[GossmapNode]] = None) -> List[GossmapNode]:
|
||||
""" Filters nodes being marked by cln as ratelimited, when they send out too many updates. """
|
||||
return self.filter_nodes(lambda n: n.hdr is not None and n.hdr.ratelimit, nodes)
|
||||
|
||||
def filter_nodes_unannounced(self, nodes: Optional[Iterable[GossmapNode]] = None) -> List[GossmapNode]:
|
||||
""" Filters nodes that are only known by a channel, i.e. missing a node_announcement.
|
||||
Usually happens when a peer has been offline for a while. """
|
||||
|
@ -124,10 +120,6 @@ class GossmapStats(object):
|
|||
""" Filters half-channels that are disabled. """
|
||||
return self.filter_halfchannels(lambda hc: hc.disabled, channels)
|
||||
|
||||
def filter_halfchannels_ratelimited(self, channels: Optional[Iterable[GossmapChannel]] = None) -> List[GossmapHalfchannel]:
|
||||
""" Filters half-channels that are being marked as ratelimited for sending out too many updates. """
|
||||
return self.filter_halfchannels(lambda hc: hc.hdr.ratelimit, channels)
|
||||
|
||||
def quantiles_nodes_channel_count(self, tiles=100, nodes: Optional[Iterable[GossmapNode]] = None) -> List[float]:
|
||||
if nodes is None:
|
||||
nodes = self.g.nodes.values()
|
||||
|
|
|
@ -68,12 +68,11 @@ int main(int argc, char *argv[])
|
|||
u16 flags = be16_to_cpu(hdr.flags);
|
||||
u16 msglen = be16_to_cpu(hdr.len);
|
||||
u8 *msg, *inner;
|
||||
bool deleted, push, ratelimit, dying;
|
||||
bool deleted, push, dying;
|
||||
u32 blockheight;
|
||||
|
||||
deleted = (flags & GOSSIP_STORE_DELETED_BIT);
|
||||
push = (flags & GOSSIP_STORE_PUSH_BIT);
|
||||
ratelimit = (flags & GOSSIP_STORE_RATELIMIT_BIT);
|
||||
dying = (flags & GOSSIP_STORE_DYING_BIT);
|
||||
|
||||
msg = tal_arr(NULL, u8, msglen);
|
||||
|
@ -84,10 +83,9 @@ int main(int argc, char *argv[])
|
|||
!= crc32c(be32_to_cpu(hdr.timestamp), msg, msglen))
|
||||
warnx("Checksum verification failed");
|
||||
|
||||
printf("%zu: %s%s%s%s", off,
|
||||
printf("%zu: %s%s%s", off,
|
||||
deleted ? "DELETED " : "",
|
||||
push ? "PUSH " : "",
|
||||
ratelimit ? "RATE-LIMITED " : "",
|
||||
dying ? "DYING " : "");
|
||||
if (print_timestamp)
|
||||
printf("T=%u ", be32_to_cpu(hdr.timestamp));
|
||||
|
|
|
@ -160,7 +160,7 @@ The flags currently defined are:
|
|||
```
|
||||
#define DELETED 0x8000
|
||||
#define PUSH 0x4000
|
||||
#define RATELIMIT 0x2000
|
||||
#define DYING 0x0800
|
||||
```
|
||||
|
||||
|
||||
|
@ -169,7 +169,7 @@ Deleted fields should be ignored: on restart, they will be removed as the gossip
|
|||
|
||||
The push flag indicates gossip which is generated locally: this is important for gossip timestamp filtering, where peers request gossip and we always send our own gossip messages even if the timestamp wasn't within their request.
|
||||
|
||||
The ratelimit flag indicates that this gossip message came too fast: we record it, but don't relay it to peers.
|
||||
The dying flag indicates that this channel has been spent, but we keep it around for 12 blocks in case it's actually a splice.
|
||||
|
||||
Other flags should be ignored.
|
||||
|
||||
|
@ -234,4 +234,4 @@ This is placed in the gossip_store file when a funding transaction is spent. `b
|
|||
If you are keeping the file open to watch for changes:
|
||||
|
||||
- The file is append-only, so you can simply try reading more records using inotify (or equivalent) or simply checking every few seconds.
|
||||
- If you see a `gossip_store_ended` message, reopen the file.
|
||||
- If you see a `gossip_store_ended` message, reopen the file.
|
||||
|
|
|
@ -62,7 +62,7 @@ static ssize_t gossip_pwritev(int fd, const struct iovec *iov, int iovcnt,
|
|||
#endif /* !HAVE_PWRITEV */
|
||||
|
||||
static bool append_msg(int fd, const u8 *msg, u32 timestamp,
|
||||
bool spam, bool dying, u64 *len)
|
||||
bool dying, u64 *len)
|
||||
{
|
||||
struct gossip_hdr hdr;
|
||||
u32 msglen;
|
||||
|
@ -74,8 +74,6 @@ static bool append_msg(int fd, const u8 *msg, u32 timestamp,
|
|||
msglen = tal_count(msg);
|
||||
hdr.len = cpu_to_be16(msglen);
|
||||
hdr.flags = 0;
|
||||
if (spam)
|
||||
hdr.flags |= CPU_TO_BE16(GOSSIP_STORE_RATELIMIT_BIT);
|
||||
if (dying)
|
||||
hdr.flags |= CPU_TO_BE16(GOSSIP_STORE_DYING_BIT);
|
||||
hdr.crc = cpu_to_be32(crc32c(timestamp, msg, msglen));
|
||||
|
@ -98,7 +96,7 @@ static bool append_msg(int fd, const u8 *msg, u32 timestamp,
|
|||
* v11 mandated channel_updates use the htlc_maximum_msat field
|
||||
* v12 added the zombie flag for expired channel updates
|
||||
* v13 removed private gossip entries
|
||||
* v14 removed zombie flags
|
||||
* v14 removed zombie and spam flags
|
||||
*/
|
||||
static bool can_upgrade(u8 oldversion)
|
||||
{
|
||||
|
@ -309,7 +307,7 @@ static u32 gossip_store_compact_offline(struct daemon *daemon)
|
|||
oldlen = lseek(old_fd, SEEK_END, 0);
|
||||
newlen = lseek(new_fd, SEEK_END, 0);
|
||||
append_msg(old_fd, towire_gossip_store_ended(tmpctx, newlen),
|
||||
0, false, false, &oldlen);
|
||||
0, false, &oldlen);
|
||||
close(old_fd);
|
||||
status_debug("gossip_store_compact_offline: %zu deleted, %zu copied",
|
||||
deleted, count);
|
||||
|
@ -368,19 +366,19 @@ struct gossip_store *gossip_store_new(struct daemon *daemon)
|
|||
|
||||
u64 gossip_store_add(struct gossip_store *gs, const u8 *gossip_msg,
|
||||
u32 timestamp,
|
||||
bool spam, bool dying, const u8 *addendum)
|
||||
bool dying, const u8 *addendum)
|
||||
{
|
||||
u64 off = gs->len;
|
||||
|
||||
/* Should never get here during loading! */
|
||||
assert(gs->writable);
|
||||
|
||||
if (!append_msg(gs->fd, gossip_msg, timestamp, spam, dying, &gs->len)) {
|
||||
if (!append_msg(gs->fd, gossip_msg, timestamp, dying, &gs->len)) {
|
||||
status_broken("Failed writing to gossip store: %s",
|
||||
strerror(errno));
|
||||
return 0;
|
||||
}
|
||||
if (addendum && !append_msg(gs->fd, addendum, 0, false, false, &gs->len)) {
|
||||
if (addendum && !append_msg(gs->fd, addendum, 0, false, &gs->len)) {
|
||||
status_broken("Failed writing addendum to gossip store: %s",
|
||||
strerror(errno));
|
||||
return 0;
|
||||
|
@ -514,7 +512,7 @@ void gossip_store_mark_channel_deleted(struct gossip_store *gs,
|
|||
const struct short_channel_id *scid)
|
||||
{
|
||||
gossip_store_add(gs, towire_gossip_store_delete_chan(tmpctx, scid),
|
||||
0, false, false, NULL);
|
||||
0, false, NULL);
|
||||
}
|
||||
|
||||
u32 gossip_store_get_timestamp(struct gossip_store *gs, u64 offset)
|
||||
|
@ -632,8 +630,6 @@ u32 gossip_store_load(struct gossip_store *gs)
|
|||
|
||||
gs->writable = false;
|
||||
while (pread(gs->fd, &hdr, sizeof(hdr), gs->len) == sizeof(hdr)) {
|
||||
bool spam;
|
||||
|
||||
msglen = be16_to_cpu(hdr.len);
|
||||
checksum = be32_to_cpu(hdr.crc);
|
||||
msg = tal_arr(tmpctx, u8, msglen);
|
||||
|
@ -654,7 +650,6 @@ u32 gossip_store_load(struct gossip_store *gs)
|
|||
deleted++;
|
||||
goto next;
|
||||
}
|
||||
spam = (be16_to_cpu(hdr.flags) & GOSSIP_STORE_RATELIMIT_BIT);
|
||||
|
||||
switch (fromwire_peektype(msg)) {
|
||||
case WIRE_GOSSIP_STORE_CHANNEL_AMOUNT:
|
||||
|
@ -700,8 +695,7 @@ u32 gossip_store_load(struct gossip_store *gs)
|
|||
case WIRE_CHANNEL_UPDATE:
|
||||
if (!routing_add_channel_update(gs->daemon->rstate,
|
||||
take(msg), gs->len,
|
||||
NULL, false,
|
||||
spam)) {
|
||||
NULL, false)) {
|
||||
bad = "Bad channel_update";
|
||||
goto badmsg;
|
||||
}
|
||||
|
@ -710,7 +704,7 @@ u32 gossip_store_load(struct gossip_store *gs)
|
|||
case WIRE_NODE_ANNOUNCEMENT:
|
||||
if (!routing_add_node_announcement(gs->daemon->rstate,
|
||||
take(msg), gs->len,
|
||||
NULL, NULL, spam)) {
|
||||
NULL, NULL)) {
|
||||
/* FIXME: This has been reported: routing.c
|
||||
* has logged, so ignore. */
|
||||
break;
|
||||
|
|
|
@ -32,13 +32,12 @@ u32 gossip_store_load(struct gossip_store *gs);
|
|||
* @gs: gossip store
|
||||
* @gossip_msg: the gossip message to insert.
|
||||
* @timestamp: the timestamp for filtering of this messsage.
|
||||
* @spam: true if this message is rate-limited and squelched to peers.
|
||||
* @dying: true if this message is for a dying channel.
|
||||
* @addendum: another message to append immediately after this
|
||||
* (for appending amounts to channel_announcements for internal use).
|
||||
*/
|
||||
u64 gossip_store_add(struct gossip_store *gs, const u8 *gossip_msg,
|
||||
u32 timestamp, bool spam, bool dying,
|
||||
u32 timestamp, bool dying,
|
||||
const u8 *addendum);
|
||||
|
||||
|
||||
|
|
|
@ -73,19 +73,6 @@ static u8 update_tokens(const struct routing_state *rstate,
|
|||
return num_tokens;
|
||||
}
|
||||
|
||||
static bool ratelimit(const struct routing_state *rstate,
|
||||
u8 *tokens, u32 prev_timestamp, u32 new_timestamp)
|
||||
{
|
||||
*tokens = update_tokens(rstate, *tokens, prev_timestamp, new_timestamp);
|
||||
|
||||
/* Now, if we can afford it, pass this message. */
|
||||
if (*tokens >= TOKENS_PER_MSG) {
|
||||
*tokens -= TOKENS_PER_MSG;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static const struct node_id *
|
||||
pending_node_announce_keyof(const struct pending_node_announce *a)
|
||||
{
|
||||
|
@ -355,7 +342,6 @@ static void force_node_announce_rexmit(struct routing_state *rstate,
|
|||
announce,
|
||||
node->bcast.timestamp,
|
||||
false,
|
||||
false,
|
||||
NULL);
|
||||
if (node->rgraph.index == initial_bcast_index){
|
||||
node->rgraph.index = node->bcast.index;
|
||||
|
@ -367,7 +353,6 @@ static void force_node_announce_rexmit(struct routing_state *rstate,
|
|||
node->rgraph.index = gossip_store_add(rstate->daemon->gs,
|
||||
announce,
|
||||
node->rgraph.timestamp,
|
||||
true,
|
||||
false,
|
||||
NULL);
|
||||
}
|
||||
|
@ -583,29 +568,12 @@ static void process_pending_node_announcement(struct routing_state *rstate,
|
|||
if (!routing_add_node_announcement(rstate,
|
||||
pna->node_announcement,
|
||||
pna->index,
|
||||
pna->source_peer, NULL,
|
||||
false))
|
||||
pna->source_peer, NULL))
|
||||
status_unusual("pending node_announcement %s too old?",
|
||||
tal_hex(tmpctx, pna->node_announcement));
|
||||
/* Never send this again. */
|
||||
pna->node_announcement = tal_free(pna->node_announcement);
|
||||
}
|
||||
if (pna->spam.node_announcement) {
|
||||
SUPERVERBOSE(
|
||||
"Processing deferred node_announcement for node %s",
|
||||
type_to_string(pna, struct node_id, nodeid));
|
||||
|
||||
/* Can fail it timestamp is now too old */
|
||||
if (!routing_add_node_announcement(rstate,
|
||||
pna->spam.node_announcement,
|
||||
pna->spam.index,
|
||||
NULL, NULL,
|
||||
true))
|
||||
status_unusual("pending node_announcement %s too old?",
|
||||
tal_hex(tmpctx, pna->spam.node_announcement));
|
||||
/* Never send this again. */
|
||||
pna->spam.node_announcement = tal_free(pna->spam.node_announcement);
|
||||
}
|
||||
|
||||
/* We don't need to catch any more node_announcements, since we've
|
||||
* accepted the public channel now. But other pending announcements
|
||||
|
@ -648,7 +616,6 @@ static void add_channel_announce_to_broadcast(struct routing_state *rstate,
|
|||
channel_announce,
|
||||
chan->bcast.timestamp,
|
||||
false,
|
||||
false,
|
||||
addendum);
|
||||
}
|
||||
|
||||
|
@ -1084,8 +1051,7 @@ bool routing_add_channel_update(struct routing_state *rstate,
|
|||
u32 index,
|
||||
/* NULL if it's us */
|
||||
const struct node_id *source_peer,
|
||||
bool ignore_timestamp,
|
||||
bool force_spam_flag)
|
||||
bool ignore_timestamp)
|
||||
{
|
||||
secp256k1_ecdsa_signature signature;
|
||||
struct short_channel_id short_channel_id;
|
||||
|
@ -1101,7 +1067,6 @@ bool routing_add_channel_update(struct routing_state *rstate,
|
|||
struct unupdated_channel *uc;
|
||||
u8 direction;
|
||||
struct amount_sat sat;
|
||||
bool spam;
|
||||
bool dying;
|
||||
|
||||
/* Make sure we own msg, even if we don't save it. */
|
||||
|
@ -1168,20 +1133,12 @@ bool routing_add_channel_update(struct routing_state *rstate,
|
|||
hc = &chan->half[direction];
|
||||
|
||||
if (is_halfchan_defined(hc) && !ignore_timestamp) {
|
||||
/* The gossip_store should contain a single broadcastable entry
|
||||
* and potentially one rate-limited entry. Any more is a bug */
|
||||
if (index){
|
||||
if (!force_spam_flag){
|
||||
status_broken("gossip_store broadcastable "
|
||||
/* The gossip_store should contain a single broadcastable entry: any more is a bug */
|
||||
if (index) {
|
||||
status_broken("gossip_store broadcastable "
|
||||
"channel_update %u replaces %u!",
|
||||
index, hc->bcast.index);
|
||||
return false;
|
||||
} else if (hc->bcast.index != hc->rgraph.index){
|
||||
status_broken("gossip_store rate-limited "
|
||||
"channel_update %u replaces %u!",
|
||||
index, hc->rgraph.index);
|
||||
return false;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
if (timestamp <= hc->rgraph.timestamp) {
|
||||
|
@ -1202,39 +1159,16 @@ bool routing_add_channel_update(struct routing_state *rstate,
|
|||
/* Ignoring != failing */
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Make sure it's not spamming us */
|
||||
if (!local_direction(rstate, chan, NULL)
|
||||
&& !ratelimit(rstate,
|
||||
&hc->tokens, hc->bcast.timestamp, timestamp)) {
|
||||
status_peer_debug(source_peer,
|
||||
"Spammy update for %s/%u flagged"
|
||||
" (last %u, now %u)",
|
||||
type_to_string(tmpctx,
|
||||
struct short_channel_id,
|
||||
&short_channel_id),
|
||||
direction,
|
||||
hc->bcast.timestamp, timestamp);
|
||||
spam = true;
|
||||
} else {
|
||||
spam = false;
|
||||
}
|
||||
} else {
|
||||
spam = false;
|
||||
}
|
||||
if (force_spam_flag)
|
||||
spam = true;
|
||||
|
||||
/* Delete any prior entries (noop if they don't exist) */
|
||||
delete_spam_update(rstate, hc);
|
||||
if (!spam)
|
||||
gossip_store_delete(rstate->daemon->gs, &hc->bcast,
|
||||
WIRE_CHANNEL_UPDATE);
|
||||
gossip_store_delete(rstate->daemon->gs, &hc->bcast,
|
||||
WIRE_CHANNEL_UPDATE);
|
||||
|
||||
/* Update timestamp(s) */
|
||||
hc->rgraph.timestamp = timestamp;
|
||||
if (!spam)
|
||||
hc->bcast.timestamp = timestamp;
|
||||
hc->bcast.timestamp = timestamp;
|
||||
|
||||
/* If this is a peer's update to one of our local channels, tell lightningd. */
|
||||
if (node_id_eq(&chan->nodes[!direction]->id, &rstate->daemon->id)) {
|
||||
|
@ -1266,15 +1200,13 @@ bool routing_add_channel_update(struct routing_state *rstate,
|
|||
|
||||
/* If we're loading from store, this means we don't re-add to store. */
|
||||
if (index) {
|
||||
if (!spam)
|
||||
hc->bcast.index = index;
|
||||
hc->bcast.index = index;
|
||||
hc->rgraph.index = index;
|
||||
} else {
|
||||
hc->rgraph.index
|
||||
= gossip_store_add(rstate->daemon->gs, update, timestamp,
|
||||
spam, dying, NULL);
|
||||
if (!spam)
|
||||
hc->bcast.index = hc->rgraph.index;
|
||||
dying, NULL);
|
||||
hc->bcast.index = hc->rgraph.index;
|
||||
|
||||
peer_supplied_good_gossip(rstate->daemon, source_peer, 1);
|
||||
}
|
||||
|
@ -1439,8 +1371,7 @@ u8 *handle_channel_update(struct routing_state *rstate, const u8 *update TAKES,
|
|||
return towire_warningfmt(rstate, NULL, "%s", err);
|
||||
}
|
||||
|
||||
routing_add_channel_update(rstate, take(serialized), 0, source_peer, force,
|
||||
false);
|
||||
routing_add_channel_update(rstate, take(serialized), 0, source_peer, force);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -1526,8 +1457,7 @@ bool routing_add_node_announcement(struct routing_state *rstate,
|
|||
const u8 *msg TAKES,
|
||||
u32 index,
|
||||
const struct node_id *source_peer TAKES,
|
||||
bool *was_unknown,
|
||||
bool force_spam_flag)
|
||||
bool *was_unknown)
|
||||
{
|
||||
struct node *node;
|
||||
secp256k1_ecdsa_signature signature;
|
||||
|
@ -1537,7 +1467,6 @@ bool routing_add_node_announcement(struct routing_state *rstate,
|
|||
u8 alias[32];
|
||||
u8 *features, *addresses;
|
||||
struct tlv_node_ann_tlvs *na_tlv;
|
||||
bool spam;
|
||||
|
||||
if (was_unknown)
|
||||
*was_unknown = false;
|
||||
|
@ -1586,40 +1515,24 @@ bool routing_add_node_announcement(struct routing_state *rstate,
|
|||
|
||||
SUPERVERBOSE("Deferring node_announcement for node %s",
|
||||
type_to_string(tmpctx, struct node_id, &node_id));
|
||||
/* a pending spam node announcement is possible when loading
|
||||
* from the store */
|
||||
if (index && force_spam_flag) {
|
||||
tal_free(pna->spam.node_announcement);
|
||||
pna->spam.node_announcement = tal_dup_talarr(pna, u8, msg);
|
||||
pna->spam.index = index;
|
||||
} else {
|
||||
tal_free(pna->node_announcement);
|
||||
tal_free(pna->source_peer);
|
||||
pna->node_announcement = tal_dup_talarr(pna, u8, msg);
|
||||
pna->source_peer = tal_dup_or_null(pna, struct node_id, source_peer);
|
||||
pna->timestamp = timestamp;
|
||||
pna->index = index;
|
||||
}
|
||||
tal_free(pna->node_announcement);
|
||||
tal_free(pna->source_peer);
|
||||
pna->node_announcement = tal_dup_talarr(pna, u8, msg);
|
||||
pna->source_peer = tal_dup_or_null(pna, struct node_id, source_peer);
|
||||
pna->timestamp = timestamp;
|
||||
pna->index = index;
|
||||
return true;
|
||||
}
|
||||
|
||||
if (node->bcast.index) {
|
||||
u32 redundant_time;
|
||||
|
||||
/* The gossip_store should contain a single broadcastable entry
|
||||
* and potentially one rate-limited entry. Any more is a bug */
|
||||
if (index){
|
||||
if (!force_spam_flag){
|
||||
status_broken("gossip_store broadcastable "
|
||||
"node_announcement %u replaces %u!",
|
||||
index, node->bcast.index);
|
||||
return false;
|
||||
} else if (node->bcast.index != node->rgraph.index){
|
||||
status_broken("gossip_store rate-limited "
|
||||
"node_announcement %u replaces %u!",
|
||||
index, node->rgraph.index);
|
||||
return false;
|
||||
}
|
||||
/* The gossip_store should contain a single broadcastable entry: Any more is a bug */
|
||||
if (index) {
|
||||
status_broken("gossip_store broadcastable "
|
||||
"node_announcement %u replaces %u!",
|
||||
index, node->bcast.index);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (node->rgraph.timestamp >= timestamp) {
|
||||
|
@ -1640,56 +1553,25 @@ bool routing_add_node_announcement(struct routing_state *rstate,
|
|||
/* Ignoring != failing */
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Make sure it's not spamming us. */
|
||||
if (!ratelimit(rstate,
|
||||
&node->tokens, node->bcast.timestamp, timestamp)) {
|
||||
status_peer_debug(source_peer,
|
||||
"Spammy nannounce for %s flagged"
|
||||
" (last %u, now %u)",
|
||||
type_to_string(tmpctx,
|
||||
struct node_id,
|
||||
&node_id),
|
||||
node->bcast.timestamp, timestamp);
|
||||
spam = true;
|
||||
} else {
|
||||
spam = false;
|
||||
}
|
||||
} else {
|
||||
spam = false;
|
||||
}
|
||||
if (force_spam_flag)
|
||||
spam = true;
|
||||
|
||||
/* Routing graph always references the latest message. */
|
||||
node->rgraph.timestamp = timestamp;
|
||||
if (!spam) {
|
||||
node->bcast.timestamp = timestamp;
|
||||
/* remove prior spam update if one exists */
|
||||
if (node->rgraph.index != node->bcast.index) {
|
||||
gossip_store_delete(rstate->daemon->gs, &node->rgraph,
|
||||
WIRE_NODE_ANNOUNCEMENT);
|
||||
}
|
||||
/* Harmless if it was never added */
|
||||
gossip_store_delete(rstate->daemon->gs, &node->bcast,
|
||||
WIRE_NODE_ANNOUNCEMENT);
|
||||
/* Remove prior spam update. */
|
||||
} else if (node->rgraph.index != node->bcast.index) {
|
||||
gossip_store_delete(rstate->daemon->gs, &node->rgraph,
|
||||
WIRE_NODE_ANNOUNCEMENT);
|
||||
}
|
||||
node->bcast.timestamp = timestamp;
|
||||
|
||||
/* Harmless if it was never added */
|
||||
gossip_store_delete(rstate->daemon->gs, &node->bcast,
|
||||
WIRE_NODE_ANNOUNCEMENT);
|
||||
|
||||
/* Don't add to the store if it was loaded from the store. */
|
||||
if (index) {
|
||||
node->rgraph.index = index;
|
||||
if (!spam)
|
||||
node->bcast.index = index;
|
||||
node->bcast.index = index;
|
||||
} else {
|
||||
node->rgraph.index
|
||||
= gossip_store_add(rstate->daemon->gs, msg, timestamp,
|
||||
spam, false, NULL);
|
||||
if (!spam)
|
||||
node->bcast.index = node->rgraph.index;
|
||||
false, NULL);
|
||||
node->bcast.index = node->rgraph.index;
|
||||
|
||||
peer_supplied_good_gossip(rstate->daemon, source_peer, 1);
|
||||
}
|
||||
|
@ -1765,7 +1647,7 @@ u8 *handle_node_announcement(struct routing_state *rstate, const u8 *node_ann,
|
|||
}
|
||||
|
||||
/* May still fail, if we don't know the node. */
|
||||
routing_add_node_announcement(rstate, serialized, 0, source_peer, was_unknown, false);
|
||||
routing_add_node_announcement(rstate, serialized, 0, source_peer, was_unknown);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -1956,7 +1838,7 @@ void routing_channel_spent(struct routing_state *rstate,
|
|||
|
||||
/* Save to gossip_store in case we restart */
|
||||
msg = towire_gossip_store_chan_dying(tmpctx, &chan->scid, deadline);
|
||||
index = gossip_store_add(rstate->daemon->gs, msg, 0, false, false, NULL);
|
||||
index = gossip_store_add(rstate->daemon->gs, msg, 0, false, NULL);
|
||||
|
||||
/* Mark it dying, so we don't gossip it */
|
||||
gossip_store_mark_dying(rstate->daemon->gs, &chan->bcast,
|
||||
|
|
|
@ -315,8 +315,7 @@ bool routing_add_channel_update(struct routing_state *rstate,
|
|||
const u8 *update TAKES,
|
||||
u32 index,
|
||||
const struct node_id *source_peer TAKES,
|
||||
bool ignore_timestamp,
|
||||
bool force_spam_flag);
|
||||
bool ignore_timestamp);
|
||||
/**
|
||||
* Add a node_announcement to the network view without checking it
|
||||
*
|
||||
|
@ -328,8 +327,7 @@ bool routing_add_node_announcement(struct routing_state *rstate,
|
|||
const u8 *msg TAKES,
|
||||
u32 index,
|
||||
const struct node_id *source_peer TAKES,
|
||||
bool *was_unknown,
|
||||
bool force_spam_flag);
|
||||
bool *was_unknown);
|
||||
|
||||
/**
|
||||
* Add to rstate->dying_channels
|
||||
|
|
|
@ -1710,132 +1710,6 @@ def test_gossip_no_backtalk(node_factory):
|
|||
TEST_NETWORK != 'regtest',
|
||||
"Channel announcement contains genesis hash, receiving node discards on mismatch"
|
||||
)
|
||||
def test_gossip_ratelimit(node_factory, bitcoind):
|
||||
"""Check that we ratelimit incoming gossip.
|
||||
|
||||
We create a partitioned network, in which the first partition consisting
|
||||
of l1 and l2 is used to create an on-chain footprint and we then feed
|
||||
canned gossip to the other partition consisting of l3. l3 should ratelimit
|
||||
the incoming gossip.
|
||||
|
||||
We get BROKEN logs because gossipd talks about non-existent channels to
|
||||
lightningd ("**BROKEN** lightningd: Local update for bad scid 103x1x1").
|
||||
"""
|
||||
l3 = node_factory.get_node(node_id=3,
|
||||
allow_broken_log=True,
|
||||
options={'dev-gossip-time': 1568096251})
|
||||
|
||||
# Bump to block 102, so the following tx ends up in 103x1:
|
||||
bitcoind.generate_block(1)
|
||||
|
||||
# We don't actually need to start l1 and l2, they're just there to create
|
||||
# an unspent outpoint matching the expected script. This is also more
|
||||
# stable against output ordering issues.
|
||||
tx = bitcoind.rpc.createrawtransaction(
|
||||
[],
|
||||
[
|
||||
# Fundrawtransaction will fill in the first output with the change
|
||||
{"bcrt1qtwxd8wg5eanumk86vfeujvp48hfkgannf77evggzct048wggsrxsum2pmm": 0.01000000}
|
||||
]
|
||||
)
|
||||
tx = bitcoind.rpc.fundrawtransaction(tx, {'changePosition': 0})['hex']
|
||||
tx = bitcoind.rpc.signrawtransactionwithwallet(tx)['hex']
|
||||
txid = bitcoind.rpc.sendrawtransaction(tx)
|
||||
wait_for(lambda: txid in bitcoind.rpc.getrawmempool())
|
||||
|
||||
# Make the tx gossipable:
|
||||
bitcoind.generate_block(6)
|
||||
sync_blockheight(bitcoind, [l3, ])
|
||||
|
||||
def channel_fees(node):
|
||||
channels = node.rpc.listchannels()['channels']
|
||||
return [c['fee_per_millionth'] for c in channels]
|
||||
|
||||
# Here are some ones I generated earlier (by removing gossip
|
||||
# ratelimiting)
|
||||
subprocess.check_call(
|
||||
[
|
||||
'devtools/gossipwith',
|
||||
'--max-messages=0',
|
||||
'{}@localhost:{}'.format(l3.info['id'], l3.port),
|
||||
# announcement
|
||||
'0100987b271fc95a37dbed78e6159e0ab792cda64603780454ce80832b4e31f63a6760abc8fdc53be35bb7cfccd125ee3d15b4fbdfb42165098970c19c7822bb413f46390e0c043c777226927eacd2186a03f064e4bdc30f891cb6e4990af49967d34b338755e99d728987e3d49227815e17f3ab40092434a59e33548e870071176db7d44d8c8f4c4cac27ae6554eb9350e97d47617e3a1355296c78e8234446fa2f138ad1b03439f18520227fb9e9eb92689b3a0ed36e6764f5a41777e9a2a4ce1026d19a4e4d8f7715c13ac2d6bf3238608a1ccf9afd91f774d84d170d9edddebf7460c54d49bd6cd81410bc3eeeba2b7278b1b5f7e748d77d793f31086847d582000006226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f0000670000010001022d223620a359a47ff7f7ac447c85c46c923da53389221a0054c11c1e3ca31d590266e4598d1d3c415f572a8488830b60f7e744ed9235eb0b1ba93283b315c0351802e3bd38009866c9da8ec4aa99cc4ea9c6c0dd46df15c61ef0ce1f271291714e5702324266de8403b3ab157a09f1f784d587af61831c998c151bcc21bb74c2b2314b',
|
||||
# first update is free
|
||||
'010225bfd9c5e2c5660188a14deb4002cd645ee67f00ad3b82146e46711ec460cb0c6819fdd1c680cb6d24e3906679ef071f13243a04a123e4b83310ebf0518ffd4206226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f00006700000100015d773ffb010100060000000000000000000000010000000a000000003b023380'
|
||||
],
|
||||
timeout=TIMEOUT
|
||||
)
|
||||
|
||||
# Wait for it to process channel.
|
||||
wait_for(lambda: channel_fees(l3) == [10])
|
||||
|
||||
subprocess.check_call(
|
||||
[
|
||||
'devtools/gossipwith',
|
||||
'--max-messages=0',
|
||||
'{}@localhost:{}'.format(l3.info['id'], l3.port),
|
||||
# next 4 are let through...
|
||||
'01023a892ad9c9953a54ad3b8e2e03a93d1c973241b62f9a5cd1f17d5cdf08de0e8b4fcd24aa8bd45a48b788fe9dab3d416f28dfa390bc900ec0176ec5bd1afd435706226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f00006700000100015d77400001010006000000000000000000000014000003e9000000003b023380',
|
||||
'010245966763623ebc16796165263d4b21711ef04ebf3929491e695ff89ed2b8ccc0668ceb9e35e0ff5b8901d95732a119c1ed84ac99861daa2de462118f7b70049f06226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f00006700000100015d77400101010006000000000000000000000014000003ea000000003b023380',
|
||||
'0102c479b7684b9db496b844f6925f4ffd8a27c5840a020d1b537623c1545dcd8e195776381bbf51213e541a853a4a49a0faf84316e7ccca5e7074901a96bbabe04e06226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f00006700000100015d77400201010006000000000000000000000014000003eb000000003b023380',
|
||||
# timestamp=1568096259, fee_proportional_millionths=1004
|
||||
'01024b866012d995d3d7aec7b7218a283de2d03492dbfa21e71dd546ec2e36c3d4200453420aa02f476f99c73fe1e223ea192f5fa544b70a8319f2a216f1513d503d06226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f00006700000100015d77400301010006000000000000000000000014000003ec000000003b023380',
|
||||
# update 5 marks you as a nasty spammer, but the routing graph is
|
||||
# updated with this even though the gossip is not broadcast.
|
||||
'01025b5b5a0daed874ab02bd3356d38190ff46bbaf5f10db5067da70f3ca203480ca78059e6621c6143f3da4e454d0adda6d01a9980ed48e71ccd0c613af73570a7106226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f00006700000100015d77400401010006000000000000000000000014000003ed000000003b023380'
|
||||
],
|
||||
timeout=TIMEOUT
|
||||
)
|
||||
# Rate limited channel_update okay to use in routing graph.
|
||||
wait_for(lambda: channel_fees(l3) == [1005])
|
||||
# but should be flagged so we don't propagate to the network.
|
||||
assert(l3.daemon.is_in_log("Spammy update for 103x1x1/1 flagged"))
|
||||
|
||||
# ask for a gossip sync
|
||||
raw = subprocess.run(['devtools/gossipwith',
|
||||
'--initial-sync',
|
||||
'--timeout-after={}'.format(1),
|
||||
'--hex',
|
||||
'{}@localhost:{}'.format(l3.info['id'], l3.port)],
|
||||
check=True,
|
||||
timeout=TIMEOUT, stdout=subprocess.PIPE).stdout
|
||||
# The last message is the most recent channel update.
|
||||
message = raw.decode('utf-8').split()[-1]
|
||||
decoded = subprocess.run(['devtools/decodemsg', message],
|
||||
check=True,
|
||||
timeout=TIMEOUT,
|
||||
stdout=subprocess.PIPE).stdout.decode('utf8')
|
||||
assert("fee_proportional_millionths=1004" in decoded)
|
||||
# Used in routing graph, but not passed to gossip peers.
|
||||
assert("fee_proportional_millionths=1005" not in decoded)
|
||||
|
||||
# 24 seconds later, it will accept another.
|
||||
l3.rpc.call('dev-gossip-set-time', [1568096251 + 24])
|
||||
|
||||
subprocess.run(['devtools/gossipwith',
|
||||
'--max-messages=0',
|
||||
'{}@localhost:{}'.format(l3.info['id'], l3.port),
|
||||
# update 6: timestamp=1568096284 fee_proportional_millionths=1006
|
||||
'010282d24bcd984956bd9b891848404ee59d89643923b21641d2c2c0770a51b8f5da00cef82458add970f0b654aa4c8d54f68a9a1cc6470a35810303b09437f1f73d06226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f00006700000100015d77401c01010006000000000000000000000014000003ee000000003b023380'],
|
||||
check=True, timeout=TIMEOUT)
|
||||
|
||||
wait_for(lambda: channel_fees(l3) == [1006])
|
||||
raw = subprocess.run(['devtools/gossipwith',
|
||||
'--initial-sync',
|
||||
'--timeout-after={}'.format(1),
|
||||
'--hex',
|
||||
'{}@localhost:{}'.format(l3.info['id'], l3.port)],
|
||||
check=True,
|
||||
timeout=TIMEOUT, stdout=subprocess.PIPE).stdout
|
||||
message = raw.decode('utf-8').split()[-1]
|
||||
decoded = subprocess.run(['devtools/decodemsg', message],
|
||||
check=True,
|
||||
timeout=TIMEOUT,
|
||||
stdout=subprocess.PIPE).stdout.decode('utf8')
|
||||
|
||||
assert("fee_proportional_millionths=1006" in decoded)
|
||||
|
||||
|
||||
def check_socket(ip_addr, port):
|
||||
result = True
|
||||
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
|
@ -2131,98 +2005,6 @@ def test_dump_own_gossip(node_factory):
|
|||
assert expect == []
|
||||
|
||||
|
||||
@unittest.skipIf(
    TEST_NETWORK != 'regtest',
    "Channel announcement contains genesis hash, receiving node discards on mismatch"
)
def test_read_spam_nannounce(node_factory, bitcoind):
    """Broadcastable and spam node announcements should be loaded properly
    when reading the gossip_store - even when they are both pending a
    channel update.

    Regression test for issue #6531, which led to a node announcement not
    being deleted from the gossip_store.
    """
    # Pin gossip time so the canned store's timestamps count as fresh.
    opts = {'dev-gossip-time': 1691773540}
    l1 = node_factory.get_node(start=False, opts=opts)
    # Hand-crafted gossip_store: one channel, then two node announcements for
    # the same node - a broadcastable one (rgb=000001) and a rate-limited one
    # (rgb=000002, flags 0x2000 = GOSSIP_STORE_RATELIMIT_BIT) - then updates.
    canned_store = (
        "0c"        # Gossip store version byte
        "0000"      # length flags
        "01b0"      # length
        "d163af25"  # checksum
        "64d66a3a"  # timestamp
        # Channel announcement
        "010000335733f5942df5d950eb8766dee3a9d6626922844ed6ae7a110dd7e7edc32e3f6f3d9ac5cdea23ce25bb8dbf761fd3d5fc56c05b6856316d12e9d32ca0f08c69ca0306fe716e7b5151317d6440b7373d9fbc646ead48f2163f2b6d511afe6a79c75551c2620fc80974f2f864329d9778a08cdfbc9f2c9c1344c432702c66807cfb4db69b80fae8c33c70143d948b36614d620891bee2df99c86bc62207c17e3b9186214c0ccff2ded5598accc90eb1d5b2f7a83cd7f68d712ea047d8019f343063b0a236356a387146f58fa832ddc13c4714522cbb503f5f3ca8fcec6602be2438ad3f98fa0ceed58fe3a066d385fcacd98c704b2a8e98a3e20bf76b35b736000006226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f0000670000010000022d223620a359a47ff7f7ac447c85c46c923da53389221a0054c11c1e3ca31d59035d2b1192dfba134e10e540875d366ebc8bc353d5aa766b80c090b39c3a5d885d029053521d6ea7a52cdd55f733d0fb2d077c0373b0053b5b810d927244061b757302d6063d022691b2490ab454dee73a57c6ff5d308352b461ece69f3c284f2c2412"
        # Channel Amount
        "0000000a911183f600000000"
        "100500000000000f4240"
        # broadcastable node announcement (rgb=000001)
        "0000009533d9cf8c64d66a44"
        "010108f4e25debdd74d0f52b7f1da5dbd82f429d057f48e3d2ed49fcc65cfe3e185c086c1a83d8f3bb15dc0cc852d80390c24cd1fe6d288b91eb55cf98c4a9baf7c0000788a0000a0269a264d66a44022d223620a359a47ff7f7ac447c85c46c923da53389221a0054c11c1e3ca31d5900000153494c454e544152544953542d3536392d67303030643963302d6d6f646465640000"
        # Rate-limited node announcement (rgb=000002)
        "2000009519aa897a64d66a49"
        "0101685f67556cd0a87e04c6d1e9daa4e31dcf14f881d6f1231b1bee8adf6666977b6b9baa497e91d2b6daae726cde69e3faf924d9c95d0ca6b374d6693d4fc0d648000788a0000a0269a264d66a49022d223620a359a47ff7f7ac447c85c46c923da53389221a0054c11c1e3ca31d5900000253494c454e544152544953542d3536392d67303030643963302d6d6f646465640000"
        # Channel Update
        "0000008a0234021c64d66a61"
        "010242ce9d9e79f939399ea1291c04fffcafdfa911246464a4b48c16b7b816dd57b4168562a6c519eb31c37718b68bdfc6345d7c2db663b8b04a3558ce7736c5b61706226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f000067000001000064d66a6101000006000000000000000000000015000003e8000000003b023380"
        # Channel Update
        "0000008acd55fcb264d66a61"
        "01023f5dd1f69f675a71d2a7a34956b26f12c4fe9ee287a8449a3eb7b756c583e3bb1334a8eb7c3e148d0f43e08b95c50017ba62da9a7843fe4850a3cb3c74dc5e2c06226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f000067000001000064d66a6101010006000000000000000000000015000003e8000000003b023380"
    )
    with open(os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'gossip_store'), 'wb') as f:
        f.write(bytearray.fromhex(canned_store))

    # Create the on-chain funding output the canned channel announcement
    # refers to, and bury it so the channel is considered confirmed.
    bitcoind.generate_block(1)
    tx = bitcoind.rpc.createrawtransaction(
        [],
        [
            # Fundrawtransaction will fill in the first output with the change
            {"bcrt1qpd7nwe3jrt07st89uy82nn2xrmqtxyzqpty5ygt6w546lf6n0wcskswjvh": 0.01000000}
        ]
    )
    tx = bitcoind.rpc.fundrawtransaction(tx, {'changePosition': 0})['hex']
    tx = bitcoind.rpc.signrawtransactionwithwallet(tx)['hex']
    txid = bitcoind.rpc.sendrawtransaction(tx)
    wait_for(lambda: txid in bitcoind.rpc.getrawmempool())
    bitcoind.generate_block(6)
    l1.start()
    # retrieves node info originating from the spam announcement
    node_info = l1.rpc.listnodes('022d223620a359a47ff7f7ac447c85c46c923da53389221a0054c11c1e3ca31d59')
    assert only_one(node_info['nodes'])['color'] == '000002'

    out = subprocess.run(['devtools/gossipwith',
                          '--initial-sync',
                          '--timeout-after={}'.format(int(math.sqrt(TIMEOUT) + 1)),
                          '--hex',
                          '{}@localhost:{}'.format(l1.info['id'], l1.port)],
                         check=True,
                         timeout=TIMEOUT, stdout=subprocess.PIPE).stdout.decode()

    received_broadcastable = False
    received_spam = False
    for message in out.splitlines():
        gos = subprocess.run(['devtools/decodemsg', message], check=True,
                             timeout=TIMEOUT,
                             stdout=subprocess.PIPE).stdout.decode()

        for line in gos.splitlines():
            if 'rgb_color=[000001]' in line:
                received_broadcastable = True
            if 'rgb_color=[000002]' in line:
                received_spam = True

    # Only the broadcastable announcement may be gossiped to peers.
    assert received_broadcastable
    assert not received_spam
    # Send a new node announcement. It should replace both.
    subprocess.run(['devtools/gossipwith',
                    '--max-messages=0',
                    '{}@localhost:{}'.format(l1.info['id'], l1.port),
                    # color=000003
                    '0101273fd2c58deb4c3bd98610079657219a5c8291d6a85c3607eae895f25e08babd6e45edd1e62f719b20526ed1c8fc3c7d9e7e3fafa4f24e4cb64872d041a13503000788a0000a0269a264d66a4e022d223620a359a47ff7f7ac447c85c46c923da53389221a0054c11c1e3ca31d5900000353494c454e544152544953542d3536392d67303030643963302d6d6f646465640000'],
                   check=True, timeout=TIMEOUT)
    l1.daemon.wait_for_log('Received node_announcement')
    # A restart re-reads the gossip_store from disk; the #6531 failure mode
    # showed up as a BROKEN log entry while loading the stale announcements.
    l1.restart()
    assert not l1.daemon.is_in_log('BROKEN')
|
||||
|
||||
|
||||
def test_listchannels_deprecated_local(node_factory, bitcoind):
|
||||
"""Test listchannels shows local/private channels only in deprecated mode"""
|
||||
l1, l2, l3 = node_factory.get_nodes(3,
|
||||
|
|
Loading…
Add table
Reference in a new issue