Mirror of https://github.com/ElementsProject/lightning.git (synced 2024-11-19 09:54:16 +01:00)
gossipd: add dev-send-timestamp-filter command for testing timestamp filtering.

Since we currently only (ab)use it to send everything, we need a way to generate boutique queries for testing.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>

parent db6a6442cb
commit c34b49c356
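For context on what the new command exercises: the BOLT 7 gossip_timestamp_filter message carries a first_timestamp and a timestamp_range, and asks the peer to send only gossip whose timestamp lies in [first_timestamp, first_timestamp + timestamp_range). Below is a minimal sketch of that membership check, assuming those BOLT 7 semantics; the helper name is hypothetical and not part of this commit.

	/* Illustrative sketch of the filter semantics this dev command exercises:
	 * a gossip message passes iff its timestamp lies in
	 * [first_timestamp, first_timestamp + timestamp_range).
	 * (Hypothetical helper, not code from this commit.) */
	#include <stdbool.h>
	#include <stdint.h>

	static bool timestamp_passes_filter(uint32_t timestamp,
					    uint32_t first_timestamp,
					    uint32_t timestamp_range)
	{
		/* Written this way to avoid overflowing
		 * first_timestamp + timestamp_range. */
		if (timestamp < first_timestamp)
			return false;
		return timestamp - first_timestamp < timestamp_range;
	}

With first=0 and range=0xFFFFFFFF everything passes, which is why the broad filter in the test below makes the peer replay all of its gossip.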
@@ -1989,6 +1989,37 @@ fail:
			  take(towire_gossip_scids_reply(NULL, false, false)));
	goto out;
}

static struct io_plan *send_timestamp_filter(struct io_conn *conn,
					     struct daemon *daemon,
					     const u8 *msg)
{
	struct pubkey id;
	u32 first, range;
	struct peer *peer;

	if (!fromwire_gossip_send_timestamp_filter(msg, &id, &first, &range))
		master_badmsg(WIRE_GOSSIP_SEND_TIMESTAMP_FILTER, msg);

	peer = find_peer(daemon, &id);
	if (!peer) {
		status_broken("send_timestamp_filter: unknown peer %s",
			      type_to_string(tmpctx, struct pubkey, &id));
		goto out;
	}

	if (!feature_offered(peer->lfeatures, LOCAL_GOSSIP_QUERIES)) {
		status_broken("send_timestamp_filter: no gossip_query support in peer %s",
			      type_to_string(tmpctx, struct pubkey, &id));
		goto out;
	}

	msg = towire_gossip_timestamp_filter(NULL, &daemon->rstate->chain_hash,
					     first, range);
	queue_peer_msg(peer, take(msg));
out:
	return daemon_conn_read_next(conn, &daemon->master);
}
#endif /* DEVELOPER */

static int make_listen_fd(int domain, void *addr, socklen_t len, bool mayfail)
@@ -3209,9 +3240,13 @@ static struct io_plan *recv_req(struct io_conn *conn, struct daemon_conn *master

	case WIRE_GOSSIP_QUERY_SCIDS:
		return query_scids_req(conn, daemon, daemon->master.msg_in);

	case WIRE_GOSSIP_SEND_TIMESTAMP_FILTER:
		return send_timestamp_filter(conn, daemon, daemon->master.msg_in);
#else
	case WIRE_GOSSIP_PING:
	case WIRE_GOSSIP_QUERY_SCIDS:
	case WIRE_GOSSIP_SEND_TIMESTAMP_FILTER:
		break;
#endif /* !DEVELOPER */
@@ -166,6 +166,12 @@ gossip_scids_reply,3131
gossip_scids_reply,,ok,bool
gossip_scids_reply,,complete,bool

# Test gossip timestamp filtering.
gossip_send_timestamp_filter,3028
gossip_send_timestamp_filter,,id,struct pubkey
gossip_send_timestamp_filter,,first_timestamp,u32
gossip_send_timestamp_filter,,timestamp_range,u32

# Given a short_channel_id, return the endpoints
gossip_resolve_channel_request,3009
gossip_resolve_channel_request,,channel_id,struct short_channel_id
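The CSV lines above define gossipd's internal control message (type 3028) exchanged between lightningd and gossipd; the build generates towire/fromwire marshalling helpers from them. The following is only a sketch of that generated interface, with signatures mirroring how the helpers are called elsewhere in this commit; the stand-in typedefs exist solely so the sketch is self-contained.

	/* Sketch of the helpers generated from the CSV entry above
	 * (not the generated header itself). */
	#include <stdbool.h>
	#include <stdint.h>

	typedef uint8_t u8;    /* stand-in for c-lightning's u8 */
	typedef uint32_t u32;  /* stand-in for c-lightning's u32 */
	typedef void tal_t;    /* stand-in for the ccan/tal context type */
	struct pubkey;         /* opaque here */

	u8 *towire_gossip_send_timestamp_filter(const tal_t *ctx,
						const struct pubkey *id,
						u32 first_timestamp,
						u32 timestamp_range);
	bool fromwire_gossip_send_timestamp_filter(const void *p,
						   struct pubkey *id,
						   u32 *first_timestamp,
						   u32 *timestamp_range);

In this commit, lightningd builds the message with the towire helper in json_dev_send_timestamp_filter, and gossipd decodes it with the fromwire helper in send_timestamp_filter.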
@@ -137,6 +137,7 @@ static unsigned gossip_msg(struct subd *gossip, const u8 *msg, const int *fds)
	case WIRE_GOSSIP_ROUTING_FAILURE:
	case WIRE_GOSSIP_MARK_CHANNEL_UNROUTABLE:
	case WIRE_GOSSIP_QUERY_SCIDS:
	case WIRE_GOSSIP_SEND_TIMESTAMP_FILTER:
	case WIRE_GOSSIPCTL_PEER_DISCONNECT:
	case WIRE_GOSSIPCTL_PEER_IMPORTANT:
	case WIRE_GOSSIPCTL_PEER_DISCONNECTED:
@@ -629,4 +630,51 @@ static const struct json_command dev_query_scids_command = {
	"Query {peerid} for [scids]"
};
AUTODATA(json_command, &dev_query_scids_command);

static void json_dev_send_timestamp_filter(struct command *cmd,
					   const char *buffer,
					   const jsmntok_t *params)
{
	u8 *msg;
	jsmntok_t *idtok, *firsttok, *rangetok;
	struct pubkey id;
	u32 first, range;

	if (!json_get_params(cmd, buffer, params,
			     "id", &idtok,
			     "first", &firsttok,
			     "range", &rangetok,
			     NULL)) {
		return;
	}

	if (!json_tok_pubkey(buffer, idtok, &id)) {
		command_fail(cmd, JSONRPC2_INVALID_PARAMS,
			     "'%.*s' is not a valid id",
			     idtok->end - idtok->start,
			     buffer + idtok->start);
		return;
	}

	if (!json_tok_number(buffer, firsttok, &first)
	    || !json_tok_number(buffer, rangetok, &range)) {
		command_fail(cmd, JSONRPC2_INVALID_PARAMS,
			     "bad first or range numbers");
		return;
	}

	log_debug(cmd->ld->log, "Setting timestamp range %u+%u", first, range);
	/* Tell gossipd, since this is a gossip query. */
	msg = towire_gossip_send_timestamp_filter(NULL, &id, first, range);
	subd_send_msg(cmd->ld->gossip, take(msg));

	command_success(cmd, null_response(cmd));
}

static const struct json_command dev_send_timestamp_filter = {
	"dev-send-timestamp-filter",
	json_dev_send_timestamp_filter,
	"Send {peerid} the timestamp filter {first} {range}"
};
AUTODATA(json_command, &dev_send_timestamp_filter);
#endif /* DEVELOPER */
@@ -2681,6 +2681,93 @@ class LightningDTests(BaseLightningDTests):
        l1.daemon.wait_for_log('\[IN\] 0101')
        l1.daemon.wait_for_log('\[IN\] 0101')

    @unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
    def test_gossip_timestamp_filter(self):
        # Need full IO logging so we can see gossip (from gossipd and channeld)
        l1 = self.node_factory.get_node(options={'log-level': 'io'})
        l2 = self.node_factory.get_node()
        l3 = self.node_factory.get_node()

        # Full IO logging for gossipds
        subprocess.run(['kill', '-USR1', l1.subd_pid('gossipd')])
        subprocess.run(['kill', '-USR1', l2.subd_pid('gossipd')])

        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
        l2.rpc.connect(l3.info['id'], 'localhost', l3.port)

        before_anything = int(time.time() - 1.0)

        # Make a public channel.
        chan12 = self.fund_channel(l1, l2, 10**5)
        bitcoind.generate_block(5)
        sync_blockheight([l1, l2])

        self.wait_for_routes(l3, [chan12])
        after_12 = int(time.time())
        # Full IO logging for l1's channeld
        subprocess.run(['kill', '-USR1', l1.subd_pid('channeld')])

        # Make another one, different timestamp.
        chan23 = self.fund_channel(l2, l3, 10**5)
        bitcoind.generate_block(5)
        sync_blockheight([l2, l3])

        self.wait_for_routes(l1, [chan23])
        after_23 = int(time.time())

        # Make sure l1 has received all the gossip.
        wait_for(lambda: ['alias' in node for node in l1.rpc.listnodes()['nodes']] == [True, True, True])

        # l1 sets broad timestamp, will receive info about both channels again.
        l1.rpc.dev_send_timestamp_filter(id=l2.info['id'],
                                         first=0,
                                         range=0xFFFFFFFF)
        before_sendfilter = l1.daemon.logsearch_start

        # 0x0100 = channel_announcement
        # 0x0102 = channel_update
        # 0x0101 = node_announcement
        l1.daemon.wait_for_log('\[IN\] 0100')
        # The order of node_announcements relative to others is undefined.
        l1.daemon.wait_for_logs(['\[IN\] 0102',
                                 '\[IN\] 0102',
                                 '\[IN\] 0100',
                                 '\[IN\] 0102',
                                 '\[IN\] 0102',
                                 '\[IN\] 0101',
                                 '\[IN\] 0101',
                                 '\[IN\] 0101'])

        # Now timestamp which doesn't overlap (gives nothing).
        before_sendfilter = l1.daemon.logsearch_start
        l1.rpc.dev_send_timestamp_filter(id=l2.info['id'],
                                         first=0,
                                         range=before_anything)
        time.sleep(1)
        assert not l1.daemon.is_in_log('\[IN\] 0100', before_sendfilter)

        # Now choose range which will only give first update.
        l1.rpc.dev_send_timestamp_filter(id=l2.info['id'],
                                         first=before_anything,
                                         range=after_12 - before_anything + 1)
        # 0x0100 = channel_announcement
        l1.daemon.wait_for_log('\[IN\] 0100')
        # 0x0102 = channel_update
        # (Node announcement may have any timestamp)
        l1.daemon.wait_for_log('\[IN\] 0102')
        l1.daemon.wait_for_log('\[IN\] 0102')

        # Now choose range which will only give second update.
        l1.rpc.dev_send_timestamp_filter(id=l2.info['id'],
                                         first=after_12,
                                         range=after_23 - after_12 + 1)
        # 0x0100 = channel_announcement
        l1.daemon.wait_for_log('\[IN\] 0100')
        # 0x0102 = channel_update
        # (Node announcement may have any timestamp)
        l1.daemon.wait_for_log('\[IN\] 0102')
        l1.daemon.wait_for_log('\[IN\] 0102')

    @unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
    def test_routing_gossip_reconnect(self):
        # Connect two peers, reconnect and then see if we resume the