From c633cbe2ee82f725710baf865d3940aabd60306c Mon Sep 17 00:00:00 2001
From: Rusty Russell
Date: Mon, 4 Jun 2018 13:53:25 +0930
Subject: [PATCH] tests: add dev-query-scids

And write the test for it.

Signed-off-by: Rusty Russell
---
 contrib/pylightning/lightning/lightning.py | 10 +++
 lightningd/gossip_control.c                | 85 ++++++++++++++++++++++
 tests/test_lightningd.py                   | 59 +++++++++++++++
 3 files changed, 154 insertions(+)

diff --git a/contrib/pylightning/lightning/lightning.py b/contrib/pylightning/lightning/lightning.py
index f8d87b7ed..049bc6944 100644
--- a/contrib/pylightning/lightning/lightning.py
+++ b/contrib/pylightning/lightning/lightning.py
@@ -249,6 +249,16 @@ class LightningRpc(UnixDomainSocketRpc):
         """
         return self.call("dev-crash")
 
+    def dev_query_scids(self, id, scids):
+        """
+        Ask peer for a particular set of scids
+        """
+        payload = {
+            "id": id,
+            "scids": scids
+        }
+        return self.call("dev-query-scids", payload)
+
     def getinfo(self):
         """
         Show information about this node
diff --git a/lightningd/gossip_control.c b/lightningd/gossip_control.c
index 1cecba96f..aa68a8629 100644
--- a/lightningd/gossip_control.c
+++ b/lightningd/gossip_control.c
@@ -545,3 +545,88 @@ static const struct json_command listchannels_command = {
 	"Show channel {short_channel_id} (or all known channels, if no {short_channel_id})"
 };
 AUTODATA(json_command, &listchannels_command);
+
+#if DEVELOPER
+static void json_scids_reply(struct subd *gossip UNUSED, const u8 *reply,
+			     const int *fds UNUSED, struct command *cmd)
+{
+	bool ok, complete;
+	struct json_result *response = new_json_result(cmd);
+
+	if (!fromwire_gossip_scids_reply(reply, &ok, &complete)) {
+		command_fail(cmd, LIGHTNINGD,
+			     "Gossip gave bad gossip_scids_reply");
+		return;
+	}
+
+	if (!ok) {
+		command_fail(cmd, LIGHTNINGD,
+			     "Gossip refused to query peer");
+		return;
+	}
+
+	json_object_start(response, NULL);
+	json_add_bool(response, "complete", complete);
+	json_object_end(response);
+	command_success(cmd, response);
+}
+
+static void json_dev_query_scids(struct command *cmd,
+				 const char *buffer, const jsmntok_t *params)
+{
+	u8 *msg;
+	jsmntok_t *idtok, *scidstok;
+	const jsmntok_t *t, *end;
+	struct pubkey id;
+	struct short_channel_id *scids;
+	size_t i;
+
+	if (!json_get_params(cmd, buffer, params,
+			     "id", &idtok,
+			     "scids", &scidstok,
+			     NULL)) {
+		return;
+	}
+
+	if (!json_tok_pubkey(buffer, idtok, &id)) {
+		command_fail(cmd, JSONRPC2_INVALID_PARAMS,
+			     "'%.*s' is not a valid id",
+			     idtok->end - idtok->start,
+			     buffer + idtok->start);
+		return;
+	}
+
+	if (scidstok->type != JSMN_ARRAY) {
+		command_fail(cmd, JSONRPC2_INVALID_PARAMS,
+			     "'%.*s' is not an array",
+			     scidstok->end - scidstok->start,
+			     buffer + scidstok->start);
+		return;
+	}
+
+	scids = tal_arr(cmd, struct short_channel_id, scidstok->size);
+	end = json_next(scidstok);
+	for (i = 0, t = scidstok + 1; t < end; t = json_next(t), i++) {
+		if (!json_tok_short_channel_id(buffer, t, &scids[i])) {
+			command_fail(cmd, JSONRPC2_INVALID_PARAMS,
+				     "scid %zu '%.*s' is not an scid",
+				     i, t->end - t->start,
+				     buffer + t->start);
+			return;
+		}
+	}
+
+	/* Tell gossipd, since this is a gossip query. */
+	msg = towire_gossip_query_scids(cmd, &id, scids);
+	subd_req(cmd->ld->gossip, cmd->ld->gossip,
+		 take(msg), -1, 0, json_scids_reply, cmd);
+	command_still_pending(cmd);
+}
+
+static const struct json_command dev_query_scids_command = {
+	"dev-query-scids",
+	json_dev_query_scids,
+	"Query {peerid} for [scids]"
+};
+AUTODATA(json_command, &dev_query_scids_command);
+#endif /* DEVELOPER */
diff --git a/tests/test_lightningd.py b/tests/test_lightningd.py
index 1717b93a6..4cc80459e 100644
--- a/tests/test_lightningd.py
+++ b/tests/test_lightningd.py
@@ -2622,6 +2622,65 @@ class LightningDTests(BaseLightningDTests):
         l1.daemon.wait_for_log('Got pong 1000 bytes \({}\.\.\.\)'
                                .format(l2.info['version']))
 
+    @unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
+    def test_query_short_channel_id(self):
+        l1 = self.node_factory.get_node(options={'log-level': 'io'})
+        l2 = self.node_factory.get_node()
+        l3 = self.node_factory.get_node()
+        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
+        l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
+
+        # Need full IO logging so we can see gossip (from gossipd and channeld)
+        subprocess.run(['kill', '-USR1', l1.subd_pid('gossipd')])
+
+        # Empty result tests.
+        reply = l1.rpc.dev_query_scids(l2.info['id'], ['1:1:1', '2:2:2'])
+        # 0x0105 = query_short_channel_ids
+        l1.daemon.wait_for_log('\[OUT\] 0105.*0000000100000100010000020000020002')
+        assert reply['complete']
+
+        # Make channels public.
+        scid12 = self.fund_channel(l1, l2, 10**5)
+        scid23 = self.fund_channel(l2, l3, 10**5)
+        bitcoind.generate_block(5)
+        sync_blockheight([l1, l2, l3])
+
+        # It will know about everything.
+        l1.daemon.wait_for_log('Received node_announcement for node {}'.format(l3.info['id']))
+        subprocess.run(['kill', '-USR1', l1.subd_pid('channeld')])
+
+        # This query should get channel announcements, channel updates, and node announcements.
+        reply = l1.rpc.dev_query_scids(l2.info['id'], [scid23])
+        # 0x0105 = query_short_channel_ids
+        l1.daemon.wait_for_log('\[OUT\] 0105')
+        assert reply['complete']
+
+        # 0x0100 = channel_announcement
+        l1.daemon.wait_for_log('\[IN\] 0100')
+        # 0x0102 = channel_update
+        l1.daemon.wait_for_log('\[IN\] 0102')
+        l1.daemon.wait_for_log('\[IN\] 0102')
+        # 0x0101 = node_announcement
+        l1.daemon.wait_for_log('\[IN\] 0101')
+        l1.daemon.wait_for_log('\[IN\] 0101')
+
+        reply = l1.rpc.dev_query_scids(l2.info['id'], [scid12, scid23])
+        assert reply['complete']
+        # Technically, this order could be different, but this matches code.
+        # 0x0100 = channel_announcement
+        l1.daemon.wait_for_log('\[IN\] 0100')
+        # 0x0102 = channel_update
+        l1.daemon.wait_for_log('\[IN\] 0102')
+        l1.daemon.wait_for_log('\[IN\] 0102')
+        # 0x0100 = channel_announcement
+        l1.daemon.wait_for_log('\[IN\] 0100')
+        # 0x0102 = channel_update
+        l1.daemon.wait_for_log('\[IN\] 0102')
+        l1.daemon.wait_for_log('\[IN\] 0102')
+        # 0x0101 = node_announcement
+        l1.daemon.wait_for_log('\[IN\] 0101')
+        l1.daemon.wait_for_log('\[IN\] 0101')
+
     @unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
     def test_routing_gossip_reconnect(self):
         # Connect two peers, reconnect and then see if we resume the
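
Usage sketch (not part of the patch above): assuming a node built with
DEVELOPER=1 that is already connected to the target peer, the new wrapper
can be called from the pylightning client along these lines. The RPC socket
path and peer id below are placeholders, and the scids can be any
short_channel_ids you want the peer queried for:

    from lightning import LightningRpc

    # Hypothetical RPC socket path of a local developer-mode node.
    rpc = LightningRpc("/path/to/lightning-dir/lightning-rpc")

    # Hypothetical peer node id; the peer must already be connected.
    peer_id = "02..."

    # Ask the peer for these short_channel_ids; gossipd sends a
    # query_short_channel_ids (0x0105) message, and the reply's "complete"
    # field reports whether the peer said its dump was complete.
    reply = rpc.dev_query_scids(peer_id, ['1:1:1', '2:2:2'])
    assert reply['complete']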