JSONRPC: rename getpeers to listpeers.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
This commit is contained in:
Rusty Russell 2018-01-17 10:23:20 +10:30
parent 34a994c7db
commit 5698a133c2
3 changed files with 28 additions and 28 deletions

View File

@@ -79,9 +79,9 @@ class LightningRpc(UnixDomainSocketRpc):
"""Get info about a specific peer, optionally with its log.
"""
if log_level:
-peers = self.getpeers(log_level)['peers']
+peers = self.listpeers(log_level)['peers']
else:
-peers = self.getpeers()['peers']
+peers = self.listpeers()['peers']
for p in peers:
if p['peerid'] == peer_id:
return p

View File

@@ -869,7 +869,7 @@ static void gossipd_getpeers_complete(struct subd *gossip, const u8 *msg,
command_success(gpa->cmd, response);
}
-static void json_getpeers(struct command *cmd,
+static void json_listpeers(struct command *cmd,
const char *buffer, const jsmntok_t *params)
{
jsmntok_t *leveltok;
@@ -905,13 +905,13 @@ static void json_getpeers(struct command *cmd,
command_still_pending(cmd);
}
-static const struct json_command getpeers_command = {
-	"getpeers",
-	json_getpeers,
+static const struct json_command listpeers_command = {
+	"listpeers",
+	json_listpeers,
"List the current peers, if {level} is set, include {log}s",
"Returns a 'peers' array"
};
-AUTODATA(json_command, &getpeers_command);
+AUTODATA(json_command, &listpeers_command);
struct peer *peer_from_json(struct lightningd *ld,
const char *buffer,

View File

@@ -385,8 +385,8 @@ class LightningDTests(BaseLightningDTests):
assert ret['id'] == l1.info['id']
# Should still only have one peer!
-assert len(l1.rpc.getpeers()) == 1
-assert len(l2.rpc.getpeers()) == 1
+assert len(l1.rpc.listpeers()) == 1
+assert len(l2.rpc.listpeers()) == 1
def test_balance(self):
l1,l2 = self.connect()
@@ -1979,8 +1979,8 @@ class LightningDTests(BaseLightningDTests):
.format(l2.info['id']))
# Should still only have one peer!
-assert len(l1.rpc.getpeers()) == 1
-assert len(l2.rpc.getpeers()) == 1
+assert len(l1.rpc.listpeers()) == 1
+assert len(l2.rpc.listpeers()) == 1
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_disconnect_funder(self):
@@ -2008,8 +2008,8 @@ class LightningDTests(BaseLightningDTests):
l1.rpc.fundchannel(l2.info['id'], 20000)
# Should still only have one peer!
-assert len(l1.rpc.getpeers()) == 1
-assert len(l2.rpc.getpeers()) == 1
+assert len(l1.rpc.listpeers()) == 1
+assert len(l2.rpc.listpeers()) == 1
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_disconnect_fundee(self):
@@ -2035,8 +2035,8 @@ class LightningDTests(BaseLightningDTests):
l1.rpc.fundchannel(l2.info['id'], 20000)
# Should still only have one peer!
-assert len(l1.rpc.getpeers()) == 1
-assert len(l2.rpc.getpeers()) == 1
+assert len(l1.rpc.listpeers()) == 1
+assert len(l2.rpc.listpeers()) == 1
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_disconnect_half_signed(self):
@@ -2460,8 +2460,8 @@ class LightningDTests(BaseLightningDTests):
# Fail because l1 dislikes l2's huge locktime.
self.assertRaisesRegex(ValueError, r'to_self_delay \d+ larger than \d+',
l1.rpc.fundchannel, l2.info['id'], int(funds/10))
-assert l1.rpc.getpeers()['peers'][0]['connected']
-assert l2.rpc.getpeers()['peers'][0]['connected']
+assert l1.rpc.listpeers()['peers'][0]['connected']
+assert l2.rpc.listpeers()['peers'][0]['connected']
# Restart l2 without ridiculous locktime.
l2.daemon.cmd_line.remove('--locktime-blocks={}'.format(max_locktime + 1))
@@ -2473,8 +2473,8 @@ class LightningDTests(BaseLightningDTests):
l1.rpc.fundchannel, l2.info['id'], funds)
# Should still be connected.
-assert l1.rpc.getpeers()['peers'][0]['connected']
-assert l2.rpc.getpeers()['peers'][0]['connected']
+assert l1.rpc.listpeers()['peers'][0]['connected']
+assert l2.rpc.listpeers()['peers'][0]['connected']
# This works.
l1.rpc.fundchannel(l2.info['id'], int(funds/10))
@@ -2541,7 +2541,7 @@ class LightningDTests(BaseLightningDTests):
self.fund_channel(l1, l2, 100000)
-peers = l1.rpc.getpeers()['peers']
+peers = l1.rpc.listpeers()['peers']
assert(len(peers) == 1 and peers[0]['state'] == 'CHANNELD_NORMAL')
# Both nodes should now have exactly one channel in the database
@@ -2562,28 +2562,28 @@ class LightningDTests(BaseLightningDTests):
print(" ".join(l2.daemon.cmd_line + ['--dev-debugger=channeld']))
# Wait for l1 to notice
-wait_for(lambda: not l1.rpc.getpeers()['peers'][0]['connected'])
+wait_for(lambda: not l1.rpc.listpeers()['peers'][0]['connected'])
# Now restart l1 and it should reload peers/channels from the DB
l2.daemon.start()
-wait_for(lambda: len(l2.rpc.getpeers()['peers']) == 1)
+wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 1)
# Wait for the restored HTLC to finish
-wait_for(lambda: l1.rpc.getpeers()['peers'][0]['msatoshi_to_us'] == 99990000, interval=1)
+wait_for(lambda: l1.rpc.listpeers()['peers'][0]['msatoshi_to_us'] == 99990000, interval=1)
-wait_for(lambda: len([p for p in l1.rpc.getpeers()['peers'] if p['connected']]), interval=1)
-wait_for(lambda: len([p for p in l2.rpc.getpeers()['peers'] if p['connected']]), interval=1)
+wait_for(lambda: len([p for p in l1.rpc.listpeers()['peers'] if p['connected']]), interval=1)
+wait_for(lambda: len([p for p in l2.rpc.listpeers()['peers'] if p['connected']]), interval=1)
# Now make sure this is really functional by sending a payment
self.pay(l1, l2, 10000)
time.sleep(1)
-assert l1.rpc.getpeers()['peers'][0]['msatoshi_to_us'] == 99980000
-assert l2.rpc.getpeers()['peers'][0]['msatoshi_to_us'] == 20000
+assert l1.rpc.listpeers()['peers'][0]['msatoshi_to_us'] == 99980000
+assert l2.rpc.listpeers()['peers'][0]['msatoshi_to_us'] == 20000
# Finally restart l1, and make sure it remembers
l1.stop()
l1.daemon.start()
-assert l1.rpc.getpeers()['peers'][0]['msatoshi_to_us'] == 99980000
+assert l1.rpc.listpeers()['peers'][0]['msatoshi_to_us'] == 99980000
# Now make sure l1 is watching for unilateral closes
l2.rpc.dev_fail(l1.info['id']);