JSONRPC: rename getpeers to listpeers.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
This commit is contained in:
parent 34a994c7db, commit 5698a133c2
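In practice the rename is purely a name change: the method's arguments and its returned 'peers' array stay the same, and only the JSON-RPC name (and the matching pylightning wrapper) moves from getpeers to listpeers. A minimal sketch of a caller after this commit, assuming the pylightning import path and a hypothetical socket path:

# Sketch of the renamed call via the LightningRpc client shown in the
# first hunk below; the import path and socket path are assumptions.
from lightning import LightningRpc

rpc = LightningRpc("/tmp/lightning-rpc")    # hypothetical socket path
peers = rpc.listpeers()['peers']            # was rpc.getpeers() before this commit
for p in peers:
    print(p['peerid'], p['connected'])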
@@ -79,9 +79,9 @@ class LightningRpc(UnixDomainSocketRpc):
         """Get info about a specific peer, optionally with its log.
         """
         if log_level:
-            peers = self.getpeers(log_level)['peers']
+            peers = self.listpeers(log_level)['peers']
         else:
-            peers = self.getpeers()['peers']
+            peers = self.listpeers()['peers']
         for p in peers:
             if p['peerid'] == peer_id:
                 return p
@@ -869,7 +869,7 @@ static void gossipd_getpeers_complete(struct subd *gossip, const u8 *msg,
 	command_success(gpa->cmd, response);
 }
 
-static void json_getpeers(struct command *cmd,
+static void json_listpeers(struct command *cmd,
 			  const char *buffer, const jsmntok_t *params)
 {
 	jsmntok_t *leveltok;
@@ -905,13 +905,13 @@ static void json_getpeers(struct command *cmd,
 	command_still_pending(cmd);
 }
 
-static const struct json_command getpeers_command = {
-	"getpeers",
-	json_getpeers,
+static const struct json_command listpeers_command = {
+	"listpeers",
+	json_listpeers,
 	"List the current peers, if {level} is set, include {log}s",
 	"Returns a 'peers' array"
 };
-AUTODATA(json_command, &getpeers_command);
+AUTODATA(json_command, &listpeers_command);
 
 struct peer *peer_from_json(struct lightningd *ld,
 			    const char *buffer,
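Since the json_command registration above swaps the name outright rather than adding an alias, a node built from this commit no longer answers getpeers at all. For scripts that must run against nodes from both sides of the rename, a hypothetical fallback helper (not part of this commit; pylightning surfaces RPC errors as ValueError, which the tests below also rely on):

# Hypothetical compatibility helper, not part of this commit.
def list_peers(rpc):
    try:
        return rpc.listpeers()['peers']
    except ValueError:
        # Older nodes only know the 'getpeers' name.
        return rpc.getpeers()['peers']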
@@ -385,8 +385,8 @@ class LightningDTests(BaseLightningDTests):
         assert ret['id'] == l1.info['id']
 
         # Should still only have one peer!
-        assert len(l1.rpc.getpeers()) == 1
-        assert len(l2.rpc.getpeers()) == 1
+        assert len(l1.rpc.listpeers()) == 1
+        assert len(l2.rpc.listpeers()) == 1
 
     def test_balance(self):
         l1,l2 = self.connect()
@@ -1979,8 +1979,8 @@ class LightningDTests(BaseLightningDTests):
                        .format(l2.info['id']))
 
         # Should still only have one peer!
-        assert len(l1.rpc.getpeers()) == 1
-        assert len(l2.rpc.getpeers()) == 1
+        assert len(l1.rpc.listpeers()) == 1
+        assert len(l2.rpc.listpeers()) == 1
 
     @unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
     def test_disconnect_funder(self):
@@ -2008,8 +2008,8 @@ class LightningDTests(BaseLightningDTests):
         l1.rpc.fundchannel(l2.info['id'], 20000)
 
         # Should still only have one peer!
-        assert len(l1.rpc.getpeers()) == 1
-        assert len(l2.rpc.getpeers()) == 1
+        assert len(l1.rpc.listpeers()) == 1
+        assert len(l2.rpc.listpeers()) == 1
 
     @unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
     def test_disconnect_fundee(self):
@@ -2035,8 +2035,8 @@ class LightningDTests(BaseLightningDTests):
         l1.rpc.fundchannel(l2.info['id'], 20000)
 
         # Should still only have one peer!
-        assert len(l1.rpc.getpeers()) == 1
-        assert len(l2.rpc.getpeers()) == 1
+        assert len(l1.rpc.listpeers()) == 1
+        assert len(l2.rpc.listpeers()) == 1
 
     @unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
     def test_disconnect_half_signed(self):
@@ -2460,8 +2460,8 @@ class LightningDTests(BaseLightningDTests):
         # Fail because l1 dislikes l2's huge locktime.
         self.assertRaisesRegex(ValueError, r'to_self_delay \d+ larger than \d+',
                                l1.rpc.fundchannel, l2.info['id'], int(funds/10))
-        assert l1.rpc.getpeers()['peers'][0]['connected']
-        assert l2.rpc.getpeers()['peers'][0]['connected']
+        assert l1.rpc.listpeers()['peers'][0]['connected']
+        assert l2.rpc.listpeers()['peers'][0]['connected']
 
         # Restart l2 without ridiculous locktime.
         l2.daemon.cmd_line.remove('--locktime-blocks={}'.format(max_locktime + 1))
@@ -2473,8 +2473,8 @@ class LightningDTests(BaseLightningDTests):
                                l1.rpc.fundchannel, l2.info['id'], funds)
 
         # Should still be connected.
-        assert l1.rpc.getpeers()['peers'][0]['connected']
-        assert l2.rpc.getpeers()['peers'][0]['connected']
+        assert l1.rpc.listpeers()['peers'][0]['connected']
+        assert l2.rpc.listpeers()['peers'][0]['connected']
 
         # This works.
         l1.rpc.fundchannel(l2.info['id'], int(funds/10))
@@ -2541,7 +2541,7 @@ class LightningDTests(BaseLightningDTests):
 
         self.fund_channel(l1, l2, 100000)
 
-        peers = l1.rpc.getpeers()['peers']
+        peers = l1.rpc.listpeers()['peers']
         assert(len(peers) == 1 and peers[0]['state'] == 'CHANNELD_NORMAL')
 
         # Both nodes should now have exactly one channel in the database
@@ -2562,28 +2562,28 @@ class LightningDTests(BaseLightningDTests):
         print(" ".join(l2.daemon.cmd_line + ['--dev-debugger=channeld']))
 
         # Wait for l1 to notice
-        wait_for(lambda: not l1.rpc.getpeers()['peers'][0]['connected'])
+        wait_for(lambda: not l1.rpc.listpeers()['peers'][0]['connected'])
 
         # Now restart l1 and it should reload peers/channels from the DB
         l2.daemon.start()
-        wait_for(lambda: len(l2.rpc.getpeers()['peers']) == 1)
+        wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 1)
 
         # Wait for the restored HTLC to finish
-        wait_for(lambda: l1.rpc.getpeers()['peers'][0]['msatoshi_to_us'] == 99990000, interval=1)
+        wait_for(lambda: l1.rpc.listpeers()['peers'][0]['msatoshi_to_us'] == 99990000, interval=1)
 
-        wait_for(lambda: len([p for p in l1.rpc.getpeers()['peers'] if p['connected']]), interval=1)
-        wait_for(lambda: len([p for p in l2.rpc.getpeers()['peers'] if p['connected']]), interval=1)
+        wait_for(lambda: len([p for p in l1.rpc.listpeers()['peers'] if p['connected']]), interval=1)
+        wait_for(lambda: len([p for p in l2.rpc.listpeers()['peers'] if p['connected']]), interval=1)
 
         # Now make sure this is really functional by sending a payment
         self.pay(l1, l2, 10000)
         time.sleep(1)
-        assert l1.rpc.getpeers()['peers'][0]['msatoshi_to_us'] == 99980000
-        assert l2.rpc.getpeers()['peers'][0]['msatoshi_to_us'] == 20000
+        assert l1.rpc.listpeers()['peers'][0]['msatoshi_to_us'] == 99980000
+        assert l2.rpc.listpeers()['peers'][0]['msatoshi_to_us'] == 20000
 
         # Finally restart l1, and make sure it remembers
         l1.stop()
         l1.daemon.start()
-        assert l1.rpc.getpeers()['peers'][0]['msatoshi_to_us'] == 99980000
+        assert l1.rpc.listpeers()['peers'][0]['msatoshi_to_us'] == 99980000
 
         # Now make sure l1 is watching for unilateral closes
         l2.rpc.dev_fail(l1.info['id']);
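The restart test above polls listpeers through wait_for() until the node reaches the expected state instead of sleeping for a fixed time. The real helper lives in the project's test utilities; a minimal sketch of the pattern it implements, with the default timeout here being an assumption:

import time

# Minimal sketch of a wait_for()-style polling helper as used above;
# the default timeout is an assumption, not taken from the test suite.
def wait_for(success, timeout=30, interval=0.25):
    start = time.time()
    while not success():
        if time.time() - start > timeout:
            raise TimeoutError("timed out waiting for condition")
        time.sleep(interval)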