core-lightning/tests/test_gossip.py
Rusty Russell edf1b3cec9 More option cleanups.
Because we have too many which are never used and I don't want to document
them.

1. Remove unused anchor_onchain_wait.  When implemented, it should be
   hardcoded to 100 or more.
2. Remove anchor_confirms_max.  10 is always reasonable, and we can re-add
   an override option should someone need it.
3. max_htlc_expiry should be the same as locktime_max (which increases
   from 3 to 5 days by default): they're both a limit on how long
   funds can be locked up.
4. channel_update_interval should always be a dev option.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2018-05-20 02:32:42 +00:00


from fixtures import *  # noqa: F401,F403
from test_lightningd import wait_for

import os
import time
import unittest


DEVELOPER = os.getenv("DEVELOPER", "0") == "1"

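# Options prefixed with dev- (such as dev-channel-update-interval below) are
# only available in developer builds, hence the skip when DEVELOPER is unset.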
@unittest.skipIf(not DEVELOPER, "needs --dev-broadcast-interval, --dev-channelupdate-interval")
def test_gossip_pruning(node_factory, bitcoind):
""" Create channel and see it being updated in time before pruning
"""
    opts = {'dev-channel-update-interval': 5}
    l1, l2, l3 = node_factory.get_nodes(3, opts)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)

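    # Fund a simple line topology: l1 -> l2 -> l3.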
    scid1 = l1.fund_channel(l2, 10**6)
    scid2 = l2.fund_channel(l3, 10**6)

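    # Confirm the funding transactions; six confirmations is the depth at
    # which channels are normally announced to the network.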
    bitcoind.rpc.generate(6)

    # Channels should be activated locally
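    # (each channel appears as two directed entries in listchannels, hence 4).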
    wait_for(lambda: [c['active'] for c in l1.rpc.listchannels()['channels']] == [True] * 4)
    wait_for(lambda: [c['active'] for c in l2.rpc.listchannels()['channels']] == [True] * 4)
    wait_for(lambda: [c['active'] for c in l3.rpc.listchannels()['channels']] == [True] * 4)

    # All of them should send a keepalive message
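    # (one per channel they are party to, at the interval configured above).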
    l1.daemon.wait_for_logs([
        'Sending keepalive channel_update for {}'.format(scid1),
    ])
    l2.daemon.wait_for_logs([
        'Sending keepalive channel_update for {}'.format(scid1),
        'Sending keepalive channel_update for {}'.format(scid2),
    ])
    l3.daemon.wait_for_logs([
        'Sending keepalive channel_update for {}'.format(scid2),
    ])

    # Now kill l3, so that l2 and l1 can prune it from their view after 10 seconds

    # FIXME: This sleep() masks a real bug: that channeld sends a
    # channel_update message (to disable the channel) with the same
    # timestamp as the last keepalive, and thus is ignored.  The minimal
    # fix is to backdate the keepalives 1 second, but maybe we should
    # simply have gossipd generate all updates?
    time.sleep(1)

    l3.stop()

l1.daemon.wait_for_log("Pruning channel {} from network view".format(scid2))
l2.daemon.wait_for_log("Pruning channel {} from network view".format(scid2))
    assert scid2 not in [c['short_channel_id'] for c in l1.rpc.listchannels()['channels']]
    assert scid2 not in [c['short_channel_id'] for c in l2.rpc.listchannels()['channels']]
    assert l3.info['id'] not in [n['nodeid'] for n in l1.rpc.listnodes()['nodes']]
    assert l3.info['id'] not in [n['nodeid'] for n in l2.rpc.listnodes()['nodes']]