from collections import namedtuple
from decimal import Decimal
from fixtures import *  # noqa: F401,F403
from fixtures import TEST_NETWORK
from flaky import flaky  # noqa: F401
from pyln.client import RpcError, Millisatoshi
from utils import (
    DEVELOPER, only_one, wait_for, sync_blockheight, VALGRIND, TIMEOUT,
    SLOW_MACHINE, expected_peer_features, expected_node_features,
    check_coin_moves, first_channel_id, account_balance
)
from bitcoin.core import CMutableTransaction, CMutableTxOut

import binascii
import os
import pytest
import random
import re
import shutil
import time
import unittest


def test_connect(node_factory):
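    """Connect two nodes and check reconnects and the connect error paths."""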
    l1, l2 = node_factory.line_graph(2, fundchannel=False)

    # These should be in openingd.
    assert l1.rpc.getpeer(l2.info['id'])['connected']
    assert l2.rpc.getpeer(l1.info['id'])['connected']
    assert len(l1.rpc.getpeer(l2.info['id'])['channels']) == 0
    assert len(l2.rpc.getpeer(l1.info['id'])['channels']) == 0

    # Reconnect should be a noop
    ret = l1.rpc.connect(l2.info['id'], 'localhost', port=l2.port)
    assert ret['id'] == l2.info['id']

    ret = l2.rpc.connect(l1.info['id'], host='localhost', port=l1.port)
    assert ret['id'] == l1.info['id']

    # Should still only have one peer!
    assert len(l1.rpc.listpeers()) == 1
    assert len(l2.rpc.listpeers()) == 1

    # Should get a reasonable error if no address is known for the peer.
    with pytest.raises(RpcError, match=r'Unable to connect, no address known'):
        l1.rpc.connect('032cf15d1ad9c4a08d26eab1918f732d8ef8fdc6abb9640bf3db174372c491304e')

    # Should get a reasonable error if the connection is refused.
    with pytest.raises(RpcError, match=r'Connection establishment: Connection refused'):
        l1.rpc.connect('032cf15d1ad9c4a08d26eab1918f732d8ef8fdc6abb9640bf3db174372c491304e', 'localhost', 1)

    # Should get a reasonable error if we use the wrong key for the peer.
    with pytest.raises(RpcError, match=r'Cryptographic handshake: peer closed connection \(wrong key\?\)'):
        l1.rpc.connect('032cf15d1ad9c4a08d26eab1918f732d8ef8fdc6abb9640bf3db174372c491304e', 'localhost', l2.port)


def test_connect_standard_addr(node_factory):
    """Test the standard node@host:port address format."""
    l1, l2, l3 = node_factory.get_nodes(3)

    # node@host
    ret = l1.rpc.connect("{}@{}".format(l2.info['id'], 'localhost'), port=l2.port)
    assert ret['id'] == l2.info['id']

    # node@host:port
    ret = l1.rpc.connect("{}@localhost:{}".format(l3.info['id'], l3.port))
    assert ret['id'] == l3.info['id']

    # node@[ipv6]:port --- not supported by our CI
    # ret = l1.rpc.connect("{}@[::1]:{}".format(l3.info['id'], l3.port))
    # assert ret['id'] == l3.info['id']


def test_reconnect_channel_peers(node_factory, executor):
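    """Peers with a channel should reconnect automatically after a restart."""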
    l1 = node_factory.get_node(may_reconnect=True)
    l2 = node_factory.get_node(may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    l1.fund_channel(l2, 10**6)
    l2.restart()

    # Should reconnect.
    wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
    wait_for(lambda: only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
    # Connect command should succeed.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # Stop l2 and wait for l1 to notice.
    l2.stop()
    wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])

    # Now it should fail.
    with pytest.raises(RpcError, match=r'Connection refused'):
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # Wait for exponential backoff to give us a 2 second window.
    l1.daemon.wait_for_log('Will try reconnect in 2 seconds')

    # It should now succeed when l2 restarts.
    l2.start()

    # Multiple connects in flight should be fine!
    fut1 = executor.submit(l1.rpc.connect, l2.info['id'], 'localhost', l2.port)
    fut2 = executor.submit(l1.rpc.connect, l2.info['id'], 'localhost', l2.port)
    fut3 = executor.submit(l1.rpc.connect, l2.info['id'], 'localhost', l2.port)
    fut1.result(10)
    fut2.result(10)
    fut3.result(10)


def test_balance(node_factory):
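    """The funder should hold the full channel balance; the fundee should hold zero."""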
    l1, l2 = node_factory.line_graph(2, fundchannel=True)
    p1 = only_one(l1.rpc.getpeer(peer_id=l2.info['id'], level='info')['channels'])
    p2 = only_one(l2.rpc.getpeer(l1.info['id'], 'info')['channels'])
    assert p1['msatoshi_to_us'] == 10**6 * 1000
    assert p1['msatoshi_total'] == 10**6 * 1000
    assert p2['msatoshi_to_us'] == 0
    assert p2['msatoshi_total'] == 10**6 * 1000


def test_bad_opening(node_factory):
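    """Opening should fail when the opener's to_self_delay exceeds the peer's max-locktime-blocks."""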
    # l1 asks for a too-long locktime
    l1 = node_factory.get_node(options={'watchtime-blocks': 100})
    l2 = node_factory.get_node(options={'max-locktime-blocks': 99})
    ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    assert ret['id'] == l2.info['id']

    l1.daemon.wait_for_log('openingd-.*: Handed peer, entering loop')
    l2.daemon.wait_for_log('openingd-.*: Handed peer, entering loop')

    l1.fundwallet(10**6 + 1000000)
    with pytest.raises(RpcError):
        l1.rpc.fundchannel(l2.info['id'], 10**6)

    l2.daemon.wait_for_log('to_self_delay 100 larger than 99')


@unittest.skipIf(not DEVELOPER, "gossip without DEVELOPER=1 is slow")
@unittest.skipIf(TEST_NETWORK != 'regtest', "Fee computation and limits are network specific")
def test_opening_tiny_channel(node_factory):
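    """Test the min-capacity-sat option against a range of channel sizes.

    As a rough worked example, using the constants defined below: the
    technical minimum for the opener is min_commit_tx_fees + dustlimit + 1
    = 5430 + 546 + 1 = 5977 sat (the "~6000" node l3 in the diagram).
    """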
    # Test custom min-capacity-sat parameters
    #
    #        o---> [l2] (1000)    - old default (too little for reserves)
    #       /
    # [l1]-----> [l3] (~6000)     - technical minimum value that won't be rejected
    #       \
    #        o---> [l4] (~10000)  - the current default
    #         \
    #          o-> [l5] (20000)   - a node with a higher minimum value
    #
    # For each:
    # 1. Try to establish a channel 1 sat smaller than min_capacity_sat
    # 2. Try to establish a channel of exactly min_capacity_sat
    #
    # BOLT2
    # The receiving node MAY fail the channel if:
    #  - funding_satoshis is too small
    #  - it considers `feerate_per_kw` too small for timely processing or unreasonably large.
    #
    dustlimit = 546
    reserves = 2 * dustlimit
    min_commit_tx_fees = 5430
    min_for_opener = min_commit_tx_fees + dustlimit + 1

    l1_min_capacity = 1000            # 1k old default, too small but used at l1 to allow small incoming channels
    l2_min_capacity = reserves        # just enough to get past the capacity filter
    l3_min_capacity = min_for_opener  # the absolute technical minimum
    l4_min_capacity = 10000           # the current default
    l5_min_capacity = 20000           # a server with more than the default minimum

    l1 = node_factory.get_node(options={'min-capacity-sat': l1_min_capacity})
    l2 = node_factory.get_node(options={'min-capacity-sat': l2_min_capacity})
    l3 = node_factory.get_node(options={'min-capacity-sat': l3_min_capacity})
    l4 = node_factory.get_node(options={'min-capacity-sat': l4_min_capacity})
    l5 = node_factory.get_node(options={'min-capacity-sat': l5_min_capacity})

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l1.rpc.connect(l4.info['id'], 'localhost', l4.port)
    l1.rpc.connect(l5.info['id'], 'localhost', l5.port)

    # Opening a channel one sat below the reserves should be rejected at l2
    with pytest.raises(RpcError, match=r'channel_reserve_satoshis .*sat and .*sat too large for funding .*sat'):
        l1.fund_channel(l2, l2_min_capacity - 1)
    # Open a channel with exactly the minimal amount for the fundee.
    # This will raise an exception at l1, as the opener cannot afford fees for the initial_commit_tx.
    # Note: the old default of 1k sat is below the technical minimum when accounting for dust reserves and fees.
    # This is why this must fail; for this reason the default will be raised to 10k sat.
    with pytest.raises(RpcError, match=r'Funder cannot afford fee on initial commitment transaction'):
        l1.fund_channel(l2, l2_min_capacity)

    # Opening a channel one sat below the technical minimum should be rejected at l3
    with pytest.raises(RpcError, match=r'channel capacity is .*sat, which is below .*sat'):
        l1.fund_channel(l3, l3_min_capacity - 1)
    # When the amount matches the technical minimum exactly, the opener can cover its own initial_commit_tx fees
    l1.fund_channel(l3, l3_min_capacity)

    # Opening a channel one sat below the default 10k sat should be rejected at l4
    with pytest.raises(RpcError, match=r'channel capacity is .*, which is below .*msat'):
        l1.fund_channel(l4, l4_min_capacity - 1)
    # This must be possible with enough capacity
    l1.fund_channel(l4, l4_min_capacity)

    # Opening a channel below the configured minimum should be rejected at l5
    with pytest.raises(RpcError, match=r'channel capacity is .*, which is below .*msat'):
        l1.fund_channel(l5, l5_min_capacity - 1)
    # bigger channels must not be affected
    l1.fund_channel(l5, l5_min_capacity * 10)


def test_second_channel(node_factory):
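    """A node should be able to open channels to two different peers."""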
    l1 = node_factory.get_node()
    l2 = node_factory.get_node()
    l3 = node_factory.get_node()

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l1.fund_channel(l2, 10**6)
    l1.fund_channel(l3, 10**6)


@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_disconnect(node_factory):
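    """Connects should fail cleanly when the peer drops around WIRE_INIT."""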
    # These should all make us fail
    disconnects = ['-WIRE_INIT',
                   '@WIRE_INIT',
                   '+WIRE_INIT']
    l1 = node_factory.get_node(disconnect=disconnects)
    l2 = node_factory.get_node()

    with pytest.raises(RpcError):
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    with pytest.raises(RpcError):
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    with pytest.raises(RpcError):
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # Should have 3 connect fails.
    for d in disconnects:
        l1.daemon.wait_for_log('{}-.*Failed connected out'
                               .format(l2.info['id']))

    # Should still only have one peer!
    assert len(l1.rpc.listpeers()) == 1
    assert len(l2.rpc.listpeers()) == 1


@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_disconnect_opener(node_factory):
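    """Disconnects on the opener side during channel open should be forgotten cleanly."""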
    # Now error on opener side during channel open.
    disconnects = ['-WIRE_OPEN_CHANNEL',
                   '@WIRE_OPEN_CHANNEL',
                   '+WIRE_OPEN_CHANNEL',
                   '-WIRE_FUNDING_CREATED',
                   '@WIRE_FUNDING_CREATED']
    l1 = node_factory.get_node(disconnect=disconnects)
    l2 = node_factory.get_node()

    l1.fundwallet(2000000)

    for d in disconnects:
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
        with pytest.raises(RpcError):
            l1.rpc.fundchannel(l2.info['id'], 20000)
        assert l1.rpc.getpeer(l2.info['id']) is None

    # This one will succeed.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.fundchannel(l2.info['id'], 20000)

    # Should still only have one peer!
    assert len(l1.rpc.listpeers()) == 1
    assert len(l2.rpc.listpeers()) == 1


@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_disconnect_fundee(node_factory):
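    """Disconnects on the fundee side during channel open should be forgotten cleanly."""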
    # Now error on fundee side during channel open.
    disconnects = ['-WIRE_ACCEPT_CHANNEL',
                   '@WIRE_ACCEPT_CHANNEL',
                   '+WIRE_ACCEPT_CHANNEL']
    l1 = node_factory.get_node()
    l2 = node_factory.get_node(disconnect=disconnects)

    l1.fundwallet(2000000)

    for d in disconnects:
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
        with pytest.raises(RpcError):
            l1.rpc.fundchannel(l2.info['id'], 20000)
        assert l1.rpc.getpeer(l2.info['id']) is None

    # This one will succeed.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.fundchannel(l2.info['id'], 20000)

    # Should still only have one peer!
    assert len(l1.rpc.listpeers()) == 1
    assert len(l2.rpc.listpeers()) == 1


@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_disconnect_half_signed(node_factory):
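    """If funding_signed is sent but not received, only the fundee remembers the channel."""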
    # Now, these are the corner cases. The fundee sends funding_signed,
    # but the opener doesn't receive it.
    disconnects = ['@WIRE_FUNDING_SIGNED']
    l1 = node_factory.get_node()
    l2 = node_factory.get_node(disconnect=disconnects)

    l1.fundwallet(2000000)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    with pytest.raises(RpcError):
        l1.rpc.fundchannel(l2.info['id'], 20000)

    # Peer remembers, opener doesn't.
    assert l1.rpc.getpeer(l2.info['id']) is None
    assert l2.rpc.getpeer(l1.info['id'])['id'] == l1.info['id']


@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_reconnect_signed(node_factory):
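    """A disconnect right after funding_signed should reconnect and still lock in the channel."""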
    # This will fail *after* both sides consider the channel opening.
    disconnects = ['+WIRE_FUNDING_SIGNED']
    l1 = node_factory.get_node(may_reconnect=True)
    l2 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True)

    l1.fundwallet(2000000)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.fundchannel(l2.info['id'], 20000)

    # They haven't forgotten each other.
    assert l1.rpc.getpeer(l2.info['id'])['id'] == l2.info['id']
    assert l2.rpc.getpeer(l1.info['id'])['id'] == l1.info['id']

    # Technically, this is async to fundchannel (and could reconnect first)
    l1.daemon.wait_for_logs(['sendrawtx exit 0',
                             'Peer has reconnected, state CHANNELD_AWAITING_LOCKIN'])

    l1.bitcoin.generate_block(6)

    l1.daemon.wait_for_log(' to CHANNELD_NORMAL')
    l2.daemon.wait_for_log(' to CHANNELD_NORMAL')


@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_reconnect_openingd(node_factory):
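    """A reconnect while openingd is active should kill the old openingd and restart the open cleanly."""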
    # Openingd thinks we're still opening; the opener reconnects.
    disconnects = ['0WIRE_ACCEPT_CHANNEL']
    l1 = node_factory.get_node(may_reconnect=True)
    l2 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    l1.fundwallet(2000000)

    # l2 closes on l1, l1 forgets.
    with pytest.raises(RpcError):
        l1.rpc.fundchannel(l2.info['id'], 20000)
    assert l1.rpc.getpeer(l2.info['id']) is None

    # Reconnect.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # We should get a message about reconnecting.
    l2.daemon.wait_for_log('Killing openingd: Reconnected')
    l2.daemon.wait_for_log('openingd.*Handed peer, entering loop')

    # Should work fine.
    l1.rpc.fundchannel(l2.info['id'], 20000)
    l1.daemon.wait_for_log('sendrawtx exit 0')

    l1.bitcoin.generate_block(3)

    # Just to be sure, the second openingd hands over to channeld.
    # This log line shows channeld being started.
    l2.daemon.wait_for_log(r'channeld-chan#[0-9]: pid [0-9]+, msgfd [0-9]+')


@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_reconnect_gossiping(node_factory):
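    """A peer reconnecting while we are only gossiping should replace the old connection."""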
    # connectd thinks we're still gossiping; the peer reconnects.
    disconnects = ['0WIRE_PING']
    l1 = node_factory.get_node(may_reconnect=True)
    l2 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    # Make sure l2 knows about l1
    wait_for(lambda: l2.rpc.listpeers(l1.info['id'])['peers'] != [])

    l2.rpc.ping(l1.info['id'], 1, 65532)
    wait_for(lambda: l1.rpc.listpeers(l2.info['id'])['peers'] == [])

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l2.daemon.wait_for_log('processing now old peer gone')


@unittest.skipIf(not DEVELOPER, "needs dev-disconnect")
def test_reconnect_no_update(node_factory, executor):
    """
    This tests whether `funding_locked` is sent if we receive a
    `channel_reestablish` message with `next_commitment_number` == 1
    and our `next_commitment_number` == 1.
    """
    disconnects = ["@WIRE_FUNDING_LOCKED", "@WIRE_SHUTDOWN"]
    # Allow bad gossip because it might receive WIRE_CHANNEL_UPDATE before
    # the announcement, because of the disconnection.
    l1 = node_factory.get_node(may_reconnect=True, allow_bad_gossip=True)
    l2 = node_factory.get_node(disconnect=disconnects, may_reconnect=True)

    # For channeld reconnection
    l1.rpc.connect(l2.info["id"], "localhost", l2.port)
    fundchannel_exec = executor.submit(l1.fund_channel, l2, 10**6, False)
    l1.daemon.wait_for_log(r"channeld.* Retransmitting funding_locked for channel")
    l1.stop()

    # For closingd reconnection
    scid = fundchannel_exec.result()
    l1.daemon.start()
    executor.submit(l1.rpc.close, scid, 0)
    l2.daemon.wait_for_log(r"closingd.* Retransmitting funding_locked for channel")
    l1.daemon.wait_for_log(r"CLOSINGD_COMPLETE")


def test_connect_stresstest(node_factory, executor):
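    """Fire off many concurrent connect/disconnect commands and check that most succeed."""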
    # This test is unreliable, but it's better than nothing.
    l1 = node_factory.get_node(may_reconnect=True)
    l2 = node_factory.get_node(may_reconnect=True)
    l3 = node_factory.get_node(may_reconnect=True)

    # Hack l3 into a clone of l2, to stress the reconnect code.
    l3.stop()
    shutil.copyfile(os.path.join(l2.daemon.lightning_dir, TEST_NETWORK, 'hsm_secret'),
                    os.path.join(l3.daemon.lightning_dir, TEST_NETWORK, 'hsm_secret'))
    l3.start()
    l3.info = l3.rpc.getinfo()

    assert l3.info['id'] == l2.info['id']

    # We fire off random connect/disconnect commands.
    actions = [
        (l2.rpc.connect, l1.info['id'], 'localhost', l1.port),
        (l3.rpc.connect, l1.info['id'], 'localhost', l3.port),
        (l1.rpc.connect, l2.info['id'], 'localhost', l2.port),
        (l1.rpc.connect, l3.info['id'], 'localhost', l3.port),
        (l1.rpc.disconnect, l2.info['id'])
    ]
    args = [random.choice(actions) for _ in range(1000)]

    # We get them all to connect to each other.
    futs = []
    for a in args:
        futs.append(executor.submit(*a))

    # We don't actually care if they fail, since some will.
    successes = 0
    failures = 0
    for f in futs:
        if f.exception():
            failures += 1
        else:
            f.result()
            successes += 1

    assert successes > failures


@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_reconnect_normal(node_factory):
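    """Reconnect should succeed even if funding_locked gets lost in transit."""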
    # Should reconnect fine even if the locked message gets lost.
    disconnects = ['-WIRE_FUNDING_LOCKED',
                   '@WIRE_FUNDING_LOCKED',
                   '+WIRE_FUNDING_LOCKED']
    l1 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True)
    l2 = node_factory.get_node(may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    l1.fund_channel(l2, 10**6)


@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_reconnect_sender_add1(node_factory):
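    """A disconnect around update_add_htlc fails that payment attempt, but a retry succeeds."""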
    # Fail after add is OK; this will cause a payment failure though.
    disconnects = ['-WIRE_UPDATE_ADD_HTLC-nocommit',
                   '+WIRE_UPDATE_ADD_HTLC-nocommit',
                   '@WIRE_UPDATE_ADD_HTLC-nocommit']

    # Feerates identical so we don't get a gratuitous commit to update them
    l1 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True,
                               feerates=(7500, 7500, 7500, 7500))
    l2 = node_factory.get_node(may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    l1.fund_channel(l2, 10**6)

    amt = 200000000
    rhash = l2.rpc.invoice(amt, 'test_reconnect_sender_add1', 'desc')['payment_hash']
    assert only_one(l2.rpc.listinvoices('test_reconnect_sender_add1')['invoices'])['status'] == 'unpaid'

    route = [{'msatoshi': amt, 'id': l2.info['id'], 'delay': 5, 'channel': '1x1x1'}]

    for i in range(0, len(disconnects)):
        l1.rpc.sendpay(route, rhash)
        with pytest.raises(RpcError):
            l1.rpc.waitsendpay(rhash)

        # Wait for reconnection.
        l1.daemon.wait_for_log('Already have funding locked in')

    # This will send a commit, so will reconnect as required.
    l1.rpc.sendpay(route, rhash)


@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_reconnect_sender_add(node_factory):
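    """The sender should reconnect through commitment/revocation disconnects until the payment completes."""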
    disconnects = ['-WIRE_COMMITMENT_SIGNED',
                   '@WIRE_COMMITMENT_SIGNED',
                   '+WIRE_COMMITMENT_SIGNED',
                   '-WIRE_REVOKE_AND_ACK',
                   '@WIRE_REVOKE_AND_ACK',
                   '+WIRE_REVOKE_AND_ACK']
    # Feerates identical so we don't get a gratuitous commit to update them
    l1 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True,
                               feerates=(7500, 7500, 7500, 7500))
    l2 = node_factory.get_node(may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    l1.fund_channel(l2, 10**6)

    amt = 200000000
    rhash = l2.rpc.invoice(amt, 'testpayment', 'desc')['payment_hash']
    assert only_one(l2.rpc.listinvoices('testpayment')['invoices'])['status'] == 'unpaid'

    route = [{'msatoshi': amt, 'id': l2.info['id'], 'delay': 5, 'channel': '1x1x1'}]

    # This will send a commit, so will reconnect as required.
    l1.rpc.sendpay(route, rhash)
    # Should have printed this for every reconnect.
    for i in range(0, len(disconnects)):
        l1.daemon.wait_for_log('Already have funding locked in')


@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_reconnect_receiver_add(node_factory):
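    """The receiver should reconnect through commitment/revocation disconnects until the invoice is paid."""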
    disconnects = ['-WIRE_COMMITMENT_SIGNED',
                   '@WIRE_COMMITMENT_SIGNED',
                   '+WIRE_COMMITMENT_SIGNED',
                   '-WIRE_REVOKE_AND_ACK',
                   '@WIRE_REVOKE_AND_ACK',
                   '+WIRE_REVOKE_AND_ACK']
    # Feerates identical so we don't get a gratuitous commit to update them
    l1 = node_factory.get_node(may_reconnect=True, feerates=(7500, 7500, 7500, 7500))
    l2 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    l1.fund_channel(l2, 10**6)

    amt = 200000000
    rhash = l2.rpc.invoice(amt, 'testpayment2', 'desc')['payment_hash']
    assert only_one(l2.rpc.listinvoices('testpayment2')['invoices'])['status'] == 'unpaid'

    route = [{'msatoshi': amt, 'id': l2.info['id'], 'delay': 5, 'channel': '1x1x1'}]
    l1.rpc.sendpay(route, rhash)
    for i in range(len(disconnects)):
        l1.daemon.wait_for_log('Already have funding locked in')
    assert only_one(l2.rpc.listinvoices('testpayment2')['invoices'])['status'] == 'paid'


@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_reconnect_receiver_fulfill(node_factory):
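    """The payment should still complete when the fulfilling side disconnects at every step."""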
    # Ordering matters: after +WIRE_UPDATE_FULFILL_HTLC, channeld
    # will continue and try to send WIRE_COMMITMENT_SIGNED: if
    # that's the next failure, it will do two in one run.
    disconnects = ['@WIRE_UPDATE_FULFILL_HTLC',
                   '+WIRE_UPDATE_FULFILL_HTLC',
                   '-WIRE_UPDATE_FULFILL_HTLC',
                   '-WIRE_COMMITMENT_SIGNED',
                   '@WIRE_COMMITMENT_SIGNED',
                   '+WIRE_COMMITMENT_SIGNED',
                   '-WIRE_REVOKE_AND_ACK',
                   '@WIRE_REVOKE_AND_ACK',
                   '+WIRE_REVOKE_AND_ACK']
    l1 = node_factory.get_node(may_reconnect=True)
    l2 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    l1.fund_channel(l2, 10**6)

    amt = 200000000
    rhash = l2.rpc.invoice(amt, 'testpayment2', 'desc')['payment_hash']
    assert only_one(l2.rpc.listinvoices('testpayment2')['invoices'])['status'] == 'unpaid'

    route = [{'msatoshi': amt, 'id': l2.info['id'], 'delay': 5, 'channel': '1x1x1'}]
    l1.rpc.sendpay(route, rhash)
    for i in range(len(disconnects)):
        l1.daemon.wait_for_log('Already have funding locked in')
    assert only_one(l2.rpc.listinvoices('testpayment2')['invoices'])['status'] == 'paid'


@flaky
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_shutdown_reconnect(node_factory):
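    """A mutual close should complete even if shutdown messages are lost and the peers reconnect."""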
    disconnects = ['-WIRE_SHUTDOWN',
                   '@WIRE_SHUTDOWN',
                   '+WIRE_SHUTDOWN']
    l1 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True)
    l2 = node_factory.get_node(may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    chan = l1.fund_channel(l2, 10**6)
    l1.pay(l2, 200000000)

    assert l1.bitcoin.rpc.getmempoolinfo()['size'] == 0

    # This should wait until we're closed.
    l1.rpc.close(chan)

    l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
    l2.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')

    l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
    l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')

    # And should put the closing tx into the mempool (happens async, so
    # CLOSINGD_COMPLETE may come first).
    l1.daemon.wait_for_logs(['sendrawtx exit 0', ' to CLOSINGD_COMPLETE'])
    l2.daemon.wait_for_logs(['sendrawtx exit 0', ' to CLOSINGD_COMPLETE'])
    assert l1.bitcoin.rpc.getmempoolinfo()['size'] == 1


@flaky
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_reconnect_remote_sends_no_sigs(node_factory):
    """We re-announce, even when the remote node doesn't send its announcement_signatures on reconnect."""
    l1, l2 = node_factory.line_graph(2, wait_for_announce=True, opts={'may_reconnect': True})

    # When l1 restarts (with rescan=1), make it think it hasn't
    # reached announce_depth, so it won't re-send announcement_signatures.
    def no_blocks_above(req):
        if req['params'][0] > 107:
            return {"result": None,
                    "error": {"code": -8, "message": "Block height out of range"}, "id": req['id']}
        else:
            return {'result': l1.bitcoin.rpc.getblockhash(req['params'][0]),
                    "error": None, 'id': req['id']}

    l1.daemon.rpcproxy.mock_rpc('getblockhash', no_blocks_above)
    l1.restart()

    # l2 will now use the (REMOTE's) announcement_signatures it has stored.
    wait_for(lambda: only_one(l2.rpc.listpeers()['peers'][0]['channels'])['status'] == [
        'CHANNELD_NORMAL:Reconnected, and reestablished.',
        'CHANNELD_NORMAL:Funding transaction locked. Channel announced.'])

    # But l2 still sends its own sigs on reconnect
    l2.daemon.wait_for_logs([r'peer_out WIRE_ANNOUNCEMENT_SIGNATURES',
                             r'peer_out WIRE_ANNOUNCEMENT_SIGNATURES'])

    # l1 only sent them the first time.
    assert(''.join(l1.daemon.logs).count(r'peer_out WIRE_ANNOUNCEMENT_SIGNATURES') == 1)


def test_shutdown_awaiting_lockin(node_factory, bitcoind):
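    """A channel closed while still awaiting lockin should shut down and resolve on-chain."""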
    l1 = node_factory.get_node()
    l2 = node_factory.get_node(options={'funding-confirms': 3})

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundwallet(10**6 + 1000000)
    chanid = l1.rpc.fundchannel(l2.info['id'], 10**6)['channel_id']

    # Technically, this is async to fundchannel.
    l1.daemon.wait_for_log('sendrawtx exit 0')
    bitcoind.generate_block(1)

    l1.rpc.close(chanid)

    l1.daemon.wait_for_log('CHANNELD_AWAITING_LOCKIN to CHANNELD_SHUTTING_DOWN')
    l2.daemon.wait_for_log('CHANNELD_AWAITING_LOCKIN to CHANNELD_SHUTTING_DOWN')

    l1.daemon.wait_for_log('CHANNELD_SHUTTING_DOWN to CLOSINGD_SIGEXCHANGE')
    l2.daemon.wait_for_log('CHANNELD_SHUTTING_DOWN to CLOSINGD_SIGEXCHANGE')

    # And should put the closing tx into the mempool (happens async, so
    # CLOSINGD_COMPLETE may come first).
    l1.daemon.wait_for_logs(['sendrawtx exit 0', ' to CLOSINGD_COMPLETE'])
    l2.daemon.wait_for_logs(['sendrawtx exit 0', ' to CLOSINGD_COMPLETE'])
    assert bitcoind.rpc.getmempoolinfo()['size'] == 1

    bitcoind.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')

    bitcoind.generate_block(100)
    wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
    wait_for(lambda: l2.rpc.listpeers()['peers'] == [])


def test_funding_change(node_factory, bitcoind):
    """Add some funds, fund a channel, and make sure we remember the change."""
    l1, l2 = node_factory.line_graph(2, fundchannel=False)
    l1.fundwallet(10000000)
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [l1])

    outputs = l1.db_query('SELECT value FROM outputs WHERE status=0;')
    assert only_one(outputs)['value'] == 10000000

    l1.rpc.fundchannel(l2.info['id'], 1000000)
    outputs = {r['status']: r['value'] for r in l1.db_query(
        'SELECT status, SUM(value) AS value FROM outputs GROUP BY status;')}

    # The 10m output is spent and we have a change output of 9m minus fees.
    assert outputs[0] > 8990000
    assert outputs[2] == 10000000


def test_funding_all(node_factory, bitcoind):
    """Add some funds, fund a channel using all funds, make sure no funds remain."""
    l1, l2 = node_factory.line_graph(2, fundchannel=False)

    l1.fundwallet(0.1 * 10**8)
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [l1])

    outputs = l1.db_query('SELECT value FROM outputs WHERE status=0;')
    assert only_one(outputs)['value'] == 10000000

    l1.rpc.fundchannel(l2.info['id'], "all")

    outputs = l1.db_query('SELECT value FROM outputs WHERE status=0;')
    assert len(outputs) == 0


def test_funding_all_too_much(node_factory):
    """Add more than the max possible funds, fund a channel using all funds we can."""
    l1, l2 = node_factory.line_graph(2, fundchannel=False)

    l1.fundwallet(2**24 + 10000)
    l1.rpc.fundchannel(l2.info['id'], "all")

    assert only_one(l1.rpc.listfunds()['outputs'])['status'] == 'unconfirmed'
    assert only_one(l1.rpc.listfunds()['channels'])['channel_total_sat'] == 2**24 - 1


def test_funding_fail(node_factory, bitcoind):
    """Add some funds, then try to fund a channel without enough funds."""
    # Previous runs with the same bitcoind can leave funds!
    max_locktime = 5 * 6 * 24
    l1 = node_factory.get_node(random_hsm=True, options={'max-locktime-blocks': max_locktime})
    l2 = node_factory.get_node(options={'watchtime-blocks': max_locktime + 1})
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    funds = 1000000

    addr = l1.rpc.newaddr()['bech32']
    l1.bitcoin.rpc.sendtoaddress(addr, funds / 10**8)
    bitcoind.generate_block(1)

    # Wait for it to arrive.
    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) > 0)

    # Fail because l1 dislikes l2's huge locktime.
    with pytest.raises(RpcError, match=r'to_self_delay \d+ larger than \d+'):
        l1.rpc.fundchannel(l2.info['id'], int(funds / 10))

    assert only_one(l1.rpc.listpeers()['peers'])['connected']
    assert only_one(l2.rpc.listpeers()['peers'])['connected']

    # Restart l2 without the ridiculous locktime.
    del l2.daemon.opts['watchtime-blocks']
    l2.restart()
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # We don't have enough left to cover fees if we try to spend it all.
    with pytest.raises(RpcError, match=r'Cannot afford transaction'):
        l1.rpc.fundchannel(l2.info['id'], funds)

    # Should still be connected.
    assert only_one(l1.rpc.listpeers()['peers'])['connected']
    l2.daemon.wait_for_log('openingd-.*: Handed peer, entering loop')
    assert only_one(l2.rpc.listpeers()['peers'])['connected']

    # This works.
    l1.rpc.fundchannel(l2.info['id'], int(funds / 10))


def test_funding_toolarge(node_factory, bitcoind):
    """Try to create a giant channel."""
    l1 = node_factory.get_node()
    l2 = node_factory.get_node()
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # Send funds.
    amount = 2**24
    bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'], amount / 10**8 + 0.01)
    bitcoind.generate_block(1)

    # Wait for it to arrive.
    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) > 0)

    # Fail to open (too large: the limit is 2**24 - 1 = 16777215 sat).
    with pytest.raises(RpcError, match=r'Amount exceeded 16777215'):
        l1.rpc.fundchannel(l2.info['id'], amount)

    # This should work.
    amount = amount - 1
    l1.rpc.fundchannel(l2.info['id'], amount)


def test_funding_push(node_factory, bitcoind):
    """Try to push the peer some sats on channel open."""
    # We track balances, to verify that the accounting is ok.
    coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')

    l1 = node_factory.get_node(options={'plugin': coin_mvt_plugin})
    l2 = node_factory.get_node()

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # Send funds.
    amount = 2**24
    push_sat = 20000
    bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'], amount / 10**8 + 0.01)
    bitcoind.generate_block(1)

    # Wait for it to arrive.
    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) > 0)

    # Fail to open (trying to push too much).
    with pytest.raises(RpcError, match=r'Requested to push_msat of 20000000msat is greater than available funding amount 10000sat'):
        l1.rpc.fundchannel(l2.info['id'], 10000, push_msat=push_sat * 1000)

    # This should work.
    amount = amount - 1
    l1.rpc.fundchannel(l2.info['id'], amount, push_msat=push_sat * 1000)

    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [l1])
    funds = only_one(l1.rpc.listfunds()['channels'])
    assert funds['channel_sat'] + push_sat == funds['channel_total_sat']

    chanid = first_channel_id(l2, l1)
    l1.daemon.wait_for_log('coins account: {}'.format(chanid))
    # give the file write a second
    time.sleep(1)
    channel_mvts = [
        {'type': 'chain_mvt', 'credit': 0, 'debit': 20000000, 'tag': 'pushed'},
        {'type': 'chain_mvt', 'credit': 16777215000, 'debit': 0, 'tag': 'deposit'},
    ]
    check_coin_moves(l1, chanid, channel_mvts)
    assert account_balance(l1, chanid) == (amount - push_sat) * 1000


def test_funding_by_utxos(node_factory, bitcoind):
    """Fund a channel with specific utxos."""
    l1, l2, l3 = node_factory.line_graph(3, fundchannel=False)

    # Get 3 different utxos.
    l1.fundwallet(0.01 * 10**8)
    l1.fundwallet(0.01 * 10**8)
    l1.fundwallet(0.01 * 10**8)
    wait_for(lambda: len(l1.rpc.listfunds()["outputs"]) == 3)

    utxos = [utxo["txid"] + ":" + str(utxo["output"]) for utxo in l1.rpc.listfunds()["outputs"]]

    # Fund with utxos we don't own.
    with pytest.raises(RpcError, match=r"No matching utxo was found from the wallet"):
        l3.rpc.fundchannel(l2.info["id"], int(0.01 * 10**8), utxos=utxos)

    # Fund with an empty array.
    with pytest.raises(RpcError, match=r"Please specify an array of \\'txid:output_index\\', not \"*\""):
        l1.rpc.fundchannel(l2.info["id"], int(0.01 * 10**8), utxos=[])

    # Fund a channel from some of the utxos, without change.
    l1.rpc.fundchannel(l2.info["id"], "all", utxos=utxos[0:2])

    # Fund a channel from the rest of the utxos, with change.
    l1.rpc.connect(l3.info["id"], "localhost", l3.port)
    l1.rpc.fundchannel(l3.info["id"], int(0.007 * 10**8), utxos=[utxos[2]])

    # Fund another channel with already-spent utxos.
    with pytest.raises(RpcError, match=r"No matching utxo was found from the wallet"):
        l1.rpc.fundchannel(l3.info["id"], int(0.01 * 10**8), utxos=utxos)


@unittest.skipIf(not DEVELOPER, "needs dev_forget_channel")
def test_funding_external_wallet_corners(node_factory, bitcoind):
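    """Exercise corner cases of the fundchannel_start/complete/cancel flow with an external wallet."""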
    l1 = node_factory.get_node(may_reconnect=True)
    l2 = node_factory.get_node(may_reconnect=True)

    amount = 2**24
    l1.fundwallet(amount + 10000000)

    amount = amount - 1
    fake_txid = '929764844a8f9938b669a60a1d51a11c9e2613c7eb4776e4126f1f20c0a685c3'
    fake_txout = 0

    with pytest.raises(RpcError, match=r'Unknown peer'):
        l1.rpc.fundchannel_start(l2.info['id'], amount)

    with pytest.raises(RpcError, match=r'Unknown peer'):
        l1.rpc.fundchannel_complete(l2.info['id'], fake_txid, fake_txout)

    # Should not be able to continue without being in progress.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    with pytest.raises(RpcError, match=r'No channel funding in progress.'):
        l1.rpc.fundchannel_complete(l2.info['id'], fake_txid, fake_txout)

    # Fail to open (too large)
    with pytest.raises(RpcError, match=r'Amount exceeded 16777215'):
        l1.rpc.fundchannel_start(l2.info['id'], amount + 1)

    l1.rpc.fundchannel_start(l2.info['id'], amount)
    with pytest.raises(RpcError, match=r'Already funding channel'):
        l1.rpc.fundchannel(l2.info['id'], amount)

    l1.rpc.fundchannel_cancel(l2.info['id'])
    # Should be able to 'restart' after cancelling.
    amount2 = 1000000
    funding_addr = l1.rpc.fundchannel_start(l2.info['id'], amount2)['funding_address']

    addr = l1.rpc.newaddr()['bech32']
    l1.bitcoin.rpc.sendtoaddress(addr, 0.1)
    bitcoind.generate_block(1)
    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 1)
    # Create the funding transaction
    prep = l1.rpc.txprepare([{funding_addr: amount2}])
    decode = bitcoind.rpc.decoderawtransaction(prep['unsigned_tx'])
    assert decode['txid'] == prep['txid']

    # One output will be correct.
    if decode['vout'][0]['value'] == Decimal('0.01000000'):
        txout = 0
    elif decode['vout'][1]['value'] == Decimal('0.01000000'):
        txout = 1
    else:
        assert False

    # Be sure fundchannel_complete is successful
    assert l1.rpc.fundchannel_complete(l2.info['id'], prep['txid'], txout)['commitments_secured']

    # The peer shouldn't be able to cancel the channel.
    with pytest.raises(RpcError, match=r'Cannot cancel channel that was initiated by peer'):
        l2.rpc.fundchannel_cancel(l1.info['id'])

    # We can cancel the channel after fundchannel_complete.
    assert l1.rpc.fundchannel_cancel(l2.info['id'])['cancelled']

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.fundchannel_start(l2.info['id'], amount)['funding_address']
    assert l1.rpc.fundchannel_complete(l2.info['id'], prep['txid'], txout)['commitments_secured']

    # Check that we can still cancel when the peer is disconnected.
    l1.rpc.disconnect(l2.info['id'], force=True)
    wait_for(lambda: not only_one(l1.rpc.listpeers()['peers'])['connected'])
    assert l1.rpc.fundchannel_cancel(l2.info['id'])['cancelled']
    assert len(l1.rpc.listpeers()['peers']) == 0

    # l2 still has the channel open/waiting
    wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state']
             == 'CHANNELD_AWAITING_LOCKIN')

    # on reconnect, the channel should get destroyed
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.daemon.wait_for_log('Rejecting WIRE_CHANNEL_REESTABLISH for unknown channel_id')
    wait_for(lambda: len(l1.rpc.listpeers()['peers']) == 0)
    wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)

    # we have to connect again, because we got disconnected when everything errored
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.fundchannel_start(l2.info['id'], amount)['funding_address']
    # A successful funding_complete will always have a commitments_secured that is true,
    # otherwise it would have failed.
    assert l1.rpc.fundchannel_complete(l2.info['id'], prep['txid'], txout)['commitments_secured']
    l1.rpc.txsend(prep['txid'])
    with pytest.raises(RpcError, match=r'.* been broadcast.*'):
        l1.rpc.fundchannel_cancel(l2.info['id'])
    l1.rpc.close(l2.info['id'])


def test_funding_cancel_race(node_factory, bitcoind, executor):
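    """Race fundchannel_complete against fundchannel_cancel across many peers; only one may win per peer."""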
    l1 = node_factory.get_node()

    if VALGRIND or SLOW_MACHINE:
        num = 5
    else:
        num = 100

    nodes = node_factory.get_nodes(num)

    # Speed up cleanup by not cleaning our test nodes: on my laptop, this goes
    # from 214 to 15 seconds
    node_factory.nodes = [l1]

    num_complete = 0
    num_cancel = 0

    for count, n in enumerate(nodes):
        l1.rpc.connect(n.info['id'], 'localhost', n.port)
        l1.rpc.fundchannel_start(n.info['id'], "100000sat")

        # We simply make up txids. And submit two of each at once.
        completes = []
        cancels = []

        # Switch order around.
        for i in range(4):
            if (i + count) % 2 == 0:
                completes.append(executor.submit(l1.rpc.fundchannel_complete, n.info['id'], "9f1844419d2f41532a57fb5ef038cacb602000f7f37b3dae68dc2d047c89048f", 0))
            else:
                cancels.append(executor.submit(l1.rpc.fundchannel_cancel, n.info['id']))

        # Only one complete should succeed.
        success = False
        for c in completes:
            try:
                c.result(TIMEOUT)
                num_complete += 1
                assert not success
                success = True
            except RpcError:
                pass

        # These may both succeed, iff the above didn't.
        cancelled = False
        for c in cancels:
            try:
                c.result(TIMEOUT)
                cancelled = True
                assert not success
            except RpcError:
                pass

        if cancelled:
            num_cancel += 1
        else:
            assert success

    print("Cancelled {} complete {}".format(num_cancel, num_complete))
    assert num_cancel + num_complete == len(nodes)

    # We should have raced at least once!
    if not VALGRIND:
        assert num_cancel > 0
        assert num_complete > 0


@unittest.skipIf(TEST_NETWORK != 'regtest', "External wallet support doesn't work with elements yet.")
def test_funding_close_upfront(node_factory, bitcoind):
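    """Test the close_to (upfront shutdown address) option of fundchannel_start."""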
|
|
|
|
l1 = node_factory.get_node()
|
|
|
|
|
2019-11-21 01:47:44 +01:00
|
|
|
opts = {'plugin': os.path.join(os.getcwd(), 'tests/plugins/accepter_close_to.py')}
|
|
|
|
l2 = node_factory.get_node(options=opts)
|
|
|
|
|
|
|
|
# The 'accepter_close_to' plugin uses the channel funding amount to determine
|
|
|
|
# whether or not to include a 'close_to' address
|
|
|
|
amt_normal = 100007 # continues without returning a close_to
|
|
|
|
amt_addr = 100001 # returns valid regtest address
|
|
|
|
|
|
|
|
    remote_valid_addr = 'bcrt1q7gtnxmlaly9vklvmfj06amfdef3rtnrdazdsvw'

    def _fundchannel(l1, l2, amount, close_to):
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
        assert(l1.rpc.listpeers()['peers'][0]['id'] == l2.info['id'])

        resp = l1.rpc.fundchannel_start(l2.info['id'], amount, close_to=close_to)
        address = resp['funding_address']

        if close_to:
            assert resp['close_to']
        else:
            assert 'close_to' not in resp

        peer = l1.rpc.listpeers()['peers'][0]
        # Peer should still be connected and in state waiting for funding_txid
        assert peer['id'] == l2.info['id']
        r = re.compile('Funding channel start: awaiting funding_txid with output to .*')
        assert any(r.match(line) for line in peer['channels'][0]['status'])
        assert 'OPENINGD' in peer['channels'][0]['state']

        # 'Externally' fund the address from fundchannel_start
        addr_scriptpubkey = bitcoind.rpc.getaddressinfo(address)['scriptPubKey']
        txout = CMutableTxOut(amount, bytearray.fromhex(addr_scriptpubkey))
        unfunded_tx = CMutableTransaction([], [txout])
        hextx = binascii.hexlify(unfunded_tx.serialize()).decode('utf8')

        funded_tx_obj = bitcoind.rpc.fundrawtransaction(hextx)
        raw_funded_tx = funded_tx_obj['hex']
        txid = bitcoind.rpc.decoderawtransaction(raw_funded_tx)['txid']
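        # fundrawtransaction added inputs plus a change output; the funding
        # output is whichever one isn't the change ('changepos' is the index
        # of the change output).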
        txout = 1 if funded_tx_obj['changepos'] == 0 else 0

        assert l1.rpc.fundchannel_complete(l2.info['id'], txid, txout)['commitments_secured']

        # Broadcast the transaction manually and confirm that channel locks in
        signed_tx = bitcoind.rpc.signrawtransactionwithwallet(raw_funded_tx)['hex']
        assert txid == bitcoind.rpc.decoderawtransaction(signed_tx)['txid']

        bitcoind.rpc.sendrawtransaction(signed_tx)
        bitcoind.generate_block(1)

        for node in [l1, l2]:
            node.daemon.wait_for_log(r'State changed from CHANNELD_AWAITING_LOCKIN to CHANNELD_NORMAL')
            channel = node.rpc.listpeers()['peers'][0]['channels'][-1]
            assert amount * 1000 == channel['msatoshi_total']

    # check that normal peer close works
    _fundchannel(l1, l2, amt_normal, None)
    assert l1.rpc.close(l2.info['id'])['type'] == 'mutual'

    # check that you can provide a closing address upfront
    addr = l1.rpc.newaddr()['bech32']
    _fundchannel(l1, l2, amt_normal, addr)
    # confirm that it appears in listpeers
    assert addr == only_one(l1.rpc.listpeers()['peers'])['channels'][1]['close_to_addr']
    resp = l1.rpc.close(l2.info['id'])
    assert resp['type'] == 'mutual'
    assert only_one(only_one(bitcoind.rpc.decoderawtransaction(resp['tx'])['vout'])['scriptPubKey']['addresses']) == addr

    # check that passing in the same addr to close works
    addr = bitcoind.rpc.getnewaddress()
    _fundchannel(l1, l2, amt_normal, addr)
    assert addr == only_one(l1.rpc.listpeers()['peers'])['channels'][2]['close_to_addr']
    resp = l1.rpc.close(l2.info['id'], destination=addr)
    assert resp['type'] == 'mutual'
    assert only_one(only_one(bitcoind.rpc.decoderawtransaction(resp['tx'])['vout'])['scriptPubKey']['addresses']) == addr

    # check that remote peer closing works as expected (and that remote's close_to works)
    _fundchannel(l1, l2, amt_addr, addr)
    # send some money to remote so that they have a closeout
    l1.rpc.pay(l2.rpc.invoice((amt_addr // 2) * 1000, 'test_remote_close_to', 'desc')['bolt11'])
    assert only_one(l2.rpc.listpeers()['peers'])['channels'][-1]['close_to_addr'] == remote_valid_addr

    resp = l2.rpc.close(l1.info['id'])
    assert resp['type'] == 'mutual'
    vouts = bitcoind.rpc.decoderawtransaction(resp['tx'])['vout']
    assert len(vouts) == 2
    for vout in vouts:
        assert only_one(vout['scriptPubKey']['addresses']) in [addr, remote_valid_addr]

    # check that passing in a different addr to close causes an RPC error
    addr2 = l1.rpc.newaddr()['bech32']
    _fundchannel(l1, l2, amt_normal, addr)
    with pytest.raises(RpcError, match=r'does not match previous shutdown script'):
        l1.rpc.close(l2.info['id'], destination=addr2)


@unittest.skipIf(TEST_NETWORK != 'regtest', "External wallet support doesn't work with elements yet.")
def test_funding_external_wallet(node_factory, bitcoind):
    l1 = node_factory.get_node()
    l2 = node_factory.get_node()
    l3 = node_factory.get_node()

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    assert(l1.rpc.listpeers()['peers'][0]['id'] == l2.info['id'])

    amount = 2**24 - 1
    address = l1.rpc.fundchannel_start(l2.info['id'], amount)['funding_address']
    assert len(address) > 0

    peer = l1.rpc.listpeers()['peers'][0]
    # Peer should still be connected and in state waiting for funding_txid
    assert peer['id'] == l2.info['id']
    r = re.compile('Funding channel start: awaiting funding_txid with output to .*')
    assert any(r.match(line) for line in peer['channels'][0]['status'])
    assert 'OPENINGD' in peer['channels'][0]['state']

    # Trying to start a second funding should not work, it's in progress.
    with pytest.raises(RpcError, match=r'Already funding channel'):
        l1.rpc.fundchannel_start(l2.info['id'], amount)

    # 'Externally' fund the address from fundchannel_start
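    # Build a skeleton transaction that pays `amount` to the funding address,
    # then let bitcoind's wallet pick inputs and add change.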
    addr_scriptpubkey = bitcoind.rpc.getaddressinfo(address)['scriptPubKey']
    txout = CMutableTxOut(amount, bytearray.fromhex(addr_scriptpubkey))
    unfunded_tx = CMutableTransaction([], [txout])
    hextx = binascii.hexlify(unfunded_tx.serialize()).decode('utf8')

    funded_tx_obj = bitcoind.rpc.fundrawtransaction(hextx)
    raw_funded_tx = funded_tx_obj['hex']
    txid = bitcoind.rpc.decoderawtransaction(raw_funded_tx)['txid']
    txout = 1 if funded_tx_obj['changepos'] == 0 else 0

    assert l1.rpc.fundchannel_complete(l2.info['id'], txid, txout)['commitments_secured']

    # Broadcast the transaction manually and confirm that channel locks in
    signed_tx = bitcoind.rpc.signrawtransactionwithwallet(raw_funded_tx)['hex']
    assert txid == bitcoind.rpc.decoderawtransaction(signed_tx)['txid']

    bitcoind.rpc.sendrawtransaction(signed_tx)
    bitcoind.generate_block(1)

    l1.daemon.wait_for_log(r'Funding tx {} depth 1 of 1'.format(txid))
    # Check that a tx broadcast by a third party can be caught.
    # Only once the transaction (broadcast by a third party) is onchain can we catch it.
    with pytest.raises(RpcError, match=r'.* been broadcast.*'):
        l1.rpc.fundchannel_cancel(l2.info['id'])

    for node in [l1, l2]:
        node.daemon.wait_for_log(r'State changed from CHANNELD_AWAITING_LOCKIN to CHANNELD_NORMAL')
        channel = node.rpc.listpeers()['peers'][0]['channels'][0]
        assert amount * 1000 == channel['msatoshi_total']

    # Test that we don't crash if peer disconnects after fundchannel_start
    l2.connect(l3)
    l2.rpc.fundchannel_start(l3.info["id"], amount)
    l3.rpc.close(l2.info["id"])


def test_lockin_between_restart(node_factory, bitcoind):
    l1 = node_factory.get_node(may_reconnect=True)
    l2 = node_factory.get_node(options={'funding-confirms': 3},
                               may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    l1.fundwallet(10**6 + 1000000)
    l1.rpc.fundchannel(l2.info['id'], 10**6)['tx']

    # l1 goes down.
    l1.stop()

    # Now 120 blocks go by...
    bitcoind.generate_block(120)

    # Restart
    l1.start()

    # All should be good.
    l1.daemon.wait_for_log(' to CHANNELD_NORMAL')
    l2.daemon.wait_for_log(' to CHANNELD_NORMAL')


def test_funding_while_offline(node_factory, bitcoind):
    l1 = node_factory.get_node()
    addr = l1.rpc.newaddr()['bech32']
    sync_blockheight(bitcoind, [l1])

    # l1 goes down.
    l1.stop()

    # We send funds
    bitcoind.rpc.sendtoaddress(addr, (10**6 + 1000000) / 10**8)

    # Now 120 blocks go by...
    bitcoind.generate_block(120)

    # Restart
    l1.start()
    sync_blockheight(bitcoind, [l1])

    assert len(l1.rpc.listfunds()['outputs']) == 1


@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_channel_persistence(node_factory, bitcoind, executor):
    # Start two nodes and open a channel (to remember). l2 will
    # mysteriously die while committing the first HTLC so we can
    # check that HTLCs reloaded from the DB work.
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(may_reconnect=True, feerates=(7500, 7500, 7500,
                                                             7500))
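    # dev_disconnect: '=' is a no-op disconnect (see test_fail_unconfirmed
    # below); the '-nocommit' suffix disables the automatic commit timer (see
    # test_restart_multi_htlc_rexmit), so the test controls when l2 dies mid-HTLC.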
    l2 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'],
                               may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # Neither node should have a channel open, they are just connected
    for n in (l1, l2):
        assert(n.db_query('SELECT COUNT(id) as count FROM channels;')[0]['count'] == 0)

    l1.fund_channel(l2, 100000)

    peers = l1.rpc.listpeers()['peers']
    assert(only_one(peers[0]['channels'])['state'] == 'CHANNELD_NORMAL')

    # Both nodes should now have exactly one channel in the database
    for n in (l1, l2):
        assert(n.db_query('SELECT COUNT(id) as count FROM channels;')[0]['count'] == 1)

    # Fire off a sendpay request, it'll get interrupted by a restart
    executor.submit(l1.pay, l2, 10000)
    # Wait for it to be committed to, i.e., stored in the DB
    l1.daemon.wait_for_log('peer_in WIRE_COMMITMENT_SIGNED')

    # Stop l2, l1 will reattempt to connect
    print("Killing l2 in mid HTLC")
    l2.daemon.kill()

    # Clear the disconnect and timer stop so we can proceed normally
    del l2.daemon.opts['dev-disconnect']

    # Wait for l1 to notice
    wait_for(lambda: 'connected' not in only_one(l1.rpc.listpeers()['peers'][0]['channels']))

    # Now restart l2 and it should reload peers/channels from the DB
    l2.start()
    wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 1)

    # Wait for the restored HTLC to finish
    wait_for(lambda: only_one(l1.rpc.listpeers()['peers'][0]['channels'])['msatoshi_to_us'] == 99990000)

    wait_for(lambda: len([p for p in l1.rpc.listpeers()['peers'] if p['connected']]))
    wait_for(lambda: len([p for p in l2.rpc.listpeers()['peers'] if p['connected']]))

    # Now make sure this is really functional by sending a payment
    l1.pay(l2, 10000)

    # L1 doesn't actually update msatoshi_to_us until it receives
    # revoke_and_ack from L2, which can take a little bit.
    wait_for(lambda: only_one(l1.rpc.listpeers()['peers'][0]['channels'])['msatoshi_to_us'] == 99980000)
    assert only_one(l2.rpc.listpeers()['peers'][0]['channels'])['msatoshi_to_us'] == 20000

    # Finally restart l1, and make sure it remembers
    l1.restart()
    assert only_one(l1.rpc.listpeers()['peers'][0]['channels'])['msatoshi_to_us'] == 99980000

    # Now make sure l1 is watching for unilateral closes
    l2.rpc.dev_fail(l1.info['id'])
    l2.daemon.wait_for_log('Failing due to dev-fail command')
    l2.wait_for_channel_onchain(l1.info['id'])
    bitcoind.generate_block(1)

    # L1 must notice.
    l1.daemon.wait_for_log(' to ONCHAIN')


@unittest.skipIf(not DEVELOPER, "gossip without DEVELOPER=1 is slow")
|
2018-12-08 00:38:41 +01:00
|
|
|
def test_private_channel(node_factory):
|
|
|
|
l1, l2 = node_factory.line_graph(2, announce_channels=False, wait_for_announce=False)
|
|
|
|
l3, l4 = node_factory.line_graph(2, announce_channels=True, wait_for_announce=True)
|
|
|
|
|
|
|
|
assert l1.daemon.is_in_log('Will open private channel with node {}'.format(l2.info['id']))
|
|
|
|
assert not l2.daemon.is_in_log('Will open private channel with node {}'.format(l1.info['id']))
|
|
|
|
assert not l3.daemon.is_in_log('Will open private channel with node {}'.format(l4.info['id']))
|
|
|
|
|
|
|
|
l3.daemon.wait_for_log('Received node_announcement for node {}'.format(l4.info['id']))
|
|
|
|
l4.daemon.wait_for_log('Received node_announcement for node {}'.format(l3.info['id']))
|
|
|
|
|
|
|
|
assert not l1.daemon.is_in_log('Received node_announcement for node {}'.format(l2.info['id']))
|
|
|
|
assert not l2.daemon.is_in_log('Received node_announcement for node {}'.format(l1.info['id']))
|
|
|
|
|
2019-01-08 00:47:39 +01:00
|
|
|
# test for 'private' flag in rpc output
|
|
|
|
assert only_one(only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'])['private']
|
|
|
|
# check non-private channel
|
|
|
|
assert not only_one(only_one(l4.rpc.listpeers(l3.info['id'])['peers'])['channels'])['private']
|
|
|
|
|
2018-12-08 00:38:41 +01:00
|
|
|
|
2019-09-18 03:05:05 +02:00
|
|
|
@unittest.skipIf(not DEVELOPER, "Too slow without --dev-fast-gossip")
|
2018-08-03 17:29:38 +02:00
|
|
|
def test_channel_reenable(node_factory):
|
2018-12-08 00:27:14 +01:00
|
|
|
l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True}, fundchannel=True, wait_for_announce=True)
|
2018-08-03 17:29:38 +02:00
|
|
|
|
|
|
|
l1.daemon.wait_for_log('Received node_announcement for node {}'.format(l2.info['id']))
|
|
|
|
l2.daemon.wait_for_log('Received node_announcement for node {}'.format(l1.info['id']))
|
|
|
|
|
|
|
|
# Both directions should be active before the restart
|
|
|
|
wait_for(lambda: [c['active'] for c in l1.rpc.listchannels()['channels']] == [True, True])
|
|
|
|
|
|
|
|
# Restart l2, will cause l1 to reconnect
|
2018-08-14 02:26:03 +02:00
|
|
|
l2.stop()
|
|
|
|
wait_for(lambda: [c['active'] for c in l1.rpc.listchannels()['channels']] == [False, False])
|
|
|
|
l2.start()
|
2018-08-03 17:29:38 +02:00
|
|
|
|
2018-08-14 02:26:03 +02:00
|
|
|
# Updates may be suppressed if redundant; just test results.
|
2018-08-03 17:29:38 +02:00
|
|
|
wait_for(lambda: [c['active'] for c in l1.rpc.listchannels()['channels']] == [True, True])
|
2018-08-14 02:26:03 +02:00
|
|
|
wait_for(lambda: [c['active'] for c in l2.rpc.listchannels()['channels']] == [True, True])
|
2018-08-03 17:29:38 +02:00
|
|
|
|
|
|
|
|
|
|
|
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_update_fee(node_factory, bitcoind):
    l1, l2 = node_factory.line_graph(2, fundchannel=True)
    chanid = l1.get_channel_scid(l2)

    # Make l1 send out feechange.
    l1.set_feerates((14000, 11000, 7500, 3750))

    # Now make sure an HTLC works.
    # (First wait for route propagation.)
    l1.wait_channel_active(chanid)
    sync_blockheight(bitcoind, [l1, l2])

    # Make payments.
    l1.pay(l2, 200000000)
    # First payment causes fee update.
    l2.daemon.wait_for_log('peer updated fee to 14000')
    l2.pay(l1, 100000000)

    # Now shutdown cleanly.
    l1.rpc.close(chanid)

    l1.daemon.wait_for_log(' to CLOSINGD_COMPLETE')
    l2.daemon.wait_for_log(' to CLOSINGD_COMPLETE')

    # And should put closing into mempool.
    l1.wait_for_channel_onchain(l2.info['id'])
    l2.wait_for_channel_onchain(l1.info['id'])

    bitcoind.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')

    bitcoind.generate_block(99)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')


@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_fee_limits(node_factory, bitcoind):
    l1, l2 = node_factory.line_graph(2, opts={'dev-max-fee-multiplier': 5, 'may_reconnect': True}, fundchannel=True)

    # Kick off fee adjustment using HTLC.
    l1.pay(l2, 1000)

    # L1 asks for stupid low fee (will actually hit the floor of 253)
    l1.stop()
    l1.set_feerates((15, 15, 15, 15), False)
    l1.start()

    l1.daemon.wait_for_log('Peer transient failure in CHANNELD_NORMAL: channeld: .*: update_fee 253 outside range 1875-75000')
    # Make sure the resolution of this one doesn't interfere with the next!
    # Note: may succeed, may fail with insufficient fee, depending on how
    # bitcoind feels!
    l2.daemon.wait_for_log('sendrawtx exit')
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [l1, l2])

    # Trying to open a channel with too low a fee-rate is denied
    l4 = node_factory.get_node()
    l1.rpc.connect(l4.info['id'], 'localhost', l4.port)
    with pytest.raises(RpcError, match='They sent error .* feerate_per_kw 253 below minimum'):
        l1.fund_channel(l4, 10**6)

    # Restore to normal.
    l1.stop()
    l1.set_feerates((15000, 11000, 7500, 3750), False)
    l1.start()

    # Try with node which sets --ignore-fee-limits
    l3 = node_factory.get_node(options={'ignore-fee-limits': 'true'}, may_reconnect=True)
    l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
    chan = l1.fund_channel(l3, 10**6)

    # Kick off fee adjustment using HTLC.
    l1.pay(l3, 1000)

    # Try stupid high fees
    l1.stop()
    l1.set_feerates((15000 * 10, 11000, 7500, 3750), False)
    l1.start()

    l3.daemon.wait_for_log('peer_in WIRE_UPDATE_FEE')
    l3.daemon.wait_for_log('peer_in WIRE_COMMITMENT_SIGNED')

    # We need to wait until both have committed and revoked the
    # old state, otherwise we'll still try to commit with the old
    # 15sat/byte fee
    l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')

    # This should wait for close to complete
    l1.rpc.close(chan)


@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_update_fee_reconnect(node_factory, bitcoind):
    # Disconnect after commitsig for fee update.
    disconnects = ['+WIRE_COMMITMENT_SIGNED*3']
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(disconnect=disconnects, may_reconnect=True,
                               feerates=(15000, 15000, 15000, 3750))
    # We match l2's later feerate, so we agree on same closing tx for simplicity.
    l2 = node_factory.get_node(may_reconnect=True,
                               feerates=(14000, 15000, 14000, 3750))
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    chan = l1.fund_channel(l2, 10**6)

    # Make an HTLC just to get us to do feechanges.
    l1.pay(l2, 1000)

    # Make l1 send out feechange; triggers disconnect/reconnect.
    # (Note: < 10% change, so no smoothing here!)
    l1.set_feerates((14000, 14000, 14000, 3750))
    l1.daemon.wait_for_log('Setting REMOTE feerate to 14000')
    l2.daemon.wait_for_log('Setting LOCAL feerate to 14000')
    l1.daemon.wait_for_log(r'dev_disconnect: \+WIRE_COMMITMENT_SIGNED')

    # Wait for reconnect....
    l1.daemon.wait_for_log('Feerate:.*LOCAL now 14000')

    l1.pay(l2, 200000000)
    l2.pay(l1, 100000000)

    # They should both have gotten commits with correct feerate.
    assert l1.daemon.is_in_log('got commitsig [0-9]*: feerate 14000')
    assert l2.daemon.is_in_log('got commitsig [0-9]*: feerate 14000')

    # Now shutdown cleanly.
    l1.rpc.close(chan)

    # And should put closing into mempool.
    l1.wait_for_channel_onchain(l2.info['id'])
    l2.wait_for_channel_onchain(l1.info['id'])

    bitcoind.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')

    bitcoind.generate_block(99)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')


@unittest.skipIf(not DEVELOPER, "Too slow without --dev-bitcoind-poll")
|
|
|
|
def test_multiple_channels(node_factory):
|
|
|
|
l1 = node_factory.get_node()
|
|
|
|
l2 = node_factory.get_node()
|
|
|
|
|
|
|
|
for i in range(3):
|
|
|
|
# FIXME: we shouldn't disconnect on close?
|
|
|
|
ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
|
|
|
|
assert ret['id'] == l2.info['id']
|
|
|
|
|
        l1.daemon.wait_for_log('openingd-.*: Handed peer, entering loop')
        l2.daemon.wait_for_log('openingd-.*: Handed peer, entering loop')
        chan = l1.fund_channel(l2, 10**6)

        l1.rpc.close(chan)

    channels = only_one(l1.rpc.listpeers()['peers'])['channels']
    assert len(channels) == 3
    # Most in state ONCHAIN, last is CLOSINGD_COMPLETE
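    # (Each later fund_channel mines blocks, which also confirms the previous
    # close; nothing confirms the final close, so it stays CLOSINGD_COMPLETE.)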
    for i in range(len(channels) - 1):
        assert channels[i]['state'] == 'ONCHAIN'
    assert channels[-1]['state'] == 'CLOSINGD_COMPLETE'


@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_forget_channel(node_factory):
    l1 = node_factory.get_node()
    l2 = node_factory.get_node()
    l1.fundwallet(10**6)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.fundchannel(l2.info['id'], 10**5)

    assert len(l1.rpc.listpeers()['peers']) == 1

    # This should fail, the funding tx is in the mempool and may confirm
    with pytest.raises(RpcError, match=r'Cowardly refusing to forget channel'):
        l1.rpc.dev_forget_channel(l2.info['id'])

    assert len(l1.rpc.listpeers()['peers']) == 1

    # Forcing should work
    l1.rpc.dev_forget_channel(l2.info['id'], True)
    assert len(l1.rpc.listpeers()['peers']) == 0

    # And restarting should keep that peer forgotten
    l1.restart()
    assert len(l1.rpc.listpeers()['peers']) == 0

    # The entry in the channels table should still be there
    assert l1.db_query("SELECT count(*) as c FROM channels;")[0]['c'] == 1
    assert l2.db_query("SELECT count(*) as c FROM channels;")[0]['c'] == 1


def test_peerinfo(node_factory, bitcoind):
    l1, l2 = node_factory.line_graph(2, fundchannel=False, opts={'may_reconnect': True})
    lfeatures = expected_peer_features()
    nfeatures = expected_node_features()
    # Gossiping but no node announcement yet
    assert l1.rpc.getpeer(l2.info['id'])['connected']
    assert len(l1.rpc.getpeer(l2.info['id'])['channels']) == 0
    assert l1.rpc.getpeer(l2.info['id'])['features'] == lfeatures

    # Fund a channel to force a node announcement
    chan = l1.fund_channel(l2, 10**6)
    # Now proceed to funding-depth and do a full gossip round
    bitcoind.generate_block(5)
    l1.daemon.wait_for_logs(['Received node_announcement for node ' + l2.info['id']])
    l2.daemon.wait_for_logs(['Received node_announcement for node ' + l1.info['id']])

    # Should have announced the same features as told to peer.
    nodes1 = l1.rpc.listnodes(l2.info['id'])['nodes']
    nodes2 = l2.rpc.listnodes(l2.info['id'])['nodes']
    peer1 = l1.rpc.getpeer(l2.info['id'])
    peer2 = l2.rpc.getpeer(l1.info['id'])
    # Peer features differ from node features now because keysend adds a node feature.
    assert only_one(nodes1)['features'] == nfeatures
    assert only_one(nodes2)['features'] == nfeatures
    assert peer1['features'] == lfeatures
    assert peer2['features'] == lfeatures

    # If it reconnects after db load, it should know features.
    l1.restart()
    wait_for(lambda: l1.rpc.getpeer(l2.info['id'])['connected'])
    wait_for(lambda: l2.rpc.getpeer(l1.info['id'])['connected'])
    assert l1.rpc.getpeer(l2.info['id'])['features'] == lfeatures
    assert l2.rpc.getpeer(l1.info['id'])['features'] == lfeatures

    # Close the channel to forget the peer
    l1.rpc.close(chan)

    wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
    wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])

    # Make sure close tx hits mempool before we mine blocks.
    bitcoind.generate_block(100, wait_for_mempool=1)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')

    # The only channel was closed, everybody should have forgotten the nodes
    assert l1.rpc.listnodes()['nodes'] == []
    assert l2.rpc.listnodes()['nodes'] == []


def test_disconnectpeer(node_factory, bitcoind):
    l1, l2, l3 = node_factory.get_nodes(3, opts={'may_reconnect': False})
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.connect(l3.info['id'], 'localhost', l3.port)

    # Gossiping
    assert l1.rpc.getpeer(l2.info['id'])['connected']
    assert len(l1.rpc.getpeer(l2.info['id'])['channels']) == 0
    assert l1.rpc.getpeer(l3.info['id'])['connected']
    assert len(l1.rpc.getpeer(l3.info['id'])['channels']) == 0
    wait_for(lambda: l2.rpc.getpeer(l1.info['id']) is not None)

    # Disconnect l2 from l1
    l1.rpc.disconnect(l2.info['id'])

    # Make sure listpeers no longer returns the disconnected node
    assert l1.rpc.getpeer(l2.info['id']) is None
    wait_for(lambda: l2.rpc.getpeer(l1.info['id']) is None)

    # Make sure you cannot disconnect after disconnecting
    with pytest.raises(RpcError, match=r'Peer not connected'):
        l1.rpc.disconnect(l2.info['id'])
    with pytest.raises(RpcError, match=r'Peer not connected'):
        l2.rpc.disconnect(l1.info['id'])

    # Fund channel l1 -> l3
    l1.fund_channel(l3, 10**6)
    bitcoind.generate_block(5)

    # disconnecting a non gossiping peer results in error
    with pytest.raises(RpcError, match=r'Peer is in state CHANNELD_NORMAL'):
        l1.rpc.disconnect(l3.info['id'])


@unittest.skipIf(not DEVELOPER, "needs --dev-max-funding-unconfirmed-blocks")
|
|
|
|
def test_fundee_forget_funding_tx_unconfirmed(node_factory, bitcoind):
|
|
|
|
"""Test that fundee will forget the channel if
|
|
|
|
the funding tx has been unconfirmed for too long.
|
|
|
|
"""
|
|
|
|
# Keep this low (default is 2016), since everything
|
|
|
|
# is much slower in VALGRIND mode and wait_for_log
|
|
|
|
# could time out before lightningd processes all the
|
|
|
|
# blocks.
|
|
|
|
blocks = 200
|
2019-09-09 18:11:24 +02:00
|
|
|
# opener
|
2018-08-22 02:13:57 +02:00
|
|
|
l1 = node_factory.get_node()
|
2019-09-09 18:11:24 +02:00
|
|
|
# peer
|
2018-08-03 17:29:38 +02:00
|
|
|
l2 = node_factory.get_node(options={"dev-max-funding-unconfirmed-blocks": blocks})
|
|
|
|
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
|
|
|
|
|
2019-09-09 18:11:24 +02:00
|
|
|
# Give opener some funds.
|
2018-08-03 17:29:38 +02:00
|
|
|
l1.fundwallet(10**7)
|
|
|
|
# Let blocks settle.
|
|
|
|
time.sleep(1)
|
|
|
|
|
2018-09-05 10:43:32 +02:00
|
|
|
def mock_sendrawtransaction(r):
|
2019-01-23 02:36:50 +01:00
|
|
|
return {'id': r['id'], 'error': {'code': 100, 'message': 'sendrawtransaction disabled'}}
|
2018-09-05 10:43:32 +02:00
|
|
|
|
2019-09-09 18:11:24 +02:00
|
|
|
# Prevent opener from broadcasting funding tx (any tx really).
|
2018-09-05 10:43:32 +02:00
|
|
|
l1.daemon.rpcproxy.mock_rpc('sendrawtransaction', mock_sendrawtransaction)
|
|
|
|
|
2018-08-03 17:29:38 +02:00
|
|
|
# Fund the channel.
|
2019-09-09 18:11:24 +02:00
|
|
|
# The process will complete, but opener will be unable
|
2018-08-03 17:29:38 +02:00
|
|
|
# to broadcast and confirm funding tx.
|
2019-01-16 14:24:30 +01:00
|
|
|
with pytest.raises(RpcError, match=r'sendrawtransaction disabled'):
|
|
|
|
l1.rpc.fundchannel(l2.info['id'], 10**6)
|
2018-09-05 10:43:32 +02:00
|
|
|
|
2018-08-03 17:29:38 +02:00
|
|
|
# Generate blocks until unconfirmed.
|
|
|
|
bitcoind.generate_block(blocks)
|
|
|
|
|
|
|
|
# fundee will forget channel!
|
|
|
|
l2.daemon.wait_for_log('Forgetting channel: It has been {} blocks'.format(blocks))
|
2018-09-05 10:43:32 +02:00
|
|
|
|
2018-08-03 17:29:38 +02:00
|
|
|
# fundee will also forget and disconnect from peer.
|
|
|
|
assert len(l2.rpc.listpeers(l1.info['id'])['peers']) == 0
|
2018-08-22 02:09:56 +02:00
|
|
|
|
|
|
|
|
2018-08-23 01:27:25 +02:00
|
|
|
@unittest.skipIf(not DEVELOPER, "needs dev_fail")
|
|
|
|
def test_no_fee_estimate(node_factory, bitcoind, executor):
|
|
|
|
l1 = node_factory.get_node(start=False)
|
2018-09-05 01:01:50 +02:00
|
|
|
|
|
|
|
# Fail any fee estimation requests until we allow them further down
|
|
|
|
l1.daemon.rpcproxy.mock_rpc('estimatesmartfee', {
|
|
|
|
'error': {"errors": ["Insufficient data or no feerate found"], "blocks": 0}
|
|
|
|
})
|
2018-08-23 01:27:25 +02:00
|
|
|
l1.start()
|
|
|
|
|
|
|
|
l2 = node_factory.get_node()
|
|
|
|
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
|
|
|
|
|
|
|
|
# Can't fund a channel.
|
|
|
|
l1.fundwallet(10**7)
|
|
|
|
with pytest.raises(RpcError, match=r'Cannot estimate fees'):
|
|
|
|
l1.rpc.fundchannel(l2.info['id'], 10**6)
|
|
|
|
|
|
|
|
# Can't withdraw either.
|
|
|
|
with pytest.raises(RpcError, match=r'Cannot estimate fees'):
|
2019-03-04 04:13:49 +01:00
|
|
|
l1.rpc.withdraw(l2.rpc.newaddr()['bech32'], 'all')
|
2018-08-23 01:27:25 +02:00
|
|
|
|
2018-08-28 22:46:34 +02:00
|
|
|
# Can't use feerate names, either.
|
|
|
|
with pytest.raises(RpcError, match=r'Cannot estimate fees'):
|
2019-03-04 04:13:49 +01:00
|
|
|
l1.rpc.withdraw(l2.rpc.newaddr()['bech32'], 'all', 'urgent')
|
|
|
|
l1.rpc.withdraw(l2.rpc.newaddr()['bech32'], 'all', 'normal')
|
|
|
|
l1.rpc.withdraw(l2.rpc.newaddr()['bech32'], 'all', 'slow')
|
2018-08-28 22:46:34 +02:00
|
|
|
|
|
|
|
with pytest.raises(RpcError, match=r'Cannot estimate fees'):
|
|
|
|
l1.rpc.fundchannel(l2.info['id'], 10**6, 'urgent')
|
|
|
|
l1.rpc.fundchannel(l2.info['id'], 10**6, 'normal')
|
|
|
|
l1.rpc.fundchannel(l2.info['id'], 10**6, 'slow')
|
|
|
|
|
2018-08-27 07:13:57 +02:00
|
|
|
# Can with manual feerate.
|
2019-03-04 04:13:49 +01:00
|
|
|
l1.rpc.withdraw(l2.rpc.newaddr()['bech32'], 10000, '1500perkb')
|
2019-02-21 21:19:14 +01:00
|
|
|
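    # minconf=0 since the change from the withdraw above may still be
    # unconfirmed, and we need it to fund this channel.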
    l1.rpc.fundchannel(l2.info['id'], 10**6, '2000perkw', minconf=0)

    # Make sure we clean up channel for later attempt.
    l1.daemon.wait_for_log('sendrawtx exit 0')
    l1.rpc.dev_fail(l2.info['id'])
    l1.daemon.wait_for_log('Failing due to dev-fail command')
    l1.wait_for_channel_onchain(l2.info['id'])
    bitcoind.generate_block(6)
    wait_for(lambda: only_one(l1.rpc.getpeer(l2.info['id'])['channels'])['state'] == 'ONCHAIN')
    wait_for(lambda: only_one(l2.rpc.getpeer(l1.info['id'])['channels'])['state'] == 'ONCHAIN')

    # But can accept incoming connections.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l2.fund_channel(l1, 10**6)

    # Can do HTLCs.
    l2.pay(l1, 10**5)

    # Can do mutual close.
    l1.rpc.close(l2.info['id'])
    wait_for(lambda: len(bitcoind.rpc.getrawmempool()) > 0)
    bitcoind.generate_block(100)

    # Can do unilateral close.
    l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
    l2.fund_channel(l1, 10**6)
    l2.pay(l1, 10**9 // 2)
    l1.rpc.dev_fail(l2.info['id'])
    l1.daemon.wait_for_log('Failing due to dev-fail command')
    l1.wait_for_channel_onchain(l2.info['id'])
    bitcoind.generate_block(5)
    wait_for(lambda: len(bitcoind.rpc.getrawmempool()) > 0)
    bitcoind.generate_block(100)

    # Start estimatesmartfee.
    l1.set_feerates((15000, 11000, 7500, 3750), True)

    # Can now fund a channel (as a test, use slow feerate).
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.fundchannel(l2.info['id'], 10**6, 'slow')

    # Can withdraw (use urgent feerate). `minconf` may be needed depending on
    # the previous `fundchannel` selecting all confirmed outputs.
    l1.rpc.withdraw(l2.rpc.newaddr()['bech32'], 'all', 'urgent', minconf=0)


@unittest.skipIf(not DEVELOPER, "needs --dev-disconnect")
|
2019-09-09 18:11:24 +02:00
|
|
|
def test_opener_feerate_reconnect(node_factory, bitcoind):
|
2018-08-22 02:09:56 +02:00
|
|
|
# l1 updates fees, then reconnect so l2 retransmits commitment_signed.
|
2020-04-09 07:00:06 +02:00
|
|
|
disconnects = ['-WIRE_COMMITMENT_SIGNED*3']
|
2018-08-23 13:45:33 +02:00
|
|
|
l1 = node_factory.get_node(may_reconnect=True,
|
2020-03-10 19:31:24 +01:00
|
|
|
feerates=(7500, 7500, 7500, 7500))
|
2018-08-22 02:09:56 +02:00
|
|
|
l2 = node_factory.get_node(disconnect=disconnects, may_reconnect=True)
|
|
|
|
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
|
|
|
|
l1.fund_channel(l2, 10**6)
|
|
|
|
|
2020-04-09 07:00:06 +02:00
|
|
|
# Need a payment otherwise it won't update fee.
|
|
|
|
l1.pay(l2, 10**9 // 2)
|
|
|
|
|
2018-08-23 13:45:33 +02:00
|
|
|
# create fee update, causing disconnect.
|
2020-03-10 19:31:24 +01:00
|
|
|
l1.set_feerates((15000, 11000, 7500, 3750))
|
2018-10-26 07:49:53 +02:00
|
|
|
l2.daemon.wait_for_log(r'dev_disconnect: \-WIRE_COMMITMENT_SIGNED')
|
2018-08-22 02:09:56 +02:00
|
|
|
|
|
|
|
# Wait until they reconnect.
|
|
|
|
l1.daemon.wait_for_log('Peer transient failure in CHANNELD_NORMAL')
|
|
|
|
wait_for(lambda: l1.rpc.getpeer(l2.info['id'])['connected'])
|
|
|
|
|
|
|
|
# Should work normally.
|
|
|
|
l1.pay(l2, 200000000)
|
2018-08-17 06:16:34 +02:00
|
|
|
|
|
|
|
|
2019-09-09 18:11:24 +02:00
|
|
|
def test_opener_simple_reconnect(node_factory, bitcoind):
    """Sanity check that reconnection works with completely unused channels"""
    # Set fees even (i.e. identical) so it doesn't send any commitments.
    l1 = node_factory.get_node(may_reconnect=True,
                               feerates=(7500, 7500, 7500, 7500))
    l2 = node_factory.get_node(may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 10**6)

    l1.rpc.disconnect(l2.info['id'], True)

    # Wait until they reconnect.
    wait_for(lambda: l1.rpc.getpeer(l2.info['id'])['connected'])

    # Should work normally.
    l1.pay(l2, 200000000)


@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "sqlite3-specific DB rollback")
@unittest.skipIf(not DEVELOPER, "needs LIGHTNINGD_DEV_LOG_IO")
def test_dataloss_protection(node_factory, bitcoind):
    l1 = node_factory.get_node(may_reconnect=True, options={'log-level': 'io'},
                               feerates=(7500, 7500, 7500, 7500))
    l2 = node_factory.get_node(may_reconnect=True, options={'log-level': 'io'},
                               feerates=(7500, 7500, 7500, 7500), allow_broken_log=True)

    lf = expected_peer_features()

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    # l1 should send out WIRE_INIT (0010)
    l1.daemon.wait_for_log(r"\[OUT\] 0010.*"
                           # lflen
                           + format(len(lf) // 2, '04x')
                           + lf)

    l1.fund_channel(l2, 10**6)
    l2.stop()

    # Save copy of the db.
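    # (We later roll l2 back to this snapshot to simulate data loss and
    # exercise the "Unknown commitment ... recovering our funds" path.)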
    dbpath = os.path.join(l2.daemon.lightning_dir, TEST_NETWORK, "lightningd.sqlite3")
    orig_db = open(dbpath, "rb").read()
    l2.start()

    # l1 should have sent WIRE_CHANNEL_REESTABLISH with extra fields.
    l1.daemon.wait_for_log(r"\[OUT\] 0088"
                           # channel_id
                           "[0-9a-f]{64}"
                           # next_local_commitment_number
                           "0000000000000001"
                           # next_remote_revocation_number
                           "0000000000000000"
                           # your_last_per_commitment_secret (funding_depth may
                           # trigger a fee-update and commit, hence this may not
                           # be zero)
                           "[0-9a-f]{64}"
                           # my_current_per_commitment_point
                           "0[23][0-9a-f]{64}'$")

    # After an htlc, we should get different results (two more commits)
    l1.pay(l2, 200000000)

    # Make sure both sides consider it completely settled (has received both
    # REVOKE_AND_ACK)
    l1.daemon.wait_for_logs([r"\[IN\] 0085"] * 2)
    l2.daemon.wait_for_logs([r"\[IN\] 0085"] * 2)

    l2.restart()

    # l1 should have sent WIRE_CHANNEL_REESTABLISH with extra fields.
    l1.daemon.wait_for_log(r"\[OUT\] 0088"
                           # channel_id
                           "[0-9a-f]{64}"
                           # next_local_commitment_number
                           "000000000000000[1-9]"
                           # next_remote_revocation_number
                           "000000000000000[1-9]"
                           # your_last_per_commitment_secret
                           "[0-9a-f]{64}"
                           # my_current_per_commitment_point
                           "0[23][0-9a-f]{64}'$")

    # Now, move l2 back in time.
    l2.stop()
    # Overwrite with OLD db.
    open(dbpath, "wb").write(orig_db)
    l2.start()

    # l2 should freak out!
    l2.daemon.wait_for_log("Peer permanent failure in CHANNELD_NORMAL: Awaiting unilateral close")

    # l1 should drop to chain.
    l1.wait_for_channel_onchain(l2.info['id'])

    # l2 must NOT drop to chain.
    l2.daemon.wait_for_log("Cannot broadcast our commitment tx: they have a future one")
    assert not l2.daemon.is_in_log('sendrawtx exit 0')

    closetxid = only_one(bitcoind.rpc.getrawmempool(False))

    # l2 should still recover something!
    bitcoind.generate_block(1)

    l2.daemon.wait_for_log("ERROR: Unknown commitment #[0-9], recovering our funds!")

    # Restarting l2, and it should remember from db.
    l2.restart()

    l2.daemon.wait_for_log("ERROR: Unknown commitment #[0-9], recovering our funds!")
    bitcoind.generate_block(100)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')

    # l2 should have it in wallet.
    assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l2.rpc.listfunds()['outputs']])


@unittest.skipIf(not DEVELOPER, "needs dev_disconnect")
|
|
|
|
def test_restart_multi_htlc_rexmit(node_factory, bitcoind, executor):
|
|
|
|
# l1 disables commit timer once we send first htlc, dies on commit
|
|
|
|
disconnects = ['=WIRE_UPDATE_ADD_HTLC-nocommit',
|
|
|
|
'-WIRE_COMMITMENT_SIGNED']
|
|
|
|
l1, l2 = node_factory.line_graph(2, opts=[{'disconnect': disconnects,
|
|
|
|
'may_reconnect': True},
|
|
|
|
{'may_reconnect': True}])
|
|
|
|
|
|
|
|
executor.submit(l1.pay, l2, 20000)
|
|
|
|
executor.submit(l1.pay, l2, 30000)
|
|
|
|
|
|
|
|
l1.daemon.wait_for_logs(['peer_out WIRE_UPDATE_ADD_HTLC'] * 2)
|
|
|
|
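    # Re-enable the commit timer that '-nocommit' disabled: l1 now sends the
    # commitment and hits the '-WIRE_COMMITMENT_SIGNED' disconnect.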
    l1.rpc.dev_reenable_commit(l2.info['id'])
    l1.daemon.wait_for_log('dev_disconnect: -WIRE_COMMITMENT_SIGNED')

    # This will make it reconnect
    l1.stop()
    # Clear the disconnect so we can proceed normally
    del l1.daemon.opts['dev-disconnect']
    l1.start()

    # Payments will fail due to restart, but we can see results in listsendpays.
    print(l1.rpc.listsendpays())
    wait_for(lambda: [p['status'] for p in l1.rpc.listsendpays()['payments']] == ['complete', 'complete'])


@unittest.skipIf(not DEVELOPER, "needs dev-disconnect")
|
|
|
|
def test_fulfill_incoming_first(node_factory, bitcoind):
|
|
|
|
"""Test that we handle the case where we completely resolve incoming htlc
|
|
|
|
before fulfilled outgoing htlc"""
|
|
|
|
|
|
|
|
# We agree on fee change first, then add HTLC, then remove; stop after remove.
|
|
|
|
disconnects = ['+WIRE_COMMITMENT_SIGNED*3']
|
|
|
|
# We manually reconnect l2 & l3, after 100 blocks; hence allowing manual
|
2018-10-10 00:39:05 +02:00
|
|
|
# reconnect, but disabling auto connect, and massive cltv so 2/3 doesn't
|
2018-10-09 10:46:52 +02:00
|
|
|
# time out.
|
|
|
|
l1, l2, l3 = node_factory.line_graph(3, opts=[{},
|
|
|
|
{'may_reconnect': True,
|
|
|
|
'dev-no-reconnect': None},
|
|
|
|
{'may_reconnect': True,
|
|
|
|
'dev-no-reconnect': None,
|
|
|
|
'disconnect': disconnects,
|
|
|
|
'cltv-final': 200}],
|
2018-12-08 00:27:14 +01:00
|
|
|
wait_for_announce=True)
|
2018-10-09 10:46:52 +02:00
|
|
|
|
|
|
|
# This succeeds.
|
|
|
|
l1.rpc.pay(l3.rpc.invoice(200000000, 'test_fulfill_incoming_first', 'desc')['bolt11'])
|
|
|
|
|
|
|
|
# l1 can shutdown, fine.
|
|
|
|
l1.rpc.close(l2.info['id'])
|
|
|
|
l1.wait_for_channel_onchain(l2.info['id'])
|
|
|
|
bitcoind.generate_block(100)
|
|
|
|
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
|
|
|
|
|
|
|
|
# Now, l2 should restore from DB fine, even though outgoing HTLC no longer
|
|
|
|
# has an incoming.
|
|
|
|
l2.restart()
|
2018-10-09 10:49:52 +02:00
|
|
|
|
|
|
|
# Manually reconnect l2->l3.
|
|
|
|
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
|
|
|
|
|
|
|
|
# Fulfill should be retransmitted OK (ignored result).
|
|
|
|
l2.rpc.close(l3.info['id'])
|
|
|
|
l2.wait_for_channel_onchain(l3.info['id'])
|
|
|
|
bitcoind.generate_block(100)
|
|
|
|
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
|
|
|
|
l3.daemon.wait_for_log('onchaind complete, forgetting peer')
|
2018-10-09 10:53:52 +02:00
|
|
|
|
|
|
|
|
2019-02-16 10:12:18 +01:00
|
|
|
@unittest.skipIf(not DEVELOPER, "gossip without DEVELOPER=1 is slow")
|
2019-10-08 03:09:24 +02:00
|
|
|
def test_restart_many_payments(node_factory, bitcoind):
|
2018-10-09 10:53:52 +02:00
|
|
|
l1 = node_factory.get_node(may_reconnect=True)
|
|
|
|
|
|
|
|
# On my laptop, these take 74 seconds and 44 seconds (with restart commented out)
|
|
|
|
if VALGRIND:
|
|
|
|
num = 2
|
|
|
|
else:
|
|
|
|
num = 5
|
|
|
|
|
|
|
|
# Nodes with channels into the main node
|
|
|
|
innodes = node_factory.get_nodes(num, opts={'may_reconnect': True})
|
|
|
|
inchans = []
|
|
|
|
for n in innodes:
|
|
|
|
n.rpc.connect(l1.info['id'], 'localhost', l1.port)
|
2018-11-22 03:17:29 +01:00
|
|
|
inchans.append(n.fund_channel(l1, 10**6, False))
|
2018-10-09 10:53:52 +02:00
|
|
|
|
|
|
|
# Nodes with channels out of the main node
|
|
|
|
outnodes = node_factory.get_nodes(len(innodes), opts={'may_reconnect': True})
|
|
|
|
outchans = []
|
|
|
|
for n in outnodes:
|
|
|
|
n.rpc.connect(l1.info['id'], 'localhost', l1.port)
|
2018-11-22 03:17:29 +01:00
|
|
|
outchans.append(l1.fund_channel(n, 10**6, False))
|
|
|
|
|
2019-10-08 03:09:24 +02:00
|
|
|
# Make sure they're all announced.
|
|
|
|
bitcoind.generate_block(5)
|
|
|
|
|
|
|
|
# We wait for each node to see each dir active, and its own
|
|
|
|
# channel CHANNELD_NORMAL
|
|
|
|
logs = ([r'update for channel {}/0 now ACTIVE'.format(scid)
|
|
|
|
for scid in inchans + outchans]
|
|
|
|
+ [r'update for channel {}/1 now ACTIVE'.format(scid)
|
|
|
|
for scid in inchans + outchans]
|
|
|
|
+ ['to CHANNELD_NORMAL'])
|
|
|
|
|
2018-11-22 03:17:29 +01:00
|
|
|
# Now do all the waiting at once: if !DEVELOPER, this can be *very* slow!
|
2019-10-08 03:09:24 +02:00
|
|
|
for n in innodes + outnodes:
|
|
|
|
n.daemon.wait_for_logs(logs)
|
2018-10-09 10:53:52 +02:00
|
|
|
|
|
|
|
# Manually create routes, get invoices
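    # (Each innode pays its outnode twice through l1: once with a first-hop
    # delay of 10, which l1 rejects with WIRE_INCORRECT_CLTV_EXPIRY, and once
    # with a delay of 11, which should be forwarded.)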
|
2018-10-10 00:39:05 +02:00
|
|
|
Payment = namedtuple('Payment', ['innode', 'route', 'payment_hash'])
|
|
|
|
|
|
|
|
to_pay = []
|
2018-10-09 10:53:52 +02:00
|
|
|
for i in range(len(innodes)):
|
|
|
|
# This one will cause WIRE_INCORRECT_CLTV_EXPIRY from l1.
        route = [{'msatoshi': 100001001,
                  'id': l1.info['id'],
                  'delay': 10,
                  'channel': inchans[i]},
                 {'msatoshi': 100000000,
                  'id': outnodes[i].info['id'],
                  'delay': 5,
                  'channel': outchans[i]}]
        payment_hash = outnodes[i].rpc.invoice(100000000, "invoice", "invoice")['payment_hash']
        to_pay.append(Payment(innodes[i], route, payment_hash))

        # This one should be routed through to the outnode.
        route = [{'msatoshi': 100001001,
                  'id': l1.info['id'],
                  'delay': 11,
                  'channel': inchans[i]},
                 {'msatoshi': 100000000,
                  'id': outnodes[i].info['id'],
                  'delay': 5,
                  'channel': outchans[i]}]
        payment_hash = outnodes[i].rpc.invoice(100000000, "invoice2", "invoice2")['payment_hash']
        to_pay.append(Payment(innodes[i], route, payment_hash))

    # sendpay is async.
    for p in to_pay:
        p.innode.rpc.sendpay(p.route, p.payment_hash)
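    # No waitsendpay yet: these payments are deliberately left in flight
    # across the restart below.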

    # Now restart l1 while traffic is flowing...
    l1.restart()

    # Wait for them to finish.
    for n in innodes:
        wait_for(lambda: 'pending' not in [p['status'] for p in n.rpc.listsendpays()['payments']])


@unittest.skipIf(not DEVELOPER, "need dev-disconnect")
def test_fail_unconfirmed(node_factory, bitcoind, executor):
    """Test that if we crash with an unconfirmed connection to a known
    peer, we don't have a dangling peer in the db"""
    # '=' is a NOOP disconnect, but sets up the file.
    l1 = node_factory.get_node(disconnect=['=WIRE_OPEN_CHANNEL'])
    l2 = node_factory.get_node()

    # First one, we close by mutual agreement.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 200000, wait_for_active=True)
    l1.rpc.close(l2.info['id'])

    # Make sure it's closed
    l1.wait_for_channel_onchain(l2.info['id'])
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('State changed from CLOSINGD_COMPLETE to FUNDING_SPEND_SEEN')

    l1.stop()
    # Mangle the disconnect file so this time it blackholes...
    with open(l1.daemon.disconnect_file, "w") as f:
        f.write("0WIRE_OPEN_CHANNEL\n")
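    # '0' makes dev_disconnect blackhole the connection at WIRE_OPEN_CHANNEL,
    # so the next funding attempt hangs instead of completing.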
    l1.start()

    # Now we establish a new channel, which gets stuck.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundwallet(10**7)
    executor.submit(l1.rpc.fundchannel, l2.info['id'], 100000)
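    # (Submitted via the executor since this fundchannel call will block on
    # the blackholed WIRE_OPEN_CHANNEL.)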

    l1.daemon.wait_for_log("dev_disconnect")

    # Now complete the old channel.
    bitcoind.generate_block(100)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')

    # And crash l1, which is stuck.
    l1.daemon.kill()

    # Now, restart and see if it can connect OK.
    del l1.daemon.opts['dev-disconnect']
    l1.start()

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 200000, wait_for_active=True)


def test_change_chaining(node_factory, bitcoind):
    """Test change chaining of unconfirmed fundings

    Change chaining is the case where one transaction is broadcast but not
    confirmed yet and we already build a followup on top of the change. If the
    first transaction doesn't confirm we may end up creating a series of
    unconfirmable transactions. This is why we generally disallow chaining.

    """
    l1, l2, l3 = node_factory.get_nodes(3)
    l1.fundwallet(10**8)  # This will create an output with 1 confirmation

    # Now fund a channel from l1 to l2: it should succeed with minconf=1, but
    # not with minconf=2, since the output only has one confirmation.
    l1.connect(l2)
    with pytest.raises(RpcError):
        l1.rpc.fundchannel(l2.info['id'], 10**7, minconf=2)
    l1.rpc.fundchannel(l2.info['id'], 10**7)  # Defaults to minconf=1

    # We don't have confirmed outputs anymore, so this should fail without minconf=0
    l1.connect(l3)
    with pytest.raises(RpcError):
        l1.rpc.fundchannel(l3.info['id'], 10**7)  # Defaults to minconf=1
    l1.rpc.fundchannel(l3.info['id'], 10**7, minconf=0)
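    # (minconf=0 allows spending the still-unconfirmed change output, i.e. the
    # chaining case described in the docstring.)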


def test_feerate_spam(node_factory, chainparams):
    l1, l2 = node_factory.line_graph(2)

    # We constrain the value the opener has at its disposal so we get the
    # REMOTE feerate we are looking for below. This may be fragile and depends
    # on the transactions we generate.
    slack = 45000000 if not chainparams['elements'] else 68000000

    # Pay almost everything to l2.
    l1.pay(l2, 10**9 - slack)

    # It will send this once (may have happened before line_graph's wait)
    wait_for(lambda: l1.daemon.is_in_log('Setting REMOTE feerate to 15000'))
    wait_for(lambda: l1.daemon.is_in_log('peer_out WIRE_UPDATE_FEE'))

    # Now change feerates to something l1 can't afford.
    l1.set_feerates((100000, 100000, 100000, 100000))
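    # (Pin every feerate target at 100000, far more than l1's remaining funds
    # can pay for.)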

    # It will raise as far as it can (48000)
    l1.daemon.wait_for_log('Setting REMOTE feerate to 48000')
    l1.daemon.wait_for_log('peer_out WIRE_UPDATE_FEE')

    # But it won't do it again once it's at max.
    with pytest.raises(TimeoutError):
        l1.daemon.wait_for_log('peer_out WIRE_UPDATE_FEE', timeout=5)


@unittest.skipIf(not DEVELOPER, "need dev-feerate")
def test_feerate_stress(node_factory, executor):
    # Third node makes HTLC traffic less predictable.
    l1, l2, l3 = node_factory.line_graph(3, opts={'commit-time': 100,
                                                  'may_reconnect': True})

    l1.pay(l2, 10**9 // 2)
    scid12 = l1.get_channel_scid(l2)
    scid23 = l2.get_channel_scid(l3)

    routel1l3 = [{'msatoshi': '10002msat', 'id': l2.info['id'], 'delay': 11, 'channel': scid12},
                 {'msatoshi': '10000msat', 'id': l3.info['id'], 'delay': 5, 'channel': scid23}]
    routel2l1 = [{'msatoshi': '10000msat', 'id': l1.info['id'], 'delay': 5, 'channel': scid12}]
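    # Randomly interleave payments in both directions, dev-feerate bumps and
    # forced disconnects; channeld should never end up with a bad signature.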

    rate = 1875
    NUM_ATTEMPTS = 25
    l1done = 0
    l2done = 0
    prev_log = 0
    while l1done < NUM_ATTEMPTS and l2done < NUM_ATTEMPTS:
        try:
            r = random.randrange(6)
            if r == 5:
                l1.rpc.sendpay(routel1l3, "{:064x}".format(l1done))
                l1done += 1
            elif r == 4:
                l2.rpc.sendpay(routel2l1, "{:064x}".format(l2done))
                l2done += 1
            elif r > 0:
                l1.rpc.call('dev-feerate', [l2.info['id'], rate])
                rate += 5
            else:
                l2.rpc.disconnect(l1.info['id'], True)
                time.sleep(1)
        except RpcError:
            time.sleep(0.01)
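            # Any race between fee updates and HTLCs would surface as a bad
            # commitment signature, so check the new log lines each time.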
            assert not l1.daemon.is_in_log('Bad.*signature', start=prev_log)
            prev_log = len(l1.daemon.logs)

    # Make sure it's reconnected, and wait for last payment.
    wait_for(lambda: l1.rpc.getpeer(l2.info['id'])['connected'])
    # We can get TEMPORARY_CHANNEL_FAILURE due to disconnect, too.
    with pytest.raises(RpcError, match='WIRE_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS|WIRE_TEMPORARY_CHANNEL_FAILURE'):
        l1.rpc.waitsendpay("{:064x}".format(l1done - 1))
    with pytest.raises(RpcError, match='WIRE_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS|WIRE_TEMPORARY_CHANNEL_FAILURE'):
        l2.rpc.waitsendpay("{:064x}".format(l2done - 1))
    l1.rpc.call('dev-feerate', [l2.info['id'], rate - 5])
    assert not l1.daemon.is_in_log('Bad.*signature')
    assert not l2.daemon.is_in_log('Bad.*signature')


@unittest.skipIf(not DEVELOPER, "need dev_disconnect")
def test_pay_disconnect_stress(node_factory, executor):
    """Expose race in htlc restoration in channeld: 50% chance of failure"""
    if SLOW_MACHINE and VALGRIND:
        NUM_RUNS = 2
    else:
        NUM_RUNS = 5
    for i in range(NUM_RUNS):
        l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True},
                                                  {'may_reconnect': True,
                                                   'disconnect': ['=WIRE_UPDATE_ADD_HTLC',
                                                                  '-WIRE_COMMITMENT_SIGNED']}])

        scid12 = l1.get_channel_scid(l2)
        routel2l1 = [{'msatoshi': '10000msat', 'id': l1.info['id'], 'delay': 5, 'channel': scid12}]

        # Get invoice from l1 to pay.
        payhash1 = l1.rpc.invoice(10000, "invoice", "invoice")['payment_hash']

        # Start balancing payment.
        fut = executor.submit(l1.pay, l2, 10**9 // 2)

        # As soon as reverse payment is accepted, reconnect.
        while True:
            l2.rpc.sendpay(routel2l1, payhash1)
            try:
                # This will usually fail with Capacity exceeded
                l2.rpc.waitsendpay(payhash1, timeout=TIMEOUT)
                break
            except RpcError:
                pass

        fut.result()


def test_wumbo_channels(node_factory, bitcoind):
    f = bytes.fromhex(expected_peer_features())

    # OPT_LARGE_CHANNELS = 18 (19 for us). 0x080000
    f = (f[:-3] + bytes([f[-3] | 0x08]) + f[-2:]).hex()
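    # (The feature bitmap is big-endian hex, so bit 19 (0x080000) sits in the
    # third byte from the end; this is the optional large-channels bit we
    # expect l2 to advertise back.)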

    l1, l2, l3 = node_factory.get_nodes(3,
                                        opts=[{'large-channels': None},
                                              {'large-channels': None},
                                              {}])
    conn = l1.rpc.connect(l2.info['id'], 'localhost', port=l2.port)
    assert conn['features'] == f
    assert only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['features'] == f

    # Now, can we open a giant channel?
    l1.fundwallet(1 << 26)
    l1.rpc.fundchannel(l2.info['id'], 1 << 24)
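    # 2**24 sat is the smallest amount that requires option_support_large_channel.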

    # Get that mined, and announced.
    bitcoind.generate_block(6, wait_for_mempool=1)

    # Connect l3, get gossip.
    l3.rpc.connect(l1.info['id'], 'localhost', port=l1.port)
    wait_for(lambda: len(l3.rpc.listnodes(l1.info['id'])['nodes']) == 1)
    wait_for(lambda: 'features' in only_one(l3.rpc.listnodes(l1.info['id'])['nodes']))

    # Make sure channel capacity is what we expected.
    assert ([c['amount_msat'] for c in l3.rpc.listchannels()['channels']]
            == [Millisatoshi(str(1 << 24) + "sat")] * 2)

    # Make sure we can't open a wumbo channel if we don't agree.
    with pytest.raises(RpcError, match='Amount exceeded'):
        l1.rpc.fundchannel(l3.info['id'], 1 << 24)

    # But we can open and announce a normal one.
    l1.rpc.fundchannel(l3.info['id'], 'all')
    bitcoind.generate_block(6, wait_for_mempool=1)
    wait_for(lambda: l1.channel_state(l3) == 'CHANNELD_NORMAL')

    # Make sure l2 sees correct size.
    wait_for(lambda: [c['amount_msat'] for c in l2.rpc.listchannels(l1.get_channel_scid(l3))['channels']]
             == [Millisatoshi(str((1 << 24) - 1) + "sat")] * 2)

    # Make sure 'all' works with wumbo peers.
    l1.rpc.close(l2.info['id'])
    bitcoind.generate_block(1, wait_for_mempool=1)
    wait_for(lambda: l1.channel_state(l2) == 'ONCHAIN')

    l1.rpc.connect(l2.info['id'], 'localhost', port=l2.port)
    l1.rpc.fundchannel(l2.info['id'], 'all')
    bitcoind.generate_block(1, wait_for_mempool=1)
    wait_for(lambda: 'CHANNELD_NORMAL' in [c['state'] for c in only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels']])

    # Exact amount depends on fees, but it will be wumbo!
    amount = [c['funding_msat'][l1.info['id']] for c in only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'] if c['state'] == 'CHANNELD_NORMAL'][0]
    assert Millisatoshi(amount) > Millisatoshi(str((1 << 24) - 1) + "sat")