from fixtures import *  # noqa: F401,F403
from fixtures import TEST_NETWORK
from ephemeral_port_reserve import reserve  # type: ignore
from pyln.client import RpcError, Millisatoshi
import pyln.proto.wire as wire
from utils import (
    only_one, wait_for, sync_blockheight, TIMEOUT,
    expected_peer_features, expected_node_features,
    expected_channel_features,
    check_coin_moves, first_channel_id, account_balance, basic_fee,
    scriptpubkey_addr,
    EXPERIMENTAL_FEATURES, mine_funding_to_announce
)
from pyln.testing.utils import SLOW_MACHINE, VALGRIND, EXPERIMENTAL_DUAL_FUND, FUNDAMOUNT

import os
import pytest
import random
import re
import time
import unittest
import websocket


def test_connect_basic(node_factory):
    l1, l2 = node_factory.line_graph(2, fundchannel=False)

    # These should be in openingd.
    assert l1.rpc.getpeer(l2.info['id'])['connected']
    assert l2.rpc.getpeer(l1.info['id'])['connected']
    assert len(l1.rpc.getpeer(l2.info['id'])['channels']) == 0
    assert len(l2.rpc.getpeer(l1.info['id'])['channels']) == 0

    # Reconnect should be a noop
    ret = l1.rpc.connect(l2.info['id'], 'localhost', port=l2.port)
    assert ret['id'] == l2.info['id']
    assert ret['address'] == {'type': 'ipv4', 'address': '127.0.0.1', 'port': l2.port}

    ret = l2.rpc.connect(l1.info['id'], host='localhost', port=l1.port)
    assert ret['id'] == l1.info['id']
    # FIXME: This gives a bogus address (since they connected to us): better to give none!
    assert 'address' in ret

    # Should still only have one peer!
    assert len(l1.rpc.listpeers()) == 1
    assert len(l2.rpc.listpeers()) == 1

    # Should get a reasonable error if no address is known for the peer.
    with pytest.raises(RpcError, match=r'Unable to connect, no address known'):
        l1.rpc.connect('032cf15d1ad9c4a08d26eab1918f732d8ef8fdc6abb9640bf3db174372c491304e')

    # Should get a reasonable error if the connection is refused.
    with pytest.raises(RpcError, match=r'Connection establishment: Connection refused'):
        l1.rpc.connect('032cf15d1ad9c4a08d26eab1918f732d8ef8fdc6abb9640bf3db174372c491304e', 'localhost', 1)

    # Should get a reasonable error if the key for the peer is wrong.
    with pytest.raises(RpcError, match=r'Cryptographic handshake: peer closed connection \(wrong key\?\)'):
        l1.rpc.connect('032cf15d1ad9c4a08d26eab1918f732d8ef8fdc6abb9640bf3db174372c491304e', 'localhost', l2.port)


@pytest.mark.developer("needs DEVELOPER=1 for fast gossip and --dev-allow-localhost for local remote_addr")
def test_remote_addr(node_factory, bitcoind):
    """Check address discovery (BOLT1 #917) init remote_addr works as designed:

    A `node_announcement` update must only be sent out when:
     - at least two peers
     - that we have a channel with
     - report the same `remote_addr`

    We perform the logic tests on l2; setup:
        l1 --> [l2] <-- l3
    """
    # don't announce anything per se
    opts = {'may_reconnect': True,
            'dev-allow-localhost': None,
            'dev-no-reconnect': None}
    l1, l2, l3 = node_factory.get_nodes(3, opts)

    # Disable announcing local autobind addresses with dev-allow-localhost.
    # We need to set l2's 'bind-addr' to the (generated) value of 'addr',
    # so we stop it, set the 'bind-addr' option, delete 'addr' and restart.
    l2.stop()
    l2.daemon.opts['bind-addr'] = l2.daemon.opts['addr']
    del l2.daemon.opts['addr']
    l2.start()

    l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
    logmsg = l2.daemon.wait_for_log("Peer says it sees our address as: 127.0.0.1:[0-9]{5}")
    # check 'listpeers' contains the 'remote_addr' as logged
    assert logmsg.endswith(l2.rpc.listpeers()['peers'][0]['remote_addr'])

    # Fund the first channel so the initial node_announcement is sent,
    # and also check no addresses have been announced yet.
    l1.fundchannel(l2)
    bitcoind.generate_block(5)
    l1.daemon.wait_for_log(f"Received node_announcement for node {l2.info['id']}")
    assert len(l1.rpc.listnodes(l2.info['id'])['nodes'][0]['addresses']) == 0

    # When we restart l1 with a channel and reconnect, a node_announcement
    # update must not yet be sent, as we need the same `remote_addr`
    # confirmed from another peer we have a channel with.
    # Note: in this state l2 stores remote_addr as reported by l1.
    assert not l2.daemon.is_in_log("Update our node_announcement for discovered address: 127.0.0.1:9735")
    l1.restart()
    l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
    l2.daemon.wait_for_log("Peer says it sees our address as: 127.0.0.1:[0-9]{5}")

    # Now l1 sees l2 but without announced addresses.
    assert len(l1.rpc.listnodes(l2.info['id'])['nodes'][0]['addresses']) == 0
    assert not l2.daemon.is_in_log("Update our node_announcement for discovered address: 127.0.0.1:9735")

    # Connect the second node. This will not yet trigger a `node_announcement`
    # update either, as we again do not have a channel at the time we connect.
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l2.daemon.wait_for_log("Peer says it sees our address as: 127.0.0.1:[0-9]{5}")

    # Fund the channel and check we didn't send an update earlier already.
    l2.fundchannel(l3, wait_for_active=True)
    bitcoind.generate_block(5)
    assert not l2.daemon.is_in_log("Update our node_announcement for discovered address: 127.0.0.1:9735")

    # Restart, reconnect and re-check for an updated node_announcement. This
    # time l2 sees that two different peers with channels reported the same
    # `remote_addr`.
    l3.restart()
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l2.daemon.wait_for_log("Peer says it sees our address as: 127.0.0.1:[0-9]{5}")
    l2.daemon.wait_for_log("Update our node_announcement for discovered address: 127.0.0.1:9735")
    l1.daemon.wait_for_log(f"Received node_announcement for node {l2.info['id']}")

    address = l1.rpc.listnodes(l2.info['id'])['nodes'][0]['addresses'][0]
    assert address['type'] == "ipv4"
    assert address['address'] == "127.0.0.1"
    assert address['port'] == 9735
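

# A minimal illustrative sketch (not part of the test suite) of the gating
# rule test_remote_addr exercises: only announce a discovered address once
# two distinct peers we have a channel with report the same remote_addr.
# The 'has_channel' key is hypothetical; listpeers exposes the real data
# differently.
def _discovered_address_sketch(peers):
    """Return the address to announce, or None if not yet confirmed."""
    reports = [p['remote_addr'] for p in peers
               if p.get('remote_addr') and p.get('has_channel')]
    for addr in reports:
        if reports.count(addr) >= 2:
            return addr
    return None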


@pytest.mark.developer("needs DEVELOPER=1 for fast gossip and --dev-allow-localhost for local remote_addr")
def test_remote_addr_disabled(node_factory, bitcoind):
    """Simply tests that IP address discovery announcements can be turned off.

    We perform the logic tests on l2; setup:
        l1 --> [l2] <-- l3
    """
    opts = {'dev-allow-localhost': None,
            'disable-ip-discovery': None,
            'may_reconnect': True,
            'dev-no-reconnect': None}
    l1, l2, l3 = node_factory.get_nodes(3, opts=[opts, opts, opts])

    # l1->l2
    l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
    l2.daemon.wait_for_log("Peer says it sees our address as: 127.0.0.1:[0-9]{5}")
    l1.fundchannel(l2)
    bitcoind.generate_block(5)
    l1.daemon.wait_for_log(f"Received node_announcement for node {l2.info['id']}")

    # l2->l3
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l2.daemon.wait_for_log("Peer says it sees our address as: 127.0.0.1:[0-9]{5}")
    l2.fundchannel(l3)
    bitcoind.generate_block(5)

    # restart both and wait for channels to be ready
    l1.restart()
    l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
    l2.daemon.wait_for_log("Already have funding locked in")
    l3.restart()
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l2.daemon.wait_for_log("Already have funding locked in")

    # If IP discovery had been enabled, we would have sent an updated
    # node_announcement by now. Check that we didn't...
    assert not l2.daemon.is_in_log("Update our node_announcement for discovered address")


def test_connect_standard_addr(node_factory):
    """Test standard node@host:port address
    """
    l1, l2, l3 = node_factory.get_nodes(3)

    # node@host
    ret = l1.rpc.connect("{}@{}".format(l2.info['id'], 'localhost'), port=l2.port)
    assert ret['id'] == l2.info['id']
    assert ret['address'] == {'type': 'ipv4', 'address': '127.0.0.1', 'port': l2.port}

    # node@host:port
    ret = l1.rpc.connect("{}@localhost:{}".format(l3.info['id'], l3.port))
    assert ret['id'] == l3.info['id']

    # node@[ipv6]:port --- not supported by our CI
    # ret = l1.rpc.connect("{}@[::1]:{}".format(l3.info['id'], l3.port))
    # assert ret['id'] == l3.info['id']


def test_reconnect_channel_peers(node_factory, executor):
    l1 = node_factory.get_node(may_reconnect=True)
    l2 = node_factory.get_node(may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 10**6)
    l2.restart()

    # Should reconnect.
    wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
    wait_for(lambda: only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
    # Connect command should succeed.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # Stop l2 and wait for l1 to notice.
    l2.stop()
    wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])

    # Now should fail.
    with pytest.raises(RpcError, match=r'(Connection refused|Bad file descriptor)'):
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # Wait for exponential backoff to give us a 2 second window.
    l1.daemon.wait_for_log('Will try reconnect in 2 seconds')
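    # (lightningd presumably doubles the delay on each failed reconnect
    # attempt: 1s, 2s, 4s, etc., so "in 2 seconds" implies the 1-second
    # attempt has already happened.)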

    # It should now succeed when it restarts.
    l2.start()

    # Multiples should be fine!
    fut1 = executor.submit(l1.rpc.connect, l2.info['id'], 'localhost', l2.port)
    fut2 = executor.submit(l1.rpc.connect, l2.info['id'], 'localhost', l2.port)
    fut3 = executor.submit(l1.rpc.connect, l2.info['id'], 'localhost', l2.port)
    fut1.result(10)
    fut2.result(10)
    fut3.result(10)


def test_connection_moved(node_factory, executor):
    slow_start = os.path.join(os.getcwd(), 'tests/plugins/slow_start.py')
    options = {'may_reconnect': True, 'plugin': slow_start}
    l1, l2 = node_factory.get_nodes(2, opts=options)

    # Set up the plugin to wait for a connection
    executor.submit(l1.rpc.waitconn)
    log = l1.daemon.wait_for_log('listening for connections')
    match = re.search(r'on port (\d*)', log)
    assert match and len(match.groups()) == 1
    hang_port = int(match.groups()[0])

    # Attempt connection
    fut_hang = executor.submit(l1.rpc.connect, l2.info['id'],
                               'localhost', hang_port)
    l1.daemon.wait_for_log('connection from')

    # Provide correct connection details
    ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    assert ret['address'] == {'type': 'ipv4', 'address': '127.0.0.1', 'port': l2.port}

    # If we failed to update the connection, this call will error
    fut_hang.result(TIMEOUT)


def test_balance(node_factory):
    l1, l2 = node_factory.line_graph(2, fundchannel=True)
    p1 = only_one(l1.rpc.getpeer(peer_id=l2.info['id'], level='info')['channels'])
    p2 = only_one(l2.rpc.getpeer(l1.info['id'], 'info')['channels'])
    assert p1['to_us_msat'] == 10**6 * 1000
    assert p1['total_msat'] == 10**6 * 1000
    assert p2['to_us_msat'] == 0
    assert p2['total_msat'] == 10**6 * 1000


@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_bad_opening(node_factory):
    # l1 asks for a too-long locktime
    l1 = node_factory.get_node(options={'watchtime-blocks': 100})
    l2 = node_factory.get_node(options={'max-locktime-blocks': 99})
    ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    assert ret['id'] == l2.info['id']

    l1.daemon.wait_for_log('Handed peer, entering loop')
    l2.daemon.wait_for_log('Handed peer, entering loop')

    l1.fundwallet(10**6 + 1000000)
    with pytest.raises(RpcError):
        l1.rpc.fundchannel(l2.info['id'], 10**6)

    l2.daemon.wait_for_log('to_self_delay 100 larger than 99')


@pytest.mark.developer("gossip without DEVELOPER=1 is slow")
@unittest.skipIf(TEST_NETWORK != 'regtest', "Fee computation and limits are network specific")
@pytest.mark.slow_test
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_opening_tiny_channel(node_factory):
    # Test custom min-capacity-sat parameters
    #
    #  [l1]-----> [l2]  (~6000) - technical minimal value that won't be rejected
    #      \
    #       o---> [l3] (10000) - the current default
    #        \
    #         o-> [l4] (20000) - a node with a higher minimal value
    #
    # For each:
    # 1. Try to establish a channel with capacity 1sat smaller than min_capacity_sat
    # 2. Try to establish a channel with capacity of exactly min_capacity_sat
    #
    # BOLT2
    # The receiving node MAY fail the channel if:
    #  - funding_satoshis is too small
    #  - it considers `feerate_per_kw` too small for timely processing or unreasonably large.
    #
    dustlimit = 546
    reserves = 2 * dustlimit
    min_commit_tx_fees = basic_fee(7500)
    overhead = reserves + min_commit_tx_fees
    if EXPERIMENTAL_FEATURES or EXPERIMENTAL_DUAL_FUND:
        # Gotta fund those anchors too!
        overhead += 660
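    # (For illustration, assuming basic_fee() prices the BOLT #3 basic
    # commitment weight of 724, or 1124 with anchor outputs:
    # basic_fee(7500) == 724 * 7500 // 1000 == 5430sat, so overhead is
    # 2 * 546 + 5430 == 6522sat without anchors, roughly the "~6000" above.)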

    l2_min_capacity = 1      # just enough to get past capacity filter
    l3_min_capacity = 10000  # the current default
    l4_min_capacity = 20000  # a server with more than default minimum

    opts = [{'min-capacity-sat': 0},
            {'min-capacity-sat': l2_min_capacity},
            {'min-capacity-sat': l3_min_capacity},
            {'min-capacity-sat': l4_min_capacity}]
    l1, l2, l3, l4 = node_factory.get_nodes(4, opts=opts)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l1.rpc.connect(l4.info['id'], 'localhost', l4.port)

    with pytest.raises(RpcError, match=r'They sent (error|warning).*channel capacity is .*, which is below .*sat'):
        l1.fundchannel(l2, l2_min_capacity + overhead - 1)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, l2_min_capacity + overhead)

    with pytest.raises(RpcError, match=r'They sent (error|warning).*channel capacity is .*, which is below .*sat'):
        l1.fundchannel(l3, l3_min_capacity + overhead - 1)
    l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l1.fundchannel(l3, l3_min_capacity + overhead)

    with pytest.raises(RpcError, match=r'They sent (error|warning).*channel capacity is .*, which is below .*sat'):
        l1.fundchannel(l4, l4_min_capacity + overhead - 1)
    l1.rpc.connect(l4.info['id'], 'localhost', l4.port)
    l1.fundchannel(l4, l4_min_capacity + overhead)

    # Note that this check applies locally too, so you can't open it if
    # you would reject it.
    l3.rpc.connect(l2.info['id'], 'localhost', l2.port)
    with pytest.raises(RpcError, match=r"channel capacity is .*, which is below .*sat"):
        l3.fundchannel(l2, l3_min_capacity + overhead - 1)
    l3.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l3.fundchannel(l2, l3_min_capacity + overhead)


def test_second_channel(node_factory):
    l1, l2, l3 = node_factory.get_nodes(3)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.connect(l3.info['id'], 'localhost', l3.port)

    l1.fundchannel(l2, 10**6)
    l1.fundchannel(l3, 10**6)


def test_channel_abandon(node_factory, bitcoind):
    """Our open tx isn't mined, we doublespend it away"""
    l1, l2 = node_factory.get_nodes(2)

    SATS = 10**6

    # Add some for fees
    l1.fundwallet(SATS + 10000)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.fundchannel(l2.info['id'], SATS, feerate='1875perkw')

    opening_utxo = only_one([o for o in l1.rpc.listfunds()['outputs'] if o['reserved']])
    psbt = l1.rpc.utxopsbt(0, "253perkw", 0, [opening_utxo['txid'] + ':' + str(opening_utxo['output'])], reserve=0, reservedok=True)['psbt']

    # We expect a reservation for 2016 blocks; unreserve it.
    reservations = only_one(l1.rpc.unreserveinputs(psbt, reserve=2015)['reservations'])
    assert reservations['reserved']
    assert reservations['reserved_to_block'] == bitcoind.rpc.getblockchaininfo()['blocks'] + 1
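    # (2016 - 2015 leaves exactly one block of reservation, hence height + 1.)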
    assert only_one(l1.rpc.unreserveinputs(psbt, reserve=1)['reservations'])['reserved'] is False

    # Now it's unreserved, we can doublespend it (as long as we exceed
    # previous fee to RBF!).
    withdraw = l1.rpc.withdraw(l1.rpc.newaddr()['bech32'], "all")
    assert bitcoind.rpc.decoderawtransaction(withdraw['tx'])['vout'][0]['value'] > SATS / 10**8
    bitcoind.generate_block(1, wait_for_mempool=withdraw['txid'])

    # FIXME: lightningd should notice channel will never now open!
    print(l1.rpc.listpeers())
    assert (only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state']
            == 'CHANNELD_AWAITING_LOCKIN')


@pytest.mark.developer
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_disconnect(node_factory):
    # These should all make us fail
    disconnects = ['-WIRE_INIT',
                   '+WIRE_INIT']
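    # (Dev-disconnect prefixes used throughout this file, per lightningd's
    # dev_disconnect support: '-' drops the connection before sending the
    # named message, '+' drops it after sending, '0' blackholes it, and
    # '=' does nothing, merely consuming a disconnect slot.)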
    l1 = node_factory.get_node(disconnect=disconnects)
    l2 = node_factory.get_node()

    with pytest.raises(RpcError):
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    with pytest.raises(RpcError):
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # Should have 2 connect fails.
    for d in disconnects:
        l1.daemon.wait_for_log('{}-.*Failed connected out'
                               .format(l2.info['id']))

    # Should still only have one peer!
    assert len(l1.rpc.listpeers()) == 1
    assert len(l2.rpc.listpeers()) == 1


@pytest.mark.developer
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_disconnect_opener(node_factory):
    # Now error on opener side during channel open.
    disconnects = ['-WIRE_OPEN_CHANNEL',
                   '+WIRE_OPEN_CHANNEL',
                   '-WIRE_FUNDING_CREATED']
    if EXPERIMENTAL_DUAL_FUND:
        disconnects = ['-WIRE_OPEN_CHANNEL2',
                       '+WIRE_OPEN_CHANNEL2',
                       '-WIRE_TX_ADD_INPUT',
                       '+WIRE_TX_ADD_INPUT',
                       '-WIRE_TX_ADD_OUTPUT',
                       '+WIRE_TX_ADD_OUTPUT',
                       '-WIRE_TX_COMPLETE',
                       '+WIRE_TX_COMPLETE']

    l1 = node_factory.get_node(disconnect=disconnects)
    l2 = node_factory.get_node(may_reconnect=EXPERIMENTAL_DUAL_FUND)

    l1.fundwallet(2000000)
    for d in disconnects:
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
        with pytest.raises(RpcError):
            l1.rpc.fundchannel(l2.info['id'], 25000)
        assert l1.rpc.getpeer(l2.info['id']) is None

    # This one will succeed.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.fundchannel(l2.info['id'], 25000)

    # Should still only have one peer!
    assert len(l1.rpc.listpeers()['peers']) == 1
    assert len(l2.rpc.listpeers()['peers']) == 1


def test_remote_disconnect(node_factory):
    l1, l2 = node_factory.get_nodes(2)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    wait_for(lambda: l2.rpc.listpeers()['peers'] != [])

    l2.rpc.disconnect(l1.info['id'])

    # l1 should notice!
    wait_for(lambda: l1.rpc.listpeers()['peers'] == [])


@pytest.mark.developer
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_disconnect_fundee(node_factory):
    # Now error on fundee side during channel open.
    disconnects = ['-WIRE_ACCEPT_CHANNEL',
                   '+WIRE_ACCEPT_CHANNEL']
    if EXPERIMENTAL_DUAL_FUND:
        disconnects = ['-WIRE_ACCEPT_CHANNEL2',
                       '+WIRE_ACCEPT_CHANNEL2',
                       '-WIRE_TX_COMPLETE',
                       '+WIRE_TX_COMPLETE']

    l1 = node_factory.get_node()
    l2 = node_factory.get_node(disconnect=disconnects)

    l1.fundwallet(2000000)
    for d in disconnects:
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
        with pytest.raises(RpcError):
            l1.rpc.fundchannel(l2.info['id'], 25000)
        assert l1.rpc.getpeer(l2.info['id']) is None

    # This one will succeed.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.fundchannel(l2.info['id'], 25000)

    # Should still only have one peer!
    assert len(l1.rpc.listpeers()) == 1
    assert len(l2.rpc.listpeers()) == 1


@unittest.skipIf(TEST_NETWORK != 'regtest', "elementsd doesn't yet support PSBT features we need")
@pytest.mark.developer
@pytest.mark.openchannel('v2')
def test_disconnect_fundee_v2(node_factory):
    # Now error on fundee side during channel open, with them funding
    disconnects = ['-WIRE_ACCEPT_CHANNEL2',
                   '+WIRE_ACCEPT_CHANNEL2',
                   '-WIRE_TX_ADD_INPUT',
                   '+WIRE_TX_ADD_INPUT',
                   '-WIRE_TX_ADD_OUTPUT',
                   '+WIRE_TX_ADD_OUTPUT',
                   '-WIRE_TX_COMPLETE',
                   '+WIRE_TX_COMPLETE']

    l1 = node_factory.get_node()
    l2 = node_factory.get_node(disconnect=disconnects,
                               options={'funder-policy': 'match',
                                        'funder-policy-mod': 100,
                                        'funder-fuzz-percent': 0,
                                        'funder-lease-requests-only': False})

    l1.fundwallet(2000000)
    l2.fundwallet(2000000)
    for d in disconnects:
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
        with pytest.raises(RpcError):
            l1.rpc.fundchannel(l2.info['id'], 25000)
        assert l1.rpc.getpeer(l2.info['id']) is None

    # This one will succeed.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.fundchannel(l2.info['id'], 25000)

    # Should still only have one peer!
    assert len(l1.rpc.listpeers()) == 1
    assert len(l2.rpc.listpeers()) == 1


@pytest.mark.developer
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_disconnect_half_signed(node_factory):
    # Now, these are the corner cases.  Fundee sends funding_signed,
    # but opener doesn't receive it.
    disconnects = ['-WIRE_FUNDING_SIGNED']
    if EXPERIMENTAL_DUAL_FUND:
        disconnects = ['-WIRE_COMMITMENT_SIGNED']

    l1 = node_factory.get_node()
    l2 = node_factory.get_node(disconnect=disconnects)

    l1.fundwallet(2000000)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    with pytest.raises(RpcError):
        l1.rpc.fundchannel(l2.info['id'], 25000)

    # Peer remembers, opener doesn't.
    assert l1.rpc.getpeer(l2.info['id']) is None
    assert l2.rpc.getpeer(l1.info['id'])['id'] == l1.info['id']


@pytest.mark.developer
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_reconnect_signed(node_factory):
    # This will fail *after* both sides consider channel opening.
    disconnects = ['+WIRE_FUNDING_SIGNED']
    if EXPERIMENTAL_DUAL_FUND:
        disconnects = ['+WIRE_COMMITMENT_SIGNED']

    l1 = node_factory.get_node(may_reconnect=True)
    l2 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True)

    l1.fundwallet(2000000)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.fundchannel(l2.info['id'], 25000)

    # They haven't forgotten each other.
    assert l1.rpc.getpeer(l2.info['id'])['id'] == l2.info['id']
    assert l2.rpc.getpeer(l1.info['id'])['id'] == l1.info['id']

    # Technically, this is async to fundchannel (and could reconnect first)
    if EXPERIMENTAL_DUAL_FUND:
        l1.daemon.wait_for_logs(['sendrawtx exit 0',
                                 'Peer has reconnected, state DUALOPEND_OPEN_INIT'])
    else:
        l1.daemon.wait_for_logs(['sendrawtx exit 0',
                                 'Peer has reconnected, state CHANNELD_AWAITING_LOCKIN'])

    l1.bitcoin.generate_block(6)
    l1.daemon.wait_for_log('to CHANNELD_NORMAL')
    l2.daemon.wait_for_log('to CHANNELD_NORMAL')


@pytest.mark.skip('needs blackhole support')
@pytest.mark.developer
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_reconnect_openingd(node_factory):
    # Openingd thinks we're still opening; opener reconnects.
    disconnects = ['0WIRE_ACCEPT_CHANNEL']
    if EXPERIMENTAL_DUAL_FUND:
        disconnects = ['0WIRE_ACCEPT_CHANNEL2']

    l1 = node_factory.get_node(may_reconnect=True)
    l2 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundwallet(2000000)

    # l2 closes on l1, l1 forgets.
    with pytest.raises(RpcError):
        l1.rpc.fundchannel(l2.info['id'], 25000)
    assert l1.rpc.getpeer(l2.info['id']) is None

    # Reconnect.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # We should get a message about reconnecting.
    l2.daemon.wait_for_log('Killing opening daemon: Reconnected')
    l2.daemon.wait_for_log('Handed peer, entering loop')

    # Should work fine.
    l1.rpc.fundchannel(l2.info['id'], 25000)
    l1.daemon.wait_for_log('sendrawtx exit 0')

    l1.bitcoin.generate_block(3)

    # Just to be sure, the second openingd hands over to channeld. This log
    # line is about channeld being started.
    l2.daemon.wait_for_log(r'channeld-chan#[0-9]: pid [0-9]+, msgfd [0-9]+')


@pytest.mark.skip('needs blackhole support')
@pytest.mark.developer
def test_reconnect_gossiping(node_factory):
    # connectd thinks we're still gossiping; peer reconnects.
    disconnects = ['0INVALID 33333']
    l1 = node_factory.get_node(may_reconnect=True)
    l2 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # Make sure l2 knows about l1
    wait_for(lambda: l2.rpc.listpeers(l1.info['id'])['peers'] != [])

    l2.rpc.sendcustommsg(l1.info['id'], bytes([0x82, 0x35]).hex())
    wait_for(lambda: l1.rpc.listpeers(l2.info['id'])['peers'] == [])

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l2.daemon.wait_for_log('processing now old peer gone')


@pytest.mark.developer("needs dev-disconnect")
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_reconnect_no_update(node_factory, executor, bitcoind):
    """Test that funding_locked is retransmitted on reconnect if new channel.

    This tests if the `funding_locked` is sent if we receive a
    `channel_reestablish` message with `next_commitment_number` == 1
    and our `next_commitment_number` == 1.

    This test makes extensive use of disconnects followed by automatic
    reconnects. See comments for details.
    """
    disconnects = ["-WIRE_FUNDING_LOCKED", "-WIRE_SHUTDOWN"]
    # Allow bad gossip because it might receive WIRE_CHANNEL_UPDATE before
    # announcement of the disconnection
    l1 = node_factory.get_node(may_reconnect=True, allow_bad_gossip=True)
    l2 = node_factory.get_node(disconnect=disconnects, may_reconnect=True)

    # For channeld reconnection
    l1.rpc.connect(l2.info["id"], "localhost", l2.port)

    # LightningNode.fundchannel will fund the channel and generate a
    # block. The block triggers the funding_locked message, which
    # causes a disconnect. The retransmission is then caused by the
    # automatic retry.
    fundchannel_exec = executor.submit(l1.fundchannel, l2, 10**6, False)
    if l1.config('experimental-dual-fund'):
        l1.daemon.wait_for_log(r"dualopend.* Retransmitting funding_locked for channel")
    else:
        l1.daemon.wait_for_log(r"channeld.* Retransmitting funding_locked for channel")
    sync_blockheight(bitcoind, [l1, l2])
    fundchannel_exec.result()
    l1.stop()

    # For closingd reconnection
    l1.daemon.start()

    # Close will trigger the -WIRE_SHUTDOWN and we then wait for the
    # automatic reconnection to trigger the retransmission.
    l1.rpc.close(l2.info['id'], 0)
    l2.daemon.wait_for_log(r"channeld.* Retransmitting funding_locked for channel")
    l1.daemon.wait_for_log(r"CLOSINGD_COMPLETE")
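

# A minimal sketch (not part of the test suite) of the BOLT #2 rule that
# test_reconnect_no_update exercises: on reconnect, funding_locked must be
# retransmitted when the channel_reestablish exchange shows neither side
# has advanced past the first commitment.
def _must_retransmit_funding_locked(our_next_commitment_number,
                                    their_next_commitment_number):
    return our_next_commitment_number == 1 and their_next_commitment_number == 1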


@pytest.mark.developer
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_reconnect_normal(node_factory):
    # Should reconnect fine even if locked message gets lost.
    disconnects = ['-WIRE_FUNDING_LOCKED',
                   '+WIRE_FUNDING_LOCKED']
    l1 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True)
    l2 = node_factory.get_node(may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 10**6)


@pytest.mark.developer
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_reconnect_sender_add1(node_factory):
    # Fail after add is OK, will cause payment failure though.
    # Make sure it doesn't send commit before it sees disconnect though.
    disconnects = ['-WIRE_UPDATE_ADD_HTLC',
                   '+WIRE_UPDATE_ADD_HTLC']

    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True,
                               options={'commit-time': 2000},
                               feerates=(7500, 7500, 7500, 7500))
    l2 = node_factory.get_node(may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 10**6)

    amt = 200000000
    inv = l2.rpc.invoice(amt, 'test_reconnect_sender_add1', 'desc')
    rhash = inv['payment_hash']
    assert only_one(l2.rpc.listinvoices('test_reconnect_sender_add1')['invoices'])['status'] == 'unpaid'

    route = [{'amount_msat': amt, 'id': l2.info['id'], 'delay': 5, 'channel': '1x1x1'}]

    for i in range(0, len(disconnects)):
        with pytest.raises(RpcError):
            l1.rpc.sendpay(route, rhash, payment_secret=inv['payment_secret'])
            l1.rpc.waitsendpay(rhash)

        # Wait for reconnection.
        l1.daemon.wait_for_log('Already have funding locked in')

    # This will send commit, so will reconnect as required.
    l1.rpc.sendpay(route, rhash, payment_secret=inv['payment_secret'])


@pytest.mark.developer
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_reconnect_sender_add(node_factory):
    disconnects = ['-WIRE_COMMITMENT_SIGNED',
                   '+WIRE_COMMITMENT_SIGNED',
                   '-WIRE_REVOKE_AND_ACK',
                   '+WIRE_REVOKE_AND_ACK']
    if EXPERIMENTAL_DUAL_FUND:
        disconnects = ['=WIRE_COMMITMENT_SIGNED'] + disconnects

    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True,
                               feerates=(7500, 7500, 7500, 7500))
    l2 = node_factory.get_node(may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 10**6)

    amt = 200000000
    inv = l2.rpc.invoice(amt, 'testpayment', 'desc')
    rhash = inv['payment_hash']
    assert only_one(l2.rpc.listinvoices('testpayment')['invoices'])['status'] == 'unpaid'

    route = [{'amount_msat': amt, 'id': l2.info['id'], 'delay': 5, 'channel': '1x1x1'}]

    # This will send commit, so will reconnect as required.
    l1.rpc.sendpay(route, rhash, payment_secret=inv['payment_secret'])

    # Should have printed this for every reconnect.
    for i in range(0, len(disconnects)):
        l1.daemon.wait_for_log('Already have funding locked in')


@pytest.mark.developer
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_reconnect_receiver_add(node_factory):
    disconnects = ['-WIRE_COMMITMENT_SIGNED',
                   '+WIRE_COMMITMENT_SIGNED',
                   '-WIRE_REVOKE_AND_ACK',
                   '+WIRE_REVOKE_AND_ACK']
    if EXPERIMENTAL_DUAL_FUND:
        disconnects = ['=WIRE_COMMITMENT_SIGNED'] + disconnects

    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(may_reconnect=True, feerates=(7500, 7500, 7500, 7500))
    l2 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 10**6)

    amt = 200000000
    inv = l2.rpc.invoice(amt, 'testpayment2', 'desc')
    rhash = inv['payment_hash']
    assert only_one(l2.rpc.listinvoices('testpayment2')['invoices'])['status'] == 'unpaid'

    route = [{'amount_msat': amt, 'id': l2.info['id'], 'delay': 5, 'channel': '1x1x1'}]
    l1.rpc.sendpay(route, rhash, payment_secret=inv['payment_secret'])
    for i in range(len(disconnects)):
        l1.daemon.wait_for_log('Already have funding locked in')
    assert only_one(l2.rpc.listinvoices('testpayment2')['invoices'])['status'] == 'paid'


@pytest.mark.developer
def test_reconnect_receiver_fulfill(node_factory):
    # Ordering matters: after +WIRE_UPDATE_FULFILL_HTLC, channeld
    # will continue and try to send WIRE_COMMITMENT_SIGNED: if
    # that's the next failure, it will do two in one run.
    disconnects = ['+WIRE_UPDATE_FULFILL_HTLC',
                   '-WIRE_UPDATE_FULFILL_HTLC',
                   '-WIRE_COMMITMENT_SIGNED',
                   '+WIRE_COMMITMENT_SIGNED',
                   '-WIRE_REVOKE_AND_ACK',
                   '+WIRE_REVOKE_AND_ACK']
    l1 = node_factory.get_node(may_reconnect=True)
    l2 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 10**6)

    amt = 200000000
    inv = l2.rpc.invoice(amt, 'testpayment2', 'desc')
    rhash = inv['payment_hash']
    assert only_one(l2.rpc.listinvoices('testpayment2')['invoices'])['status'] == 'unpaid'

    route = [{'amount_msat': amt, 'id': l2.info['id'], 'delay': 5, 'channel': '1x1x1'}]
    l1.rpc.sendpay(route, rhash, payment_secret=inv['payment_secret'])
    for i in range(len(disconnects)):
        l1.daemon.wait_for_log('Already have funding locked in')
    assert only_one(l2.rpc.listinvoices('testpayment2')['invoices'])['status'] == 'paid'


@pytest.mark.developer
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_shutdown_reconnect(node_factory):
    disconnects = ['-WIRE_SHUTDOWN',
                   '+WIRE_SHUTDOWN']
    l1 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True)
    l2 = node_factory.get_node(may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    chan, _ = l1.fundchannel(l2, 10**6)

    l1.pay(l2, 200000000)
    assert l1.bitcoin.rpc.getmempoolinfo()['size'] == 0

    # This should wait until we're closed.
    l1.rpc.close(chan)

    l1.daemon.wait_for_log('to CHANNELD_SHUTTING_DOWN')
    l2.daemon.wait_for_log('to CHANNELD_SHUTTING_DOWN')

    l1.daemon.wait_for_log('to CLOSINGD_SIGEXCHANGE')
    l2.daemon.wait_for_log('to CLOSINGD_SIGEXCHANGE')

    # And should put closing into mempool (happens async, so
    # CLOSINGD_COMPLETE may come first).
    l1.daemon.wait_for_logs(['sendrawtx exit 0', 'to CLOSINGD_COMPLETE'])
    l2.daemon.wait_for_logs(['sendrawtx exit 0', 'to CLOSINGD_COMPLETE'])
    assert l1.bitcoin.rpc.getmempoolinfo()['size'] == 1


@pytest.mark.developer
def test_reconnect_remote_sends_no_sigs(node_factory):
    """We re-announce, even when remote node doesn't send its announcement_signatures on reconnect.
    """
    l1, l2 = node_factory.line_graph(2, wait_for_announce=True, opts={'may_reconnect': True})

    # When l1 restarts (with rescan=1), make it think it hasn't
    # reached announce_depth, so it won't re-send announcement_signatures
    def no_blocks_above(req):
        if req['params'][0] > 107:
            return {"result": None,
                    "error": {"code": -8, "message": "Block height out of range"}, "id": req['id']}
        else:
            return {'result': l1.bitcoin.rpc.getblockhash(req['params'][0]),
                    "error": None, 'id': req['id']}

    l1.daemon.rpcproxy.mock_rpc('getblockhash', no_blocks_above)
    l1.restart()

    # l2 will now use the (REMOTE's) announcement_signatures it has stored
    wait_for(lambda: only_one(l2.rpc.listpeers()['peers'][0]['channels'])['status'] == [
        'CHANNELD_NORMAL:Reconnected, and reestablished.',
        'CHANNELD_NORMAL:Funding transaction locked. Channel announced.'])

    # But l2 still sends its own sigs on reconnect
    l2.daemon.wait_for_logs([r'peer_out WIRE_ANNOUNCEMENT_SIGNATURES',
                             r'peer_out WIRE_ANNOUNCEMENT_SIGNATURES'])

    # l1 only sent them the first time
    assert ''.join(l1.daemon.logs).count(r'peer_out WIRE_ANNOUNCEMENT_SIGNATURES') == 1


@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_shutdown_awaiting_lockin(node_factory, bitcoind):
    l1 = node_factory.get_node()
    l2 = node_factory.get_node(options={'funding-confirms': 3})
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundwallet(10**6 + 1000000)
    chanid = l1.rpc.fundchannel(l2.info['id'], 10**6)['channel_id']

    # Technically, this is async to fundchannel.
    l1.daemon.wait_for_log('sendrawtx exit 0')
    bitcoind.generate_block(1)

    l1.rpc.close(chanid)

    l1_state = 'DUALOPEND' if l1.config('experimental-dual-fund') else 'CHANNELD'
    l2_state = 'DUALOPEND' if l1.config('experimental-dual-fund') else 'CHANNELD'
    l1.daemon.wait_for_log('{}_AWAITING_LOCKIN to CHANNELD_SHUTTING_DOWN'.format(l1_state))
    l2.daemon.wait_for_log('{}_AWAITING_LOCKIN to CHANNELD_SHUTTING_DOWN'.format(l2_state))

    l1.daemon.wait_for_log('CHANNELD_SHUTTING_DOWN to CLOSINGD_SIGEXCHANGE')
    l2.daemon.wait_for_log('CHANNELD_SHUTTING_DOWN to CLOSINGD_SIGEXCHANGE')

    # And should put closing into mempool (happens async, so
    # CLOSINGD_COMPLETE may come first).
    l1.daemon.wait_for_logs(['sendrawtx exit 0', 'to CLOSINGD_COMPLETE'])
    l2.daemon.wait_for_logs(['sendrawtx exit 0', 'to CLOSINGD_COMPLETE'])
    assert bitcoind.rpc.getmempoolinfo()['size'] == 1

    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('to ONCHAIN')
    l2.daemon.wait_for_log('to ONCHAIN')

    bitcoind.generate_block(100)
    wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
    wait_for(lambda: l2.rpc.listpeers()['peers'] == [])


@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_funding_change(node_factory, bitcoind):
    """Add some funds, fund a channel, and make sure we remember the change
    """
    l1, l2 = node_factory.line_graph(2, fundchannel=False)
    l1.fundwallet(10000000)
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [l1])

    outputs = l1.db_query('SELECT value FROM outputs WHERE status=0;')
    assert only_one(outputs)['value'] == 10000000

    l1.rpc.fundchannel(l2.info['id'], 1000000)
    bitcoind.generate_block(1, wait_for_mempool=1)
    sync_blockheight(bitcoind, [l1])
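
    # (In the wallet's outputs table, status 0 appears to mean "available",
    # 1 "reserved" and 2 "spent", hence the keys checked below.)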
    outputs = {r['status']: r['value'] for r in l1.db_query(
        'SELECT status, SUM(value) AS value FROM outputs GROUP BY status;')}

    # The 10m out is spent and we have a change output of 9m-fee
    assert outputs[0] > 8990000
    assert outputs[2] == 10000000


@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_funding_all(node_factory, bitcoind):
    """Add some funds, fund a channel using all funds, make sure no funds remain
    """
    l1, l2 = node_factory.line_graph(2, fundchannel=False)
    l1.fundwallet(0.1 * 10**8)
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [l1])

    outputs = l1.db_query('SELECT value FROM outputs WHERE status=0;')
    assert only_one(outputs)['value'] == 10000000

    l1.rpc.fundchannel(l2.info['id'], "all")

    outputs = l1.db_query('SELECT value FROM outputs WHERE status=0;')
    assert len(outputs) == 0


@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_funding_all_too_much(node_factory):
    """Add more than max possible funds, fund a channel using all funds we can.
    """
    l1, l2 = node_factory.line_graph(2, fundchannel=False)
    addr, txid = l1.fundwallet(2**24 + 10000)
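
    # ("all" should be capped at the non-wumbo maximum of 2**24 - 1 sat,
    # i.e. 16777215sat, as asserted at the end of this test.)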
    l1.rpc.fundchannel(l2.info['id'], "all")

    # One reserved, confirmed output spent above, and one change.
    outputs = l1.rpc.listfunds()['outputs']

    spent = only_one([o for o in outputs if o['status'] == 'confirmed'])
    assert spent['txid'] == txid
    assert spent['address'] == addr
    assert spent['reserved'] is True

    pending = only_one([o for o in outputs if o['status'] != 'confirmed'])
    assert pending['status'] == 'unconfirmed'
    assert pending['reserved'] is False

    assert only_one(l1.rpc.listfunds()['channels'])['amount_msat'] == Millisatoshi(str(2**24 - 1) + "sat")


@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_funding_fail(node_factory, bitcoind):
    """Add some funds, fund a channel without enough funds"""
    # Previous runs with same bitcoind can leave funds!
    max_locktime = 5 * 6 * 24
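    # (5 days * 6 blocks/hour * 24 hours == 720 blocks)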
    l1 = node_factory.get_node(random_hsm=True, options={'max-locktime-blocks': max_locktime})
    l2 = node_factory.get_node(options={'watchtime-blocks': max_locktime + 1})
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    funds = 1000000
    addr = l1.rpc.newaddr()['bech32']
    l1.bitcoin.rpc.sendtoaddress(addr, funds / 10**8)
    bitcoind.generate_block(1)

    # Wait for it to arrive.
    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) > 0)

    # Fail because l1 dislikes l2's huge locktime.
    with pytest.raises(RpcError, match=r'to_self_delay \d+ larger than \d+'):
        l1.rpc.fundchannel(l2.info['id'], int(funds / 10))

    # Peers disconnect on failure.
    wait_for(lambda: len(l1.rpc.listpeers()['peers']) == 0)
    wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)

    # Restart l2 without ridiculous locktime.
    del l2.daemon.opts['watchtime-blocks']
    l2.restart()
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # We don't have enough left to cover fees if we try to spend it all.
    with pytest.raises(RpcError, match=r'not afford'):
        l1.rpc.fundchannel(l2.info['id'], funds)

    # Should still be connected (we didn't contact the peer)
    assert only_one(l1.rpc.listpeers()['peers'])['connected']
    l2.daemon.wait_for_log('Handed peer, entering loop')
    assert only_one(l2.rpc.listpeers()['peers'])['connected']

    # This works.
    l1.rpc.fundchannel(l2.info['id'], int(funds / 10))


@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_funding_toolarge(node_factory, bitcoind):
    """Try to create a giant channel"""
    l1 = node_factory.get_node()
    l2 = node_factory.get_node()
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # Send funds.
    amount = 2**24
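    # (2**24sat == 16777216: without wumbo support, funding must stay below
    # this, hence the "Amount exceeded 16777215" error checked below.)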
    bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'], amount / 10**8 + 0.01)
    bitcoind.generate_block(1)

    # Wait for it to arrive.
    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) > 0)

    # Fail to open (too large)
    with pytest.raises(RpcError, match=r'Amount exceeded 16777215'):
        l1.rpc.fundchannel(l2.info['id'], amount)

    # This should work.
    amount = amount - 1
    l1.rpc.fundchannel(l2.info['id'], amount)


@unittest.skipIf(TEST_NETWORK != 'regtest', "elementsd doesn't yet support PSBT features we need")
@pytest.mark.openchannel('v2')
def test_v2_open(node_factory, bitcoind, chainparams):
    l1, l2 = node_factory.get_nodes(2)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    amount = 2**24
    bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'], amount / 10**8 + 0.01)
    bitcoind.generate_block(1)
    # Wait for it to arrive.
    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) > 0)

    l1.rpc.fundchannel(l2.info['id'], 'all')

    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [l1])
    l1.daemon.wait_for_log('to CHANNELD_NORMAL')

    # Send a payment over the channel
    p = l2.rpc.invoice(100000, 'testpayment', 'desc')
    l1.rpc.pay(p['bolt11'])
    result = l1.rpc.waitsendpay(p['payment_hash'])
    assert(result['status'] == 'complete')


@pytest.mark.openchannel('v1')
def test_funding_push(node_factory, bitcoind, chainparams):
    """Try to push peer some sats"""
    # We track balances, to verify that accounting is ok.
    coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
    l1 = node_factory.get_node(options={'plugin': coin_mvt_plugin})
    l2 = node_factory.get_node(options={'plugin': coin_mvt_plugin})
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # Send funds.
    amount = 2**24
    push_msat = 20000 * 1000
    bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'], amount / 10**8 + 0.01)
    bitcoind.generate_block(1)

    # Wait for it to arrive.
    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) > 0)

    # Fail to open (try to push too much)
    with pytest.raises(RpcError, match=r'Requested to push_msat of 20000000msat is greater than available funding amount 10000sat'):
        l1.rpc.fundchannel(l2.info['id'], 10000, push_msat=push_msat)

    # This should work.
    amount = amount - 1
    l1.rpc.fundchannel(l2.info['id'], amount, push_msat=push_msat)

    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [l1])
    funds = only_one(l1.rpc.listfunds()['channels'])
    assert funds['our_amount_msat'] + push_msat == funds['amount_msat']

    chanid = first_channel_id(l2, l1)
    channel_mvts_1 = [
        {'type': 'chain_mvt', 'credit_msat': 16777215000, 'debit_msat': 0, 'tags': ['channel_open', 'opener']},
        {'type': 'channel_mvt', 'credit_msat': 0, 'debit_msat': 20000000, 'tags': ['pushed'], 'fees_msat': '0msat'},
    ]
    channel_mvts_2 = [
        {'type': 'chain_mvt', 'credit_msat': 0, 'debit_msat': 0, 'tags': ['channel_open']},
        {'type': 'channel_mvt', 'credit_msat': 20000000, 'debit_msat': 0, 'tags': ['pushed'], 'fees_msat': '0msat'},
    ]
    check_coin_moves(l1, chanid, channel_mvts_1, chainparams)
    check_coin_moves(l2, chanid, channel_mvts_2, chainparams)

    assert account_balance(l1, chanid) == amount * 1000 - push_msat
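

# The accounting identities asserted above, spelled out: pushing msat at open
# moves value to the peer's side, but the channel total stays at the funding
# amount, so our side holds total minus push. A minimal sketch (helper name
# is illustrative, not part of the suite):
def _push_accounting(amount_sat, push_msat):
    """Return (our_msat, total_msat) after opening with push_msat."""
    total_msat = amount_sat * 1000
    return total_msat - push_msat, total_msat


_our, _total = _push_accounting(2**24 - 1, 20000 * 1000)
assert _our + 20000 * 1000 == _total == 16777215000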
2021-05-07 20:39:23 +02:00
@pytest.mark.openchannel ( ' v1 ' )
@pytest.mark.openchannel ( ' v2 ' )
2021-05-11 17:28:38 +02:00
@pytest.mark.developer
2019-06-08 15:57:24 +02:00
def test_funding_by_utxos ( node_factory , bitcoind ) :
""" Fund a channel with specific utxos """
l1 , l2 , l3 = node_factory . line_graph ( 3 , fundchannel = False )
# Get 3 differents utxo
l1 . fundwallet ( 0.01 * 10 * * 8 )
l1 . fundwallet ( 0.01 * 10 * * 8 )
l1 . fundwallet ( 0.01 * 10 * * 8 )
wait_for ( lambda : len ( l1 . rpc . listfunds ( ) [ " outputs " ] ) == 3 )
utxos = [ utxo [ " txid " ] + " : " + str ( utxo [ " output " ] ) for utxo in l1 . rpc . listfunds ( ) [ " outputs " ] ]
# Fund with utxos we don't own
2020-08-28 04:37:57 +02:00
with pytest . raises ( RpcError , match = r " Unknown UTXO " ) :
2019-06-08 15:57:24 +02:00
l3 . rpc . fundchannel ( l2 . info [ " id " ] , int ( 0.01 * 10 * * 8 ) , utxos = utxos )
# Fund with an empty array
with pytest . raises ( RpcError , match = r " Please specify an array of \\ ' txid:output_index \\ ' , not \" * \" " ) :
l1 . rpc . fundchannel ( l2 . info [ " id " ] , int ( 0.01 * 10 * * 8 ) , utxos = [ ] )
# Fund a channel from some of the utxos, without change
l1 . rpc . fundchannel ( l2 . info [ " id " ] , " all " , utxos = utxos [ 0 : 2 ] )
# Fund a channel from the rest of utxos, with change
l1 . rpc . connect ( l3 . info [ " id " ] , " localhost " , l3 . port )
l1 . rpc . fundchannel ( l3 . info [ " id " ] , int ( 0.007 * 10 * * 8 ) , utxos = [ utxos [ 2 ] ] )
2020-08-28 04:40:57 +02:00
# Fund another channel with already reserved utxos
with pytest . raises ( RpcError , match = r " UTXO.*already reserved " ) :
l1 . rpc . fundchannel ( l3 . info [ " id " ] , int ( 0.01 * 10 * * 8 ) , utxos = utxos )
bitcoind . generate_block ( 1 , wait_for_mempool = 1 )
sync_blockheight ( bitcoind , [ l1 ] )
2019-06-08 15:57:24 +02:00
# Fund another channel with already spent utxos
2020-08-28 04:37:57 +02:00
with pytest . raises ( RpcError , match = r " Already spent UTXO " ) :
2019-06-08 15:57:24 +02:00
l1 . rpc . fundchannel ( l3 . info [ " id " ] , int ( 0.01 * 10 * * 8 ) , utxos = utxos )
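

# `fundchannel`'s `utxos` parameter takes 'txid:vout' selectors, exactly what
# the list comprehension above builds from `listfunds` outputs. A standalone
# sketch of that formatting (helper name is illustrative):
def _utxo_selector(txid, vout):
    """Format a listfunds output as a fundchannel utxo selector."""
    return "{}:{}".format(txid, vout)


assert _utxo_selector("ab" * 32, 0).endswith(":0")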


@pytest.mark.developer("needs dev_forget_channel")
@pytest.mark.openchannel('v1')
def test_funding_external_wallet_corners(node_factory, bitcoind):
    l1 = node_factory.get_node(may_reconnect=True)
    l2 = node_factory.get_node(may_reconnect=True)

    amount = 2**24
    l1.fundwallet(amount + 10000000)

    amount = amount - 1

    # make sure we can generate PSBTs.
    addr = l1.rpc.newaddr()['bech32']
    bitcoind.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
    bitcoind.generate_block(1)
    wait_for(lambda: len(l1.rpc.listfunds()["outputs"]) != 0)

    # Some random (valid) psbt
    psbt = l1.rpc.fundpsbt(amount, '253perkw', 250, reserve=0)['psbt']

    with pytest.raises(RpcError, match=r'Unknown peer'):
        l1.rpc.fundchannel_start(l2.info['id'], amount)

    with pytest.raises(RpcError, match=r'Unknown peer'):
        l1.rpc.fundchannel_complete(l2.info['id'], psbt)

    # Should not be able to continue without being in progress.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    with pytest.raises(RpcError, match=r'No channel funding in progress.'):
        l1.rpc.fundchannel_complete(l2.info['id'], psbt)

    # Fail to open (too large)
    with pytest.raises(RpcError, match=r'Amount exceeded 16777215'):
        l1.rpc.fundchannel_start(l2.info['id'], amount + 1)

    start = l1.rpc.fundchannel_start(l2.info['id'], amount)
    with pytest.raises(RpcError, match=r'Already funding channel'):
        l1.rpc.fundchannel(l2.info['id'], amount)

    # Can't complete with incorrect amount (unchecked on Elements)
    if TEST_NETWORK == 'regtest':
        wrongamt = l1.rpc.txprepare([{start['funding_address']: amount - 1}])
        with pytest.raises(RpcError, match=r'Output to open channel is .*, should be .*'):
            l1.rpc.fundchannel_complete(l2.info['id'], wrongamt['psbt'])
        l1.rpc.txdiscard(wrongamt['txid'])

    # Can't complete with incorrect address.
    wrongaddr = l1.rpc.txprepare([{l1.rpc.newaddr()['bech32']: amount}])
    with pytest.raises(RpcError, match=r'No output to open channel'):
        l1.rpc.fundchannel_complete(l2.info['id'], wrongaddr['psbt'])
    l1.rpc.txdiscard(wrongaddr['txid'])

    l1.rpc.fundchannel_cancel(l2.info['id'])

    # Cancelling causes disconnection.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    amount2 = 1000000
    funding_addr = l1.rpc.fundchannel_start(l2.info['id'], amount2)['funding_address']

    # Create the funding transaction
    prep = l1.rpc.txprepare([{funding_addr: amount2}])
    decode = bitcoind.rpc.decoderawtransaction(prep['unsigned_tx'])
    assert decode['txid'] == prep['txid']

    # Be sure fundchannel_complete is successful
    assert l1.rpc.fundchannel_complete(l2.info['id'], prep['psbt'])['commitments_secured']

    # Peer shouldn't be able to cancel channel
    with pytest.raises(RpcError, match=r'Cannot cancel channel that was initiated by peer'):
        l2.rpc.fundchannel_cancel(l1.info['id'])

    # We can cancel channel after fundchannel_complete
    assert l1.rpc.fundchannel_cancel(l2.info['id'])['cancelled']
    # But must unreserve inputs manually.
    l1.rpc.txdiscard(prep['txid'])

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    funding_addr = l1.rpc.fundchannel_start(l2.info['id'], amount)['funding_address']
    prep = l1.rpc.txprepare([{funding_addr: amount}])
    assert l1.rpc.fundchannel_complete(l2.info['id'], prep['psbt'])['commitments_secured']

    # Check that we can still cancel when the peer is disconnected
    l1.rpc.disconnect(l2.info['id'], force=True)
    wait_for(lambda: not only_one(l1.rpc.listpeers()['peers'])['connected'])

    wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state']
             == 'CHANNELD_AWAITING_LOCKIN')

    assert l1.rpc.fundchannel_cancel(l2.info['id'])['cancelled']
    assert len(l1.rpc.listpeers()['peers']) == 0

    # l2 still has the channel open/waiting
    wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state']
             == 'CHANNELD_AWAITING_LOCKIN')

    # on reconnect, channel should get destroyed
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.daemon.wait_for_log('Unknown channel .* for WIRE_CHANNEL_REESTABLISH')
    wait_for(lambda: len(l1.rpc.listpeers()['peers']) == 0)
    wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)

    # But must unreserve inputs manually.
    l1.rpc.txdiscard(prep['txid'])

    # We have to connect again, because we got disconnected when everything errored.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    funding_addr = l1.rpc.fundchannel_start(l2.info['id'], amount)['funding_address']
    prep = l1.rpc.txprepare([{funding_addr: amount}])

    # A successful funding_complete will always have a commitments_secured that is true,
    # otherwise it would have failed.
    assert l1.rpc.fundchannel_complete(l2.info['id'], prep['psbt'])['commitments_secured']

    l1.rpc.txsend(prep['txid'])
    with pytest.raises(RpcError, match=r'.* been broadcast.*'):
        l1.rpc.fundchannel_cancel(l2.info['id'])
    l1.rpc.close(l2.info['id'])
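

# For reference, the happy path all the corners above are probing: a condensed
# sketch (not part of the original suite) of the manual v1 open flow, using
# only RPCs exercised in this file. `opener` and `peer` are illustrative names
# for two already-connected test nodes:
def _manual_v1_open(opener, peer, amount_sat):
    """Sketch: fundchannel_start -> txprepare -> fundchannel_complete -> txsend."""
    addr = opener.rpc.fundchannel_start(peer.info['id'], amount_sat)['funding_address']
    prep = opener.rpc.txprepare([{addr: amount_sat}])
    # Commitments must be secured before the funding tx may be broadcast.
    assert opener.rpc.fundchannel_complete(peer.info['id'], prep['psbt'])['commitments_secured']
    opener.rpc.txsend(prep['txid'])
    return prep['txid']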


@pytest.mark.developer("needs dev_forget_channel")
@pytest.mark.openchannel('v2')
def test_funding_v2_corners(node_factory, bitcoind):
    l1 = node_factory.get_node(may_reconnect=True)
    l2 = node_factory.get_node(may_reconnect=True)

    amount = 2**24
    l1.fundwallet(amount + 10000000)
    amount = amount - 1

    # make sure we can generate PSBTs.
    addr = l1.rpc.newaddr()['bech32']
    bitcoind.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
    bitcoind.generate_block(1)
    wait_for(lambda: len(l1.rpc.listfunds()["outputs"]) != 0)

    # Some random (valid) psbt
    psbt = l1.rpc.fundpsbt(amount, '253perkw', 250, reserve=0)['psbt']
    nonexist_chanid = '11' * 32

    with pytest.raises(RpcError, match=r'Unknown peer'):
        l1.rpc.openchannel_init(l2.info['id'], amount, psbt)

    with pytest.raises(RpcError, match=r'Unknown channel'):
        l1.rpc.openchannel_update(nonexist_chanid, psbt)

    # Should not be able to continue without being in progress.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    with pytest.raises(RpcError, match=r'Unknown channel'):
        l1.rpc.openchannel_signed(nonexist_chanid, psbt)

    # Fail to open (too large)
    with pytest.raises(RpcError, match=r'Amount exceeded 16777215'):
        l1.rpc.openchannel_init(l2.info['id'], amount + 1, psbt)

    start = l1.rpc.openchannel_init(l2.info['id'], amount, psbt)
    with pytest.raises(RpcError, match=r'Channel funding in-progress. DUALOPEND_OPEN_INIT'):
        l1.rpc.fundchannel(l2.info['id'], amount)

    # We can abort a channel
    l1.rpc.openchannel_abort(start['channel_id'])

    # Should be able to 'restart' after cancelling
    amount2 = 1000000
    l1.rpc.unreserveinputs(psbt)
    psbt = l1.rpc.fundpsbt(amount2, '253perkw', 250, reserve=0)['psbt']
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    start = l1.rpc.openchannel_init(l2.info['id'], amount2, psbt)

    # Check that we're connected.
    # This caused a valgrind crash prior to this commit.
    assert only_one(l2.rpc.listpeers()['peers'])

    # Disconnect peer.
    l1.rpc.disconnect(l2.info['id'], force=True)
    wait_for(lambda: len(l1.rpc.listpeers()['peers']) == 0)

    with pytest.raises(RpcError, match=r'Unknown channel'):
        l1.rpc.openchannel_abort(start['channel_id'])

    wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)

    with pytest.raises(RpcError, match=r'Unknown channel'):
        l2.rpc.openchannel_abort(start['channel_id'])

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    start = l1.rpc.openchannel_init(l2.info['id'], amount2, psbt)

    # Be sure openchannel_update is successful
    assert l1.rpc.openchannel_update(start['channel_id'], start['psbt'])['commitments_secured']
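

# The dual-funded (v2) analogue of the manual flow: fundpsbt supplies the
# opener's inputs, then openchannel_init/update/signed drive the negotiation.
# A condensed sketch under the same assumptions as the v1 helper above
# (illustrative names, signing elided, not part of the suite):
def _manual_v2_open(opener, peer, amount_sat, feerate='253perkw'):
    """Sketch: fundpsbt -> openchannel_init -> openchannel_update."""
    psbt = opener.rpc.fundpsbt(amount_sat, feerate, 250, reserve=0)['psbt']
    start = opener.rpc.openchannel_init(peer.info['id'], amount_sat, psbt)
    updated = opener.rpc.openchannel_update(start['channel_id'], start['psbt'])
    assert updated['commitments_secured']
    # A signed PSBT would then go to openchannel_signed to complete the open.
    return start['channel_id'], updated['psbt']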


@unittest.skipIf(SLOW_MACHINE and not VALGRIND, "Way too taxing on CI machines")
@pytest.mark.openchannel('v1')
def test_funding_cancel_race(node_factory, bitcoind, executor):
    l1 = node_factory.get_node()

    # make sure we can generate PSBTs.
    addr = l1.rpc.newaddr()['bech32']
    bitcoind.rpc.sendtoaddress(addr, 200000 / 10**8)
    bitcoind.generate_block(1)
    wait_for(lambda: len(l1.rpc.listfunds()["outputs"]) != 0)

    if node_factory.valgrind:
        num = 5
    else:
        num = 100

    # Allow the other nodes to log unexpected WIRE_FUNDING_CREATED messages
    nodes = node_factory.get_nodes(num, opts={'allow_broken_log': True})

    num_complete = 0
    num_cancel = 0

    for count, n in enumerate(nodes):
        l1.rpc.connect(n.info['id'], 'localhost', n.port)
        start = l1.rpc.fundchannel_start(n.info['id'], "100000sat")

        prep = l1.rpc.txprepare([{start['funding_address']: "100000sat"}])

        # Submit two of each at once.
        completes = []
        cancels = []

        # Switch order around.
        for i in range(4):
            if (i + count) % 2 == 0:
                completes.append(executor.submit(l1.rpc.fundchannel_complete, n.info['id'], prep['psbt']))
            else:
                cancels.append(executor.submit(l1.rpc.fundchannel_cancel, n.info['id']))

        # Only up to one should succeed.
        success = False
        for c in completes:
            try:
                c.result(TIMEOUT)
                num_complete += 1
                assert not success
                success = True
            except RpcError:
                pass

        # At least one of these must succeed, regardless of whether
        # the completes succeeded or not.
        cancelled = False
        for c in cancels:
            try:
                c.result(TIMEOUT)
                cancelled = True
            except RpcError:
                pass
        # cancel always succeeds, as per Sequential Consistency.
        # Either the cancel occurred before complete, in which
        # case it prevents complete from succeeding, or it
        # occurred after complete, in which case it errors the
        # channel to force the remote to forget it.
        assert cancelled
        num_cancel += 1

        # Free up funds for next time
        l1.rpc.txdiscard(prep['txid'])

    print("Cancelled {} complete {}".format(num_cancel, num_complete))
    assert num_cancel == len(nodes)

    # We should have raced at least once!
    if not node_factory.valgrind:
        assert num_cancel > 0
        assert num_complete > 0

    # Speed up shutdown by stopping them all concurrently
    executor.map(lambda n: n.stop(), node_factory.nodes)


@unittest.skipIf(SLOW_MACHINE and not VALGRIND, "Way too taxing on CI machines")
@pytest.mark.openchannel('v2')
def test_funding_v2_cancel_race(node_factory, bitcoind, executor):
    l1 = node_factory.get_node()

    # make sure we can generate PSBTs.
    addr = l1.rpc.newaddr()['bech32']
    bitcoind.rpc.sendtoaddress(addr, 2000000 / 10**8)
    bitcoind.generate_block(1)
    wait_for(lambda: len(l1.rpc.listfunds()["outputs"]) != 0)

    if node_factory.valgrind:
        num = 5
    else:
        num = 100

    nodes = node_factory.get_nodes(num)

    num_complete = 0
    num_cancel = 0
    amount = 100000

    for count, n in enumerate(nodes):
        l1.rpc.connect(n.info['id'], 'localhost', n.port)
        psbt = l1.rpc.fundpsbt(amount, '7500perkw', 250, reserve=0,
                               excess_as_change=True,
                               min_witness_weight=110)['psbt']
        start = l1.rpc.openchannel_init(n.info['id'], amount, psbt)

        # Submit two of each at once.
        completes = []
        cancels = []

        # Switch order around.
        for i in range(4):
            if (i + count) % 2 == 0:
                completes.append(executor.submit(l1.rpc.openchannel_update,
                                                 start['channel_id'],
                                                 start['psbt']))
            else:
                cancels.append(executor.submit(l1.rpc.openchannel_abort,
                                               start['channel_id']))

        # Only up to one should succeed.
        success = False
        for c in completes:
            try:
                c.result(TIMEOUT)
                num_complete += 1
                assert not success
                success = True
            except RpcError:
                pass

        for c in cancels:
            try:
                c.result(TIMEOUT)
                num_cancel += 1
            except RpcError:
                pass

        # Free up funds for next time
        l1.rpc.unreserveinputs(psbt)

    print("Cancelled {} complete {}".format(num_cancel, num_complete))

    # We should have raced at least once!
    if not node_factory.valgrind:
        assert num_cancel > 0
        assert num_complete > 0

    # Speed up shutdown by stopping them all concurrently
    executor.map(lambda n: n.stop(), node_factory.nodes)


@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
@unittest.skipIf(TEST_NETWORK != 'regtest', "External wallet support doesn't work with elements yet.")
def test_funding_close_upfront(node_factory, bitcoind):
    opts = {'plugin': os.path.join(os.getcwd(), 'tests/plugins/openchannel_hook_accepter.py')}

    l1 = node_factory.get_node()
    l2 = node_factory.get_node(options=opts)

    # The 'accepter_close_to' plugin uses the channel funding amount
    # to determine whether or not to include a 'close_to' address.
    amt_normal = 100000     # continues without returning a close_to
    amt_addr = 100003       # returns valid regtest address
    remote_valid_addr = 'bcrt1q7gtnxmlaly9vklvmfj06amfdef3rtnrdazdsvw'

    def has_normal_channels(l1, l2):
        if l1.rpc.listpeers(l2.info['id'])['peers'] == []:
            return False
        return any([c['state'] == 'CHANNELD_AWAITING_LOCKIN'
                    or c['state'] == 'CHANNELD_NORMAL'
                    for c in only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels']])

    def _fundchannel(l1, l2, amount, close_to):
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
        assert l1.rpc.listpeers()['peers'][0]['id'] == l2.info['id']

        # Make sure both consider any previous channels closed.
        wait_for(lambda: not has_normal_channels(l1, l2))
        wait_for(lambda: not has_normal_channels(l2, l1))

        _, resp = l1.fundchannel(l2, amount, close_to=close_to)
        if close_to:
            assert resp['close_to']
        else:
            assert 'close_to' not in resp

        for node in [l1, l2]:
            channel = node.rpc.listpeers()['peers'][0]['channels'][-1]
            assert amount * 1000 == channel['total_msat']

    def _close(src, dst, addr=None):
        """Close the channel from src to dst, with the specified address.

        Returns the addresses of the outputs in the close tx. Raises an
        error if some expectations are not met.
        """
        r = src.rpc.close(dst.info['id'], destination=addr)
        assert r['type'] == 'mutual'
        tx = bitcoind.rpc.decoderawtransaction(r['tx'])

        addrs = [scriptpubkey_addr(vout['scriptPubKey']) for vout in tx['vout']]
        bitcoind.generate_block(1, wait_for_mempool=[r['txid']])
        sync_blockheight(bitcoind, [l1, l2])
        return addrs

    # check that normal peer close works
    _fundchannel(l1, l2, amt_normal, None)
    _close(l1, l2)

    # check that you can provide a closing address upfront
    addr = l1.rpc.newaddr()['bech32']
    _fundchannel(l1, l2, amt_normal, addr)
    # confirm that it appears in listpeers
    assert addr == only_one(l1.rpc.listpeers()['peers'])['channels'][1]['close_to_addr']
    assert _close(l1, l2) == [addr]

    # check that passing in the same addr to close works
    addr = bitcoind.rpc.getnewaddress()
    _fundchannel(l1, l2, amt_normal, addr)
    assert addr == only_one(l1.rpc.listpeers()['peers'])['channels'][2]['close_to_addr']
    assert _close(l1, l2, addr) == [addr]

    # check that remote peer closing works as expected (and that remote's close_to works)
    _fundchannel(l1, l2, amt_addr, addr)
    # send some money to remote so that they have a closeout
    l1.rpc.pay(l2.rpc.invoice((amt_addr // 2) * 1000, 'test_remote_close_to', 'desc')['bolt11'])
    assert only_one(l2.rpc.listpeers()['peers'])['channels'][-1]['close_to_addr'] == remote_valid_addr

    # The tx outputs must be one of the two permutations
    assert _close(l2, l1) in ([addr, remote_valid_addr], [remote_valid_addr, addr])

    # check that passing in a different addr to close causes an RPC error
    addr2 = l1.rpc.newaddr()['bech32']
    _fundchannel(l1, l2, amt_normal, addr)
    with pytest.raises(RpcError, match=r'does not match previous shutdown script'):
        l1.rpc.close(l2.info['id'], destination=addr2)


@unittest.skipIf(TEST_NETWORK != 'regtest', "External wallet support doesn't work with elements yet.")
@pytest.mark.openchannel('v1')
def test_funding_external_wallet(node_factory, bitcoind):
    l1, l2, l3 = node_factory.get_nodes(3, opts=[{'funding-confirms': 2},
                                                 {'funding-confirms': 2}, {}])

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    assert l1.rpc.listpeers()['peers'][0]['id'] == l2.info['id']

    amount = 2**24 - 1
    address = l1.rpc.fundchannel_start(l2.info['id'], amount)['funding_address']
    assert len(address) > 0

    peer = l1.rpc.listpeers()['peers'][0]
    # Peer should still be connected and in state waiting for funding_txid
    assert peer['id'] == l2.info['id']
    r = re.compile('Funding channel start: awaiting funding_txid with output to .*')
    assert any(r.match(line) for line in peer['channels'][0]['status'])
    assert 'OPENINGD' in peer['channels'][0]['state']

    # Trying to start a second funding should not work, it's in progress.
    with pytest.raises(RpcError, match=r'Already funding channel'):
        l1.rpc.fundchannel_start(l2.info['id'], amount)

    # 'Externally' fund the address from fundchannel_start
    psbt = bitcoind.rpc.walletcreatefundedpsbt([], [{address: amount / 10**8}])['psbt']
    assert l1.rpc.fundchannel_complete(l2.info['id'], psbt)['commitments_secured']

    # Broadcast the transaction manually
    process = bitcoind.rpc.walletprocesspsbt(psbt)
    assert process['complete'] is True

    tx = bitcoind.rpc.finalizepsbt(process['psbt'])
    txid = bitcoind.rpc.sendrawtransaction(tx['hex'])

    bitcoind.generate_block(1)
    l1.daemon.wait_for_log(r'Funding tx {} depth 1 of 2'.format(txid))

    # Check that a tx broadcast by a third party can be caught.
    # We can only catch it once the transaction is onchain.
    with pytest.raises(RpcError, match=r'.* been broadcast.*'):
        l1.rpc.fundchannel_cancel(l2.info['id'])

    # Confirm that channel locks in
    bitcoind.generate_block(1)

    for node in [l1, l2]:
        node.daemon.wait_for_log(r'State changed from CHANNELD_AWAITING_LOCKIN to CHANNELD_NORMAL')
        channel = node.rpc.listpeers()['peers'][0]['channels'][0]
        assert amount * 1000 == channel['total_msat']

    # Test that we don't crash if peer disconnects after fundchannel_start
    l2.connect(l3)
    l2.rpc.fundchannel_start(l3.info["id"], amount)
    l3.rpc.close(l2.info["id"])
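

# The bitcoind-side PSBT dance above, condensed for reference: a minimal
# sketch (not part of the original suite) assuming a `bitcoind` fixture whose
# wallet holds enough funds for `amount_btc`:
def _external_fund_and_broadcast(bitcoind, address, amount_btc):
    """Sketch: walletcreatefundedpsbt -> walletprocesspsbt -> finalizepsbt -> send."""
    psbt = bitcoind.rpc.walletcreatefundedpsbt([], [{address: amount_btc}])['psbt']
    signed = bitcoind.rpc.walletprocesspsbt(psbt)
    assert signed['complete']
    raw = bitcoind.rpc.finalizepsbt(signed['psbt'])['hex']
    # Returns the txid of the broadcast funding transaction.
    return bitcoind.rpc.sendrawtransaction(raw)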


@unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd doesnt yet support PSBT features we need')
@pytest.mark.openchannel('v1')  # We manually turn on dual-funding for select nodes
def test_multifunding_v1_v2_mixed(node_factory, bitcoind):
    '''
    Simple test for multifundchannel, using v1 + v2
    '''
    options = [{'experimental-dual-fund': None},
               {'funder-policy': 'match',
                'funder-policy-mod': 100,
                'funder-fuzz-percent': 0,
                'experimental-dual-fund': None},
               {'funder-policy': 'match',
                'funder-policy-mod': 100,
                'funder-fuzz-percent': 0,
                'experimental-dual-fund': None},
               {}]
    l1, l2, l3, l4 = node_factory.get_nodes(4, opts=options)
    l1.fundwallet(2000000)
    l2.fundwallet(2000000)
    l3.fundwallet(2000000)

    destinations = [{"id": '{}@localhost:{}'.format(l2.info['id'], l2.port),
                     "amount": 50000},
                    {"id": '{}@localhost:{}'.format(l3.info['id'], l3.port),
                     "amount": 50000},
                    {"id": '{}@localhost:{}'.format(l4.info['id'], l4.port),
                     "amount": 50000}]

    l1.rpc.multifundchannel(destinations)
    mine_funding_to_announce(bitcoind, [l1, l2, l3, l4], wait_for_mempool=1)

    for node in [l1, l2, l3, l4]:
        node.daemon.wait_for_log(r'to CHANNELD_NORMAL')

    for ldest in [l2, l3, l4]:
        inv = ldest.rpc.invoice(5000, 'inv', 'inv')['bolt11']
        l1.rpc.pay(inv)
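

# multifundchannel destinations are plain dicts whose `id` may carry an inline
# connect hint ('nodeid@host:port'), as built inline above and in the tests
# below. A small illustrative helper (not part of the suite):
def _destination(node, amount_sat):
    """Build a multifundchannel destination with an inline connect hint."""
    return {"id": '{}@localhost:{}'.format(node.info['id'], node.port),
            "amount": amount_sat}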


@unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd doesnt yet support PSBT features we need')
@pytest.mark.openchannel('v2')
def test_multifunding_v2_exclusive(node_factory, bitcoind):
    '''
    Simple test for multifundchannel, using v2
    '''
    # Two of three will reply with inputs of their own
    options = [{},
               {'funder-policy': 'match',
                'funder-policy-mod': 100,
                'funder-fuzz-percent': 0,
                'funder-lease-requests-only': False},
               {'funder-policy': 'match',
                'funder-policy-mod': 100,
                'funder-fuzz-percent': 0,
                'funder-lease-requests-only': False},
               {}]
    l1, l2, l3, l4 = node_factory.get_nodes(4, opts=options)
    l1.fundwallet(2000000)
    l2.fundwallet(2000000)
    l3.fundwallet(2000000)

    destinations = [{"id": '{}@localhost:{}'.format(l2.info['id'], l2.port),
                     "amount": 50000},
                    {"id": '{}@localhost:{}'.format(l3.info['id'], l3.port),
                     "amount": 50000},
                    {"id": '{}@localhost:{}'.format(l4.info['id'], l4.port),
                     "amount": 50000}]

    l1.rpc.multifundchannel(destinations)
    bitcoind.generate_block(6, wait_for_mempool=1)

    for node in [l1, l2, l3, l4]:
        node.daemon.wait_for_log(r'to CHANNELD_NORMAL')

    # For dual-funded channels, pay from accepter to initiator
    for ldest in [l2, l3]:
        inv = l1.rpc.invoice(5000, 'inv' + ldest.info['id'], 'inv')['bolt11']
        ldest.rpc.pay(inv)

    # Then pay other direction
    for ldest in [l2, l3, l4]:
        inv = ldest.rpc.invoice(10000, 'inv', 'inv')['bolt11']
        l1.rpc.pay(inv)


@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_multifunding_simple(node_factory, bitcoind):
    '''
    Simple test for multifundchannel.
    '''
    l1, l2, l3, l4 = node_factory.get_nodes(4)

    l1.fundwallet(2000000)

    destinations = [{"id": '{}@localhost:{}'.format(l2.info['id'], l2.port),
                     "amount": 50000},
                    {"id": '{}@localhost:{}'.format(l3.info['id'], l3.port),
                     "amount": 50000},
                    {"id": '{}@localhost:{}'.format(l4.info['id'], l4.port),
                     "amount": 50000}]

    l1.rpc.multifundchannel(destinations)
    bitcoind.generate_block(6, wait_for_mempool=1)

    for node in [l1, l2, l3, l4]:
        node.daemon.wait_for_log(r'to CHANNELD_NORMAL')

    for ldest in [l2, l3, l4]:
        inv = ldest.rpc.invoice(5000, 'inv', 'inv')['bolt11']
        l1.rpc.pay(inv)


@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_listpeers_crash(node_factory, bitcoind, executor):
    '''
    Test for listpeers crash during dual-funding start
    '''
    l1, l2 = node_factory.get_nodes(2)

    do_listpeers = True

    # Do lots of listpeers while this is happening
    def lots_of_listpeers(node):
        while do_listpeers:
            node.rpc.listpeers()

    fut = executor.submit(lots_of_listpeers, l1)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundwallet(10**6 + 1000000)
    l1.rpc.fundchannel(l2.info['id'], 10**6)['tx']

    do_listpeers = False
    fut.result()


@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_multifunding_one(node_factory, bitcoind):
    '''
    Test that multifunding can still fund to one destination.
    '''
    l1, l2, l3 = node_factory.get_nodes(3)

    l1.fundwallet(2000000)

    destinations = [{"id": '{}@localhost:{}'.format(l2.info['id'], l2.port),
                     "amount": 50000}]

    l1.rpc.multifundchannel(destinations)

    # Now check if we connect to the node first before
    # multifundchannel.
    l1.rpc.connect(l3.info['id'], 'localhost', port=l3.port)
    # Omit the connect hint.
    destinations = [{"id": '{}'.format(l3.info['id']),
                     "amount": 50000}]

    l1.rpc.multifundchannel(destinations, minconf=0)

    bitcoind.generate_block(6, wait_for_mempool=1)
    for node in [l1, l2, l3]:
        node.daemon.wait_for_log(r'to CHANNELD_NORMAL')

    for ldest in [l2, l3]:
        inv = ldest.rpc.invoice(5000, 'inv', 'inv')['bolt11']
        l1.rpc.pay(inv)


@pytest.mark.developer("needs dev-disconnect")
@pytest.mark.openchannel('v1')
def test_multifunding_disconnect(node_factory):
    '''
    Test disconnection during multifundchannel
    '''
    # TODO: Note that -WIRE_FUNDING_SIGNED does not
    # work.
    # See test_disconnect_half_signed.
    # If disconnected when the peer believes it sent
    # WIRE_FUNDING_SIGNED but before we actually
    # receive it, the peer continues to monitor our
    # funding tx, but we have forgotten it and will
    # never send it.
    disconnects = ["-WIRE_INIT",
                   "-WIRE_ACCEPT_CHANNEL",
                   "+WIRE_ACCEPT_CHANNEL"]
    l1 = node_factory.get_node()
    l2 = node_factory.get_node(disconnect=disconnects)
    l3 = node_factory.get_node()

    l1.fundwallet(2000000)

    destinations = [{"id": '{}@localhost:{}'.format(l2.info['id'], l2.port),
                     "amount": 50000},
                    {"id": '{}@localhost:{}'.format(l3.info['id'], l3.port),
                     "amount": 50000}]

    # Funding to l2 will fail, and we should properly
    # inform l3 to back out as well.
    for d in disconnects:
        with pytest.raises(RpcError):
            l1.rpc.multifundchannel(destinations)

    # TODO: failing at the fundchannel_complete phase
    # (-WIRE_FUNDING_SIGNED +-WIRE_FUNDING_SIGNED)
    # leaves the peer (l2 in this case) in a state
    # where it is waiting for an incoming channel,
    # even though we no longer have a channel going to
    # that peer.
    # Reconnecting with the peer will clear up that
    # confusion, but then the peer will disconnect
    # after a random amount of time.
    destinations = [{"id": '{}@localhost:{}'.format(l3.info['id'], l3.port),
                     "amount": 50000}]

    # This should succeed.
    l1.rpc.multifundchannel(destinations)


@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_multifunding_wumbo(node_factory):
    '''
    Test wumbo channel imposition in multifundchannel.
    '''
    l1, l2, l3 = node_factory.get_nodes(3,
                                        opts=[{'large-channels': None},
                                              {'large-channels': None},
                                              {}])

    l1.fundwallet(1 << 26)

    # This should fail: l3 does not support large channels.
    destinations = [{"id": '{}@localhost:{}'.format(l2.info['id'], l2.port),
                     "amount": 50000},
                    {"id": '{}@localhost:{}'.format(l3.info['id'], l3.port),
                     "amount": 1 << 24}]
    with pytest.raises(RpcError, match='Amount exceeded'):
        l1.rpc.multifundchannel(destinations)

    # This should succeed: the wumbo amount goes to wumbo-enabled l2.
    destinations = [{"id": '{}@localhost:{}'.format(l2.info['id'], l2.port),
                     "amount": 1 << 24},
                    {"id": '{}@localhost:{}'.format(l3.info['id'], l3.port),
                     "amount": 50000}]
    l1.rpc.multifundchannel(destinations)


@unittest.skipIf(TEST_NETWORK == 'liquid-regtest', "Fees on elements are different")
@pytest.mark.developer("uses dev-fail")
@pytest.mark.openchannel('v1')  # v2 the weight calculation is off by 3
def test_multifunding_feerates(node_factory, bitcoind):
    '''
    Test feerate parameters for multifundchannel
    '''
    funding_tx_feerate = '10000perkw'
    commitment_tx_feerate_int = 2000
    commitment_tx_feerate = str(commitment_tx_feerate_int) + 'perkw'

    l1, l2, l3 = node_factory.get_nodes(3, opts={'log-level': 'debug'})

    l1.fundwallet(1 << 26)

    def _connect_str(node):
        return '{}@localhost:{}'.format(node.info['id'], node.port)

    destinations = [{"id": _connect_str(l2), 'amount': 50000}]

    res = l1.rpc.multifundchannel(destinations, feerate=funding_tx_feerate,
                                  commitment_feerate=commitment_tx_feerate)

    entry = bitcoind.rpc.getmempoolentry(res['txid'])
    weight = entry['weight']
    expected_fee = int(funding_tx_feerate[:-5]) * weight // 1000
    assert expected_fee == entry['fees']['base'] * 10**8

    assert only_one(only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'])['feerate']['perkw'] == commitment_tx_feerate_int
    assert only_one(only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'])['feerate']['perkb'] == commitment_tx_feerate_int * 4

    txfee = only_one(only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'])['last_tx_fee_msat']

    # We get the expected close txid, force close the channel, then fish
    # the details about the transaction out of the mempool entry
    close_txid = only_one(only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'])['scratch_txid']
    l1.rpc.dev_fail(l2.info['id'])
    l1.wait_for_channel_onchain(l2.info['id'])

    entry = bitcoind.rpc.getmempoolentry(close_txid)

    # Because of how the anchor outputs protocol is designed,
    # we *always* pay for 2 anchor outs and their weight
    if EXPERIMENTAL_FEATURES or EXPERIMENTAL_DUAL_FUND:  # opt_anchor_outputs
        weight = 1124
    else:
        # the commitment transactions' feerate is calculated off
        # of this fixed weight
        weight = 724

    expected_fee = int(commitment_tx_feerate[:-5]) * weight // 1000

    # At this point we only have one anchor output on the
    # tx, but we subtract out the extra anchor output amount
    # from the to_us output, so it ends up inflating
    # our fee by that much.
    if EXPERIMENTAL_FEATURES or EXPERIMENTAL_DUAL_FUND:  # opt_anchor_outputs
        expected_fee += 330

    assert expected_fee == entry['fees']['base'] * 10**8
    assert Millisatoshi(str(expected_fee) + 'sat') == txfee
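

# The fee arithmetic used twice above, as a standalone worked example: a perkw
# feerate times a transaction weight, floor-divided by 1000, gives the fee in
# satoshi; with anchor outputs the unspent anchor's 330 sat shows up as extra
# apparent fee. Numbers mirror the constants in the test; the helper name is
# illustrative:
def _fee_from_perkw(feerate_perkw, weight, anchors=False):
    """Satoshi fee implied by a perkw feerate and a tx weight."""
    fee = feerate_perkw * weight // 1000
    if anchors:
        fee += 330  # missing second anchor output inflates the apparent fee
    return fee


assert _fee_from_perkw(2000, 724) == 1448
assert _fee_from_perkw(2000, 1124, anchors=True) == 2578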


def test_multifunding_param_failures(node_factory):
    '''
    Test that multifunding handles errors in parameters.
    '''
    l1, l2, l3 = node_factory.get_nodes(3)

    l1.fundwallet(1 << 26)

    # No connection hint to unconnected node.
    destinations = [{"id": '{}'.format(l2.info['id']),
                     "amount": 50000},
                    {"id": '{}@localhost:{}'.format(l3.info['id'], l3.port),
                     "amount": 50000}]
    with pytest.raises(RpcError):
        l1.rpc.multifundchannel(destinations)

    # Duplicated destination.
    destinations = [{"id": '{}@localhost:{}'.format(l2.info['id'], l2.port),
                     "amount": 50000},
                    {"id": '{}@localhost:{}'.format(l3.info['id'], l3.port),
                     "amount": 50000},
                    {"id": '{}@localhost:{}'.format(l2.info['id'], l2.port),
                     "amount": 50000}]
    with pytest.raises(RpcError):
        l1.rpc.multifundchannel(destinations)

    # Empty destinations.
    with pytest.raises(RpcError):
        l1.rpc.multifundchannel([])

    # Required destination fields missing.
    destinations = [{"id": '{}@localhost:{}'.format(l2.info['id'], l2.port),
                     "amount": 50000},
                    {"id": '{}@localhost:{}'.format(l3.info['id'], l3.port)}]
    with pytest.raises(RpcError):
        l1.rpc.multifundchannel(destinations)

    destinations = [{"amount": 50000},
                    {"id": '{}@localhost:{}'.format(l3.info['id'], l3.port),
                     "amount": 50000}]
    with pytest.raises(RpcError):
        l1.rpc.multifundchannel(destinations)


@pytest.mark.openchannel('v1')
@pytest.mark.developer("disconnect=... needs DEVELOPER=1")
def test_multifunding_best_effort(node_factory, bitcoind):
    '''
    Check that best_effort flag works.
    '''
    disconnects = ["-WIRE_INIT",
                   "-WIRE_ACCEPT_CHANNEL",
                   "-WIRE_FUNDING_SIGNED"]
    l1 = node_factory.get_node()
    l2 = node_factory.get_node()
    l3 = node_factory.get_node(disconnect=disconnects)
    l4 = node_factory.get_node()

    l1.fundwallet(2000000)

    destinations = [{"id": '{}@localhost:{}'.format(l2.info['id'], l2.port),
                     "amount": 50000},
                    {"id": '{}@localhost:{}'.format(l3.info['id'], l3.port),
                     "amount": 50000},
                    {"id": '{}@localhost:{}'.format(l4.info['id'], l4.port),
                     "amount": 50000}]

    for i, d in enumerate(disconnects):
        # Should succeed due to best-effort flag.
        l1.rpc.multifundchannel(destinations, minchannels=2)
        bitcoind.generate_block(6, wait_for_mempool=1)

        # Only l3 should fail to have channels.
        for node in [l1, l2, l4]:
            node.daemon.wait_for_log(r'to CHANNELD_NORMAL')

        # There should be working channels to l2 and l4.
        for ldest in [l2, l4]:
            inv = ldest.rpc.invoice(5000, 'i{}'.format(i), 'i{}'.format(i))['bolt11']
            l1.rpc.pay(inv)

        # Function to find the SCID of the channel that is
        # currently open.
        # Cannot use LightningNode.get_channel_scid since
        # it assumes the *first* channel found is the one
        # wanted, but in our case we close channels and
        # open again, so multiple channels may remain
        # listed.
        def get_funded_channel_scid(n1, n2):
            peers = n1.rpc.listpeers(n2.info['id'])['peers']
            assert len(peers) == 1
            peer = peers[0]
            channels = peer['channels']
            assert channels
            for c in channels:
                state = c['state']
                if state in ('CHANNELD_AWAITING_LOCKIN', 'CHANNELD_NORMAL'):
                    return c['short_channel_id']
            assert False

        # Now close channels to l2 and l4, for the next run.
        l1.rpc.close(get_funded_channel_scid(l1, l2))
        l1.rpc.close(get_funded_channel_scid(l1, l4))

        for node in [l1, l2, l4]:
            node.daemon.wait_for_log(r'to CLOSINGD_COMPLETE')

    # With 2 down, it will fail to fund channel
    l2.stop()
    l3.stop()
    with pytest.raises(RpcError, match=r'(Connection refused|Bad file descriptor)'):
        l1.rpc.multifundchannel(destinations, minchannels=2)

    # This works though.
    l1.rpc.multifundchannel(destinations, minchannels=1)


@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_lockin_between_restart(node_factory, bitcoind):
    l1 = node_factory.get_node(may_reconnect=True)
    l2 = node_factory.get_node(options={'funding-confirms': 3},
                               may_reconnect=True)
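    # l2 insists on 3 confirmations before lock-in; the 120 blocks mined
    # below while l1 is down are more than enough.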
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundwallet(10**6 + 1000000)
    l1.rpc.fundchannel(l2.info['id'], 10**6)['tx']

    # l1 goes down.
    l1.stop()

    # Now 120 blocks go by...
    bitcoind.generate_block(120)

    # Restart
    l1.start()

    # All should be good.
    l1.daemon.wait_for_log('to CHANNELD_NORMAL')
    l2.daemon.wait_for_log('to CHANNELD_NORMAL')


@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_funding_while_offline(node_factory, bitcoind):
    l1 = node_factory.get_node()
    addr = l1.rpc.newaddr()['bech32']
    sync_blockheight(bitcoind, [l1])

    # l1 goes down.
    l1.stop()

    # We send funds
    bitcoind.rpc.sendtoaddress(addr, (10**6 + 1000000) / 10**8)

    # Now 120 blocks go by...
    bitcoind.generate_block(120)

    # Restart
    l1.start()
    sync_blockheight(bitcoind, [l1])

    assert len(l1.rpc.listfunds()['outputs']) == 1


@pytest.mark.developer
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_channel_persistence(node_factory, bitcoind, executor):
    # Start two nodes and open a channel (to remember). l2 will
    # mysteriously die while committing the first HTLC so we can
    # check that HTLCs reloaded from the DB work.
    # Feerates identical so we don't get gratuitous commit to update them
    disable_commit_after = 1
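    # (v2/dual-funded opens appear to exchange one extra commitment during
    # establishment, hence the higher cutoff.)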
    if EXPERIMENTAL_DUAL_FUND:
        disable_commit_after = 2

    l1 = node_factory.get_node(may_reconnect=True, feerates=(7500, 7500, 7500,
                                                             7500))
    l2 = node_factory.get_node(options={'dev-disable-commit-after': disable_commit_after},
                               may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # Neither node should have a channel open, they are just connected
    for n in (l1, l2):
        assert(n.db_query('SELECT COUNT(id) as count FROM channels;')[0]['count'] == 0)

    l1.fundchannel(l2, 100000)

    peers = l1.rpc.listpeers()['peers']
    assert(only_one(peers[0]['channels'])['state'] == 'CHANNELD_NORMAL')

    # Both nodes should now have exactly one channel in the database
    for n in (l1, l2):
        assert(n.db_query('SELECT COUNT(id) as count FROM channels;')[0]['count'] == 1)

    # Fire off a sendpay request, it'll get interrupted by a restart
    executor.submit(l1.pay, l2, 10000)
    # Wait for it to be committed to, i.e., stored in the DB
    l1.daemon.wait_for_log('peer_in WIRE_FUNDING_LOCKED')
    l1.daemon.wait_for_log('peer_in WIRE_COMMITMENT_SIGNED')

    # Stop l2, l1 will reattempt to connect
    print("Killing l2 in mid HTLC")
    l2.daemon.kill()

    # Clear the disconnect and timer stop so we can proceed normally
    del l2.daemon.opts['dev-disable-commit-after']

    # Wait for l1 to notice
    wait_for(lambda: 'connected' not in only_one(l1.rpc.listpeers()['peers'][0]['channels']))

    # Now restart l2 and it should reload peers/channels from the DB
    l2.start()
    wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 1)

    # Wait for the restored HTLC to finish
    wait_for(lambda: only_one(l1.rpc.listpeers()['peers'][0]['channels'])['to_us_msat'] == 99990000)

    wait_for(lambda: len([p for p in l1.rpc.listpeers()['peers'] if p['connected']]))
    wait_for(lambda: len([p for p in l2.rpc.listpeers()['peers'] if p['connected']]))

    # Now make sure this is really functional by sending a payment
    l1.pay(l2, 10000)

    # L1 doesn't actually update to_us_msat until it receives
    # revoke_and_ack from L2, which can take a little bit.
    wait_for(lambda: only_one(l1.rpc.listpeers()['peers'][0]['channels'])['to_us_msat'] == 99980000)
    assert only_one(l2.rpc.listpeers()['peers'][0]['channels'])['to_us_msat'] == 20000

    # Finally restart l1, and make sure it remembers
    l1.restart()
    assert only_one(l1.rpc.listpeers()['peers'][0]['channels'])['to_us_msat'] == 99980000

    # Now make sure l1 is watching for unilateral closes
    l2.rpc.dev_fail(l1.info['id'])
    l2.daemon.wait_for_log('Failing due to dev-fail command')
    l2.wait_for_channel_onchain(l1.info['id'])
    bitcoind.generate_block(1)

    # L1 must notice.
    l1.daemon.wait_for_log('to ONCHAIN')


@pytest.mark.developer("gossip without DEVELOPER=1 is slow")
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_private_channel(node_factory):
    l1, l2 = node_factory.line_graph(2, announce_channels=False, wait_for_announce=False)
    l3, l4 = node_factory.line_graph(2, announce_channels=True, wait_for_announce=True)

    assert l1.daemon.is_in_log('Will open private channel with node {}'.format(l2.info['id']))
    assert not l2.daemon.is_in_log('Will open private channel with node {}'.format(l1.info['id']))
    assert not l3.daemon.is_in_log('Will open private channel with node {}'.format(l4.info['id']))

    l3.daemon.wait_for_log('Received node_announcement for node {}'.format(l4.info['id']))
    l4.daemon.wait_for_log('Received node_announcement for node {}'.format(l3.info['id']))

    assert not l1.daemon.is_in_log('Received node_announcement for node {}'.format(l2.info['id']))
    assert not l2.daemon.is_in_log('Received node_announcement for node {}'.format(l1.info['id']))

    # test for 'private' flag in rpc output
    assert only_one(only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'])['private']
    # check non-private channel
    assert not only_one(only_one(l4.rpc.listpeers(l3.info['id'])['peers'])['channels'])['private']


@pytest.mark.developer("gossip without DEVELOPER=1 is slow")
def test_channel_reenable(node_factory):
    l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True}, fundchannel=True, wait_for_announce=True)

    l1.daemon.wait_for_log('Received node_announcement for node {}'.format(l2.info['id']))
    l2.daemon.wait_for_log('Received node_announcement for node {}'.format(l1.info['id']))

    # Both directions should be active before the restart
    wait_for(lambda: [c['active'] for c in l1.rpc.listchannels()['channels']] == [True, True])

    # Restart l2, will cause l1 to reconnect
    l2.stop()
    wait_for(lambda: [c['active'] for c in l1.rpc.listchannels()['channels']] == [False, False])
    l2.start()

    # Updates may be suppressed if redundant; just test results.
    wait_for(lambda: [c['active'] for c in l1.rpc.listchannels()['channels']] == [True, True])
    wait_for(lambda: [c['active'] for c in l2.rpc.listchannels()['channels']] == [True, True])


@pytest.mark.developer
def test_update_fee(node_factory, bitcoind):
    l1, l2 = node_factory.line_graph(2, fundchannel=True)
    chanid = l1.get_channel_scid(l2)

    # Make l1 send out feechange.
    l1.set_feerates((14000, 11000, 7500, 3750))

    # Now make sure an HTLC works.
    # (First wait for route propagation.)
    l1.wait_channel_active(chanid)
    sync_blockheight(bitcoind, [l1, l2])

    # Make payments.
    l1.pay(l2, 200000000)
    # First payment causes fee update.
    l2.daemon.wait_for_log('peer updated fee to 11000')
    l2.pay(l1, 100000000)

    # Now shutdown cleanly.
    l1.rpc.close(chanid)

    l1.daemon.wait_for_log('to CLOSINGD_COMPLETE')
    l2.daemon.wait_for_log('to CLOSINGD_COMPLETE')

    # And should put closing into mempool.
    l1.wait_for_channel_onchain(l2.info['id'])
    l2.wait_for_channel_onchain(l1.info['id'])

    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('to ONCHAIN')
    l2.daemon.wait_for_log('to ONCHAIN')

    bitcoind.generate_block(99)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')


@pytest.mark.developer
def test_fee_limits(node_factory, bitcoind):
    l1, l2, l3, l4 = node_factory.get_nodes(4, opts=[{'dev-max-fee-multiplier': 5, 'may_reconnect': True,
                                                      'allow_warning': True},
                                                     {'dev-max-fee-multiplier': 5, 'may_reconnect': True,
                                                      'allow_warning': True},
                                                     {'ignore-fee-limits': True, 'may_reconnect': True},
                                                     {}])

    node_factory.join_nodes([l1, l2], fundchannel=True)

    # Kick off fee adjustment using HTLC.
    l1.pay(l2, 1000)

    # L1 asks for stupid low fee (will actually hit the floor of 253)
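    # (253perkw is the implementation's feerate floor, roughly bitcoind's
    # 1 sat/vB minimum relay feerate expressed per kiloweight.)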
    l1.stop()
    l1.set_feerates((15, 15, 15, 15), False)
    l1.start()

    l1.daemon.wait_for_log('Peer transient failure in CHANNELD_NORMAL: channeld WARNING: .*: update_fee 253 outside range 1875-75000')
    # Closes, but does not error.  Make sure it's noted in their status though.
    assert 'update_fee 253 outside range 1875-75000' in only_one(only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'])['status'][0]
    assert 'update_fee 253 outside range 1875-75000' in only_one(only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['channels'])['status'][0]

    # Make l2 accept those fees, and it should recover.
    l2.stop()
    l2.set_feerates((15, 15, 15, 15), False)
    l2.start()

    l1.rpc.close(l2.info['id'])

    # Make sure the resolution of this one doesn't interfere with the next!
    # Note: may succeed, may fail with insufficient fee, depending on how
    # bitcoind feels!
    l2.daemon.wait_for_log('sendrawtx exit')
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [l1, l2])

    # Trying to open a channel with too low a fee-rate is denied
    l1.rpc.connect(l4.info['id'], 'localhost', l4.port)
    with pytest.raises(RpcError, match='They sent error .* feerate_per_kw 253 below minimum'):
        l1.fundchannel(l4, 10**6)

    # Restore to normal.
    l1.stop()
    l1.set_feerates((15000, 11000, 7500, 3750), False)
    l1.start()

    # Try with node which sets --ignore-fee-limits
    l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
    chan, _ = l1.fundchannel(l3, 10**6)

    # Kick off fee adjustment using HTLC.
    l1.pay(l3, 1000)

    # Try stupid high fees
    l1.stop()
    l1.set_feerates((15000, 11000 * 10, 7500, 3750), False)
    l1.start()

    l3.daemon.wait_for_log('peer_in WIRE_UPDATE_FEE')
    l3.daemon.wait_for_log('peer_in WIRE_COMMITMENT_SIGNED')

    # We need to wait until both have committed and revoked the
    # old state, otherwise we'll still try to commit with the old
    # 15sat/byte fee
    l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')

    # This should wait for close to complete
    l1.rpc.close(chan)


@pytest.mark.developer("needs dev-no-fake-fees")
def test_update_fee_dynamic(node_factory, bitcoind):
    # l1 has no fee estimates to start.
    l1 = node_factory.get_node(options={'log-level': 'io',
                                        'dev-no-fake-fees': True}, start=False)
    l1.daemon.rpcproxy.mock_rpc('estimatesmartfee', {
        'error': {"errors": ["Insufficient data or no feerate found"], "blocks": 0}
    })
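    # The rpcproxy sits between lightningd and bitcoind, so this mock starves
    # l1 of fee estimates until set_feerates() replaces it below.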
    l1.start()
    l2 = node_factory.get_node()
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # Fails due to lack of fee estimate.
    with pytest.raises(RpcError, match='Cannot estimate fees'):
        l1.fundchannel(l2, 10**6)

    # Explicit feerate works.
    l1.fundchannel(l2, 10**6, feerate='10000perkw')

    l1.set_feerates((15000, 11000, 7500, 3750))

    # It will send UPDATE_FEE when it tries to send HTLC.
    inv = l2.rpc.invoice(5000, 'test_update_fee_dynamic', 'test_update_fee_dynamic')['bolt11']
    l1.rpc.pay(inv)
    l2.daemon.wait_for_log('peer_in.*UPDATE_FEE')

    # Now we take it away again!
    l1.daemon.rpcproxy.mock_rpc('estimatesmartfee', {
        'error': {"errors": ["Insufficient data or no feerate found"], "blocks": 0}
    })
    # Make sure that registers!  (DEVELOPER means polling every second)
    time.sleep(2)

    inv = l2.rpc.invoice(5000, 'test_update_fee_dynamic2', 'test_update_fee_dynamic2')['bolt11']
    l1.rpc.pay(inv)
    # Won't update fee.
    assert not l2.daemon.is_in_log('peer_in.*UPDATE_FEE',
                                   start=l2.daemon.logsearch_start)

    # Bring it back.
    l1.set_feerates((14000, 10000, 7000, 3000))

    # It will send UPDATE_FEE when it tries to send HTLC.
    inv = l2.rpc.invoice(5000, 'test_update_fee_dynamic3', 'test_update_fee_dynamic')['bolt11']
    l1.rpc.pay(inv)
    l2.daemon.wait_for_log('peer_in.*UPDATE_FEE')


@pytest.mark.developer
def test_update_fee_reconnect(node_factory, bitcoind):
    # Disconnect after commitsig for fee update.
    disconnects = ['+WIRE_COMMITMENT_SIGNED*3']
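    # (dev-disconnect syntax: '+' drops the connection just after sending the
    # named message, '-' just before, and '*3' targets its third occurrence.)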
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(disconnect=disconnects, may_reconnect=True,
                               feerates=(15000, 15000, 15000, 3750))
    # We match l2's later feerate, so we agree on same closing tx for simplicity.
    l2 = node_factory.get_node(may_reconnect=True,
                               feerates=(14000, 15000, 14000, 3750))
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    chan, _ = l1.fundchannel(l2, 10**6)

    # Make an HTLC just to get us to do feechanges.
    l1.pay(l2, 1000)

    # Make l1 send out feechange; triggers disconnect/reconnect.
    # (Note: < 10% change, so no smoothing here!)
    l1.set_feerates((14000, 14000, 14000, 3750))
    l1.daemon.wait_for_log('Setting REMOTE feerate to 14000')
    l2.daemon.wait_for_log('Setting LOCAL feerate to 14000')
    l1.daemon.wait_for_log(r'dev_disconnect: \+WIRE_COMMITMENT_SIGNED')

    # Wait for reconnect....
    l1.daemon.wait_for_log('Feerate:.*LOCAL now 14000')

    l1.pay(l2, 200000000)
    l2.pay(l1, 100000000)

    # They should both have gotten commits with correct feerate.
    assert l1.daemon.is_in_log('got commitsig [0-9]*: feerate 14000')
    assert l2.daemon.is_in_log('got commitsig [0-9]*: feerate 14000')

    # Now shutdown cleanly.
    l1.rpc.close(chan)

    # And should put closing into mempool.
    l1.wait_for_channel_onchain(l2.info['id'])
    l2.wait_for_channel_onchain(l1.info['id'])

    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('to ONCHAIN')
    l2.daemon.wait_for_log('to ONCHAIN')

    bitcoind.generate_block(99)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')


@pytest.mark.developer("Too slow without --dev-bitcoind-poll")
def test_multiple_channels(node_factory):
    l1 = node_factory.get_node()
    l2 = node_factory.get_node()

    for i in range(3):
        # FIXME: we shouldn't disconnect on close?
        ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
        assert ret['id'] == l2.info['id']

        l1.daemon.wait_for_log('Handed peer, entering loop')
        l2.daemon.wait_for_log('Handed peer, entering loop')
        chan, _ = l1.fundchannel(l2, 10**6)

        l1.rpc.close(chan)

        # If we don't wait for l2 to make the transition we can end up
        # attempting to re-establish the channel
        l2.daemon.wait_for_log(
            r'State changed from CLOSINGD_SIGEXCHANGE to CLOSINGD_COMPLETE'
        )

    channels = only_one(l1.rpc.listpeers()['peers'])['channels']
    assert len(channels) == 3
    # Most in state ONCHAIN, last is CLOSINGD_COMPLETE
    for i in range(len(channels) - 1):
        assert channels[i]['state'] == 'ONCHAIN'
    assert channels[-1]['state'] == 'CLOSINGD_COMPLETE'


@pytest.mark.developer
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_forget_channel(node_factory):
    l1 = node_factory.get_node()
    l2 = node_factory.get_node()
    l1.fundwallet(10**6)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.fundchannel(l2.info['id'], 10**5)

    assert len(l1.rpc.listpeers()['peers']) == 1

    # This should fail, the funding tx is in the mempool and may confirm
    with pytest.raises(RpcError, match=r'Cowardly refusing to forget channel'):
        l1.rpc.dev_forget_channel(l2.info['id'])

    assert len(l1.rpc.listpeers()['peers']) == 1

    # Forcing should work
    l1.rpc.dev_forget_channel(l2.info['id'], True)
    assert len(l1.rpc.listpeers()['peers']) == 0

    # And restarting should keep that peer forgotten
    l1.restart()
    assert len(l1.rpc.listpeers()['peers']) == 0

    # The entry in the channels table should still be there
    assert l1.db_query("SELECT count(*) as c FROM channels;")[0]['c'] == 1
    assert l2.db_query("SELECT count(*) as c FROM channels;")[0]['c'] == 1


@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_peerinfo(node_factory, bitcoind):
    l1, l2 = node_factory.line_graph(2, fundchannel=False, opts={'may_reconnect': True})

    if l1.config('experimental-dual-fund'):
        lfeatures = expected_peer_features(extra=[21, 29])
        nfeatures = expected_node_features(extra=[21, 29])
    else:
        lfeatures = expected_peer_features()
        nfeatures = expected_node_features()
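    # (Bits 21 and 29 are, we assume, the odd/optional flavours of
    # option_anchor_outputs and option_dual_fund added by dual-fund builds.)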
    # Gossiping but no node announcement yet
    assert l1.rpc.getpeer(l2.info['id'])['connected']
    assert len(l1.rpc.getpeer(l2.info['id'])['channels']) == 0
    assert l1.rpc.getpeer(l2.info['id'])['features'] == lfeatures

    # Fund a channel to force a node announcement
    chan, _ = l1.fundchannel(l2, 10**6)
    # Now proceed to funding-depth and do a full gossip round
    bitcoind.generate_block(5)
    l1.daemon.wait_for_logs(['Received node_announcement for node ' + l2.info['id']])
    l2.daemon.wait_for_logs(['Received node_announcement for node ' + l1.info['id']])

    # Should have announced the same features as told to peer.
    nodes1 = l1.rpc.listnodes(l2.info['id'])['nodes']
    nodes2 = l2.rpc.listnodes(l2.info['id'])['nodes']
    peer1 = l1.rpc.getpeer(l2.info['id'])
    peer2 = l2.rpc.getpeer(l1.info['id'])

    # Peer features are no longer equal to node features, because keysend
    # adds a node feature.
    assert only_one(nodes1)['features'] == nfeatures
    assert only_one(nodes2)['features'] == nfeatures
    assert peer1['features'] == lfeatures
    assert peer2['features'] == lfeatures

    # If it reconnects after db load, it should know features.
    l1.restart()
    wait_for(lambda: l1.rpc.getpeer(l2.info['id'])['connected'])
    wait_for(lambda: l2.rpc.getpeer(l1.info['id'])['connected'])
    assert l1.rpc.getpeer(l2.info['id'])['features'] == lfeatures
    assert l2.rpc.getpeer(l1.info['id'])['features'] == lfeatures

    # Close the channel to forget the peer
    l1.rpc.close(chan)

    wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
    wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])

    # Make sure close tx hits mempool before we mine blocks.
    bitcoind.generate_block(100, wait_for_mempool=1)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')

    # The only channel was closed, everybody should have forgotten the nodes
    assert l1.rpc.listnodes()['nodes'] == []
    assert l2.rpc.listnodes()['nodes'] == []


def test_disconnectpeer(node_factory, bitcoind):
    l1, l2, l3 = node_factory.get_nodes(3, opts={'may_reconnect': False})
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.connect(l3.info['id'], 'localhost', l3.port)

    # Gossiping
    assert l1.rpc.getpeer(l2.info['id'])['connected']
    assert len(l1.rpc.getpeer(l2.info['id'])['channels']) == 0
    assert l1.rpc.getpeer(l3.info['id'])['connected']
    assert len(l1.rpc.getpeer(l3.info['id'])['channels']) == 0
    wait_for(lambda: l2.rpc.getpeer(l1.info['id']) is not None)

    # Disconnect l2 from l1
    l1.rpc.disconnect(l2.info['id'])

    # Make sure listpeers no longer returns the disconnected node
    assert l1.rpc.getpeer(l2.info['id']) is None
    wait_for(lambda: l2.rpc.getpeer(l1.info['id']) is None)

    # Make sure you cannot disconnect after disconnecting
    with pytest.raises(RpcError, match=r'Unknown peer'):
        l1.rpc.disconnect(l2.info['id'])
    with pytest.raises(RpcError, match=r'Unknown peer'):
        l2.rpc.disconnect(l1.info['id'])

    # Fund channel l1 -> l3
    l1.fundchannel(l3, 10**6)
    mine_funding_to_announce(bitcoind, [l1, l2, l3])

    # Disconnecting a non-gossiping peer results in an error
    with pytest.raises(RpcError, match=r'Peer has \(at least one\) channel in state CHANNELD_NORMAL'):
        l1.rpc.disconnect(l3.info['id'])


@pytest.mark.developer("needs --dev-max-funding-unconfirmed-blocks")
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_fundee_forget_funding_tx_unconfirmed(node_factory, bitcoind):
    """Test that fundee will forget the channel if
    the funding tx has been unconfirmed for too long.
    """
    # Keep this low (default is 2016), since everything
    # is much slower in VALGRIND mode and wait_for_log
    # could time out before lightningd processes all the
    # blocks.
    blocks = 200
    # opener
    l1 = node_factory.get_node()
    # peer
    l2 = node_factory.get_node(options={"dev-max-funding-unconfirmed-blocks": blocks})
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # Give opener some funds.
    l1.fundwallet(10**7)
    # Let blocks settle.
    time.sleep(1)

    def mock_sendrawtransaction(r):
        return {'id': r['id'], 'error': {'code': 100, 'message': 'sendrawtransaction disabled'}}

    # Prevent opener from broadcasting funding tx (any tx really).
    l1.daemon.rpcproxy.mock_rpc('sendrawtransaction', mock_sendrawtransaction)

    # Fund the channel.
    # The process will complete, but opener will be unable
    # to broadcast and confirm funding tx.
    with pytest.raises(RpcError, match=r'sendrawtransaction disabled'):
        l1.rpc.fundchannel(l2.info['id'], 10**6)

    # Generate blocks until unconfirmed.
    bitcoind.generate_block(blocks)

    # fundee will forget channel!
    l2.daemon.wait_for_log('Forgetting channel: It has been {} blocks'.format(blocks))

    # fundee will also forget and disconnect from peer.
    assert len(l2.rpc.listpeers(l1.info['id'])['peers']) == 0


@pytest.mark.developer("needs dev_fail")
def test_no_fee_estimate(node_factory, bitcoind, executor):
    l1 = node_factory.get_node(start=False, options={'dev-no-fake-fees': True})

    # Fail any fee estimation requests until we allow them further down
    l1.daemon.rpcproxy.mock_rpc('estimatesmartfee', {
        'error': {"errors": ["Insufficient data or no feerate found"], "blocks": 0}
    })
    l1.start()

    l2 = node_factory.get_node()
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # Can't fund a channel.
    l1.fundwallet(10**7)
    with pytest.raises(RpcError, match=r'Cannot estimate fees'):
        l1.rpc.fundchannel(l2.info['id'], 10**6)

    # Can't withdraw either.
    with pytest.raises(RpcError, match=r'Cannot estimate fees'):
        l1.rpc.withdraw(l2.rpc.newaddr()['bech32'], 'all')

    # Can't use feerate names, either.
    with pytest.raises(RpcError, match=r'Cannot estimate fees'):
        l1.rpc.withdraw(l2.rpc.newaddr()['bech32'], 'all', 'urgent')
    with pytest.raises(RpcError, match=r'Cannot estimate fees'):
        l1.rpc.withdraw(l2.rpc.newaddr()['bech32'], 'all', 'normal')
    with pytest.raises(RpcError, match=r'Cannot estimate fees'):
        l1.rpc.withdraw(l2.rpc.newaddr()['bech32'], 'all', 'slow')

    with pytest.raises(RpcError, match=r'Cannot estimate fees'):
        l1.rpc.fundchannel(l2.info['id'], 10**6, 'urgent')
    with pytest.raises(RpcError, match=r'Cannot estimate fees'):
        l1.rpc.fundchannel(l2.info['id'], 10**6, 'normal')
    with pytest.raises(RpcError, match=r'Cannot estimate fees'):
        l1.rpc.fundchannel(l2.info['id'], 10**6, 'slow')

    # Can with manual feerate.
    l1.rpc.withdraw(l2.rpc.newaddr()['bech32'], 10000, '1500perkb')
    l1.rpc.fundchannel(l2.info['id'], 10**6, '2000perkw', minconf=0)
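    # (Explicit feerates like '1500perkb' (per kilobyte) and '2000perkw'
    # (per kiloweight) bypass the missing estimates.)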
    # Make sure we clean up channel for later attempt.
    l1.daemon.wait_for_log('sendrawtx exit 0')
    l1.rpc.dev_fail(l2.info['id'])
    l1.daemon.wait_for_log('Failing due to dev-fail command')
    l1.wait_for_channel_onchain(l2.info['id'])
    bitcoind.generate_block(6)
    wait_for(lambda: only_one(l1.rpc.getpeer(l2.info['id'])['channels'])['state'] == 'ONCHAIN')
    wait_for(lambda: only_one(l2.rpc.getpeer(l1.info['id'])['channels'])['state'] == 'ONCHAIN')

    # But can accept incoming connections.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l2.fundchannel(l1, 10**6)

    # Can do HTLCs.
    l2.pay(l1, 10**5)

    # Can do mutual close.
    l1.rpc.close(l2.info['id'])
    wait_for(lambda: len(bitcoind.rpc.getrawmempool()) > 0)
    bitcoind.generate_block(100)
    sync_blockheight(bitcoind, [l1, l2])

    # Can do unilateral close.
    l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
    l2.fundchannel(l1, 10**6)
    l2.pay(l1, 10**9 // 2)
    l1.rpc.dev_fail(l2.info['id'])
    l1.daemon.wait_for_log('Failing due to dev-fail command')
    l1.wait_for_channel_onchain(l2.info['id'])
    bitcoind.generate_block(5)
    wait_for(lambda: len(bitcoind.rpc.getrawmempool()) > 0)
    bitcoind.generate_block(100)
    sync_blockheight(bitcoind, [l1, l2])

    # Start estimatesmartfee.
    l1.set_feerates((15000, 11000, 7500, 3750), True)

    # Can now fund a channel (as a test, use slow feerate).
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    sync_blockheight(bitcoind, [l1])
    l1.rpc.fundchannel(l2.info['id'], 10**6, 'slow')

    # Can withdraw (use urgent feerate). `minconf` may be needed depending on
    # the previous `fundchannel` selecting all confirmed outputs.
    l1.rpc.withdraw(l2.rpc.newaddr()['bech32'], 'all', 'urgent', minconf=0)


@pytest.mark.developer("needs --dev-disconnect")
def test_opener_feerate_reconnect(node_factory, bitcoind):
    # l1 updates fees, then reconnect so l2 retransmits commitment_signed.
    disconnects = ['-WIRE_COMMITMENT_SIGNED*3']
    l1 = node_factory.get_node(may_reconnect=True,
                               feerates=(7500, 7500, 7500, 7500))
    l2 = node_factory.get_node(disconnect=disconnects, may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 10**6)

    # Need a payment otherwise it won't update fee.
    l1.pay(l2, 10**9 // 2)

    # create fee update, causing disconnect.
    l1.set_feerates((15000, 11000, 7500, 3750))
    l2.daemon.wait_for_log(r'dev_disconnect: \-WIRE_COMMITMENT_SIGNED')

    # Wait until they reconnect.
    l1.daemon.wait_for_log('Peer transient failure in CHANNELD_NORMAL')
    l1.daemon.wait_for_log('peer_disconnect_done')
    wait_for(lambda: l1.rpc.getpeer(l2.info['id'])['connected'])

    # Should work normally.
    l1.pay(l2, 200000000)


def test_opener_simple_reconnect(node_factory, bitcoind):
    """Sanity check that reconnection works with completely unused channels"""
    # Set fees even so it doesn't send any commitments.
    l1 = node_factory.get_node(may_reconnect=True,
                               feerates=(7500, 7500, 7500, 7500))
    l2 = node_factory.get_node(may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 10**6)

    l1.rpc.disconnect(l2.info['id'], True)

    # Wait until they reconnect.
    wait_for(lambda: l1.rpc.getpeer(l2.info['id'])['connected'])

    # Should work normally.
    l1.pay(l2, 200000000)


@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "sqlite3-specific DB rollback")
@pytest.mark.developer("needs LIGHTNINGD_DEV_LOG_IO")
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_dataloss_protection(node_factory, bitcoind):
    l1 = node_factory.get_node(may_reconnect=True, options={'log-level': 'io'},
                               feerates=(7500, 7500, 7500, 7500))
    l2 = node_factory.get_node(may_reconnect=True, options={'log-level': 'io'},
                               feerates=(7500, 7500, 7500, 7500), allow_broken_log=True)

    lf = expected_peer_features()

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    # l1 should send out WIRE_INIT (0010)
    l1.daemon.wait_for_log(r"\[OUT\] 0010.*"
                           # lflen
                           + format(len(lf) // 2, '04x')
                           + lf)

    l1.fundchannel(l2, 10**6)
    l2.stop()

    # Save copy of the db.
    dbpath = os.path.join(l2.daemon.lightning_dir, TEST_NETWORK, "lightningd.sqlite3")
    orig_db = open(dbpath, "rb").read()
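    # (Snapshotting the db lets us roll l2 back to this state later and
    # provoke the data-loss handling.)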
    l2.start()

    # l1 should have sent WIRE_CHANNEL_REESTABLISH with extra fields.
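    # (The trailing per-commitment secret/point fields are the
    # option_data_loss_protect extras this test exercises.)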
    l1.daemon.wait_for_log(r"\[OUT\] 0088"
                           # channel_id
                           "[0-9a-f]{64}"
                           # next_local_commitment_number
                           "0000000000000001"
                           # next_remote_revocation_number
                           "0000000000000000"
                           # your_last_per_commitment_secret (funding_depth may
                           # trigger a fee-update and commit, hence this may not
                           # be zero)
                           "[0-9a-f]{64}"
                           # my_current_per_commitment_point
                           "0[23][0-9a-f]{64}")

    # After an htlc, we should get different results (two more commits)
    l1.pay(l2, 200000000)

    # Make sure both sides consider it completely settled (has received both
    # REVOKE_AND_ACK)
    l1.daemon.wait_for_logs([r"\[IN\] 0085"] * 2)
    l2.daemon.wait_for_logs([r"\[IN\] 0085"] * 2)

    l2.restart()

    # l1 should have sent WIRE_CHANNEL_REESTABLISH with extra fields.
    l1.daemon.wait_for_log(r"\[OUT\] 0088"
                           # channel_id
                           "[0-9a-f]{64}"
                           # next_local_commitment_number
                           "000000000000000[1-9]"
                           # next_remote_revocation_number
                           "000000000000000[1-9]"
                           # your_last_per_commitment_secret
                           "[0-9a-f]{64}"
                           # my_current_per_commitment_point
                           "0[23][0-9a-f]{64}")

    # Now, move l2 back in time.
    l2.stop()
    # Overwrite with OLD db.
    open(dbpath, "wb").write(orig_db)
    l2.start()

    # l2 should freak out!
    l2.daemon.wait_for_log("Peer permanent failure in CHANNELD_NORMAL: Awaiting unilateral close")

    # l1 should drop to chain.
    l1.wait_for_channel_onchain(l2.info['id'])

    # l2 must NOT drop to chain.
    l2.daemon.wait_for_log("Cannot broadcast our commitment tx: they have a future one")
    assert not l2.daemon.is_in_log('sendrawtx exit 0',
                                   start=l2.daemon.logsearch_start)

    closetxid = only_one(bitcoind.rpc.getrawmempool(False))

    # l2 should still recover something!
    bitcoind.generate_block(1)

    l2.daemon.wait_for_log("ERROR: Unknown commitment #[0-9], recovering our funds!")

    # Restarting l2, and it should remember from db.
    l2.restart()

    l2.daemon.wait_for_log("ERROR: Unknown commitment #[0-9], recovering our funds!")

    bitcoind.generate_block(100)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')

    # l2 should have it in wallet.
    assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l2.rpc.listfunds()['outputs']])


@pytest.mark.developer("needs dev_disconnect")
def test_restart_multi_htlc_rexmit(node_factory, bitcoind, executor):
    # l1 disables commit timer once we send first htlc, dies on commit
    l1, l2 = node_factory.line_graph(2, opts=[{'disconnect': ['-WIRE_COMMITMENT_SIGNED'],
                                               'may_reconnect': True,
                                               'dev-disable-commit-after': 0},
                                              {'may_reconnect': True}])
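    # ('dev-disable-commit-after': 0 freezes the commit timer from the start;
    # dev_reenable_commit below kicks it manually once both HTLCs are queued.)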
    executor.submit(l1.pay, l2, 20000)
    executor.submit(l1.pay, l2, 30000)
    l1.daemon.wait_for_logs(['peer_out WIRE_UPDATE_ADD_HTLC'] * 2)
    l1.rpc.dev_reenable_commit(l2.info['id'])
    l1.daemon.wait_for_log('dev_disconnect: -WIRE_COMMITMENT_SIGNED')

    # This will make it reconnect
    l1.stop()
    # Clear the disconnect so we can proceed normally
    del l1.daemon.opts['dev-disconnect']
    l1.start()

    # Payments will fail due to restart, but we can see results in listsendpays.
    print(l1.rpc.listsendpays())
    wait_for(lambda: [p['status'] for p in l1.rpc.listsendpays()['payments']] == ['complete', 'complete'])


@pytest.mark.developer("needs dev_disconnect")
def test_fulfill_incoming_first(node_factory, bitcoind):
    """Test that we handle the case where we completely resolve incoming htlc
    before fulfilled outgoing htlc"""

    # We agree on fee change first, then add HTLC, then remove; stop after remove.
    disconnects = ['+WIRE_COMMITMENT_SIGNED*3']
    # We manually reconnect l2 & l3, after 100 blocks; hence allowing manual
    # reconnect, but disabling auto connect, and massive cltv so l2/l3 don't
    # time out.
    l1, l2, l3 = node_factory.line_graph(3, opts=[{'disable-mpp': None},
                                                  {'may_reconnect': True,
                                                   'dev-no-reconnect': None},
                                                  {'may_reconnect': True,
                                                   'dev-no-reconnect': None,
                                                   'disconnect': disconnects,
                                                   'cltv-final': 200}],
                                         wait_for_announce=True)

    # This succeeds.
    l1.rpc.pay(l3.rpc.invoice(200000000, 'test_fulfill_incoming_first', 'desc')['bolt11'])

    # l1 can shutdown, fine.
    l1.rpc.close(l2.info['id'])
    l1.wait_for_channel_onchain(l2.info['id'])
    bitcoind.generate_block(100)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')

    # Now, l2 should restore from DB fine, even though outgoing HTLC no longer
    # has an incoming.
    l2.restart()

    # Manually reconnect l2->l3.
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)

    # Fulfill should be retransmitted OK (ignored result).
    l2.rpc.close(l3.info['id'])
    l2.wait_for_channel_onchain(l3.info['id'])
    bitcoind.generate_block(100)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')
    l3.daemon.wait_for_log('onchaind complete, forgetting peer')


@pytest.mark.skip('needs blackhole support')
@pytest.mark.developer("need dev-disconnect")
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_fail_unconfirmed(node_factory, bitcoind, executor):
    """Test that if we crash with an unconfirmed connection to a known
    peer, we don't have a dangling peer in db"""
    if EXPERIMENTAL_DUAL_FUND:
        disconnect = ['=WIRE_OPEN_CHANNEL2']
    else:
        disconnect = ['=WIRE_OPEN_CHANNEL']

    # = is a NOOP disconnect, but sets up file.
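    # (The '0' prefix written below should instead blackhole the message.)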
    l1 = node_factory.get_node(disconnect=disconnect)
    l2 = node_factory.get_node()

    # First one, we close by mutual agreement.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 200000, wait_for_active=True)
    l1.rpc.close(l2.info['id'])

    # Make sure it's closed
    l1.wait_for_channel_onchain(l2.info['id'])
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('State changed from CLOSINGD_COMPLETE to FUNDING_SPEND_SEEN')

    l1.stop()
    # Mangle disconnect file so this time it blackholes....
    with open(l1.daemon.disconnect_file, "w") as f:
        if EXPERIMENTAL_DUAL_FUND:
            f.write("0WIRE_OPEN_CHANNEL2\n")
        else:
            f.write("0WIRE_OPEN_CHANNEL\n")
    l1.start()

    # Now we establish a new channel, which gets stuck.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundwallet(10**7)
    executor.submit(l1.rpc.fundchannel, l2.info['id'], 100000)
    l1.daemon.wait_for_log("dev_disconnect")

    # Now complete old channel.
    bitcoind.generate_block(100)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')

    # And crash l1, which is stuck.
    l1.daemon.kill()

    # Now, restart and see if it can connect OK.
    del l1.daemon.opts['dev-disconnect']
    l1.start()

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 200000, wait_for_active=True)


@pytest.mark.skip('needs blackhole support')
@pytest.mark.developer("need dev-disconnect")
@unittest.skipIf(TEST_NETWORK != 'regtest', "elementsd doesn't yet support PSBT features we need")
@pytest.mark.openchannel('v2')
def test_fail_unconfirmed_openchannel2(node_factory, bitcoind, executor):
    """Test that if we crash with an unconfirmed connection to a known
    peer, we don't have a dangling peer in db"""
    # = is a NOOP disconnect, but sets up file.
    l1 = node_factory.get_node(disconnect=['=WIRE_OPEN_CHANNEL2'])
    l2 = node_factory.get_node()

    # First one, we close by mutual agreement.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 200000, wait_for_active=True)
    l1.rpc.close(l2.info['id'])

    # Make sure it's closed
    l1.wait_for_channel_onchain(l2.info['id'])
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('State changed from CLOSINGD_COMPLETE to FUNDING_SPEND_SEEN')

    l1.stop()
    # Mangle disconnect file so this time it blackholes....
    with open(l1.daemon.disconnect_file, "w") as f:
        f.write("0WIRE_OPEN_CHANNEL2\n")
    l1.start()

    # Now we establish a new channel, which gets stuck.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundwallet(10**7)
    executor.submit(l1.rpc.fundchannel, l2.info['id'], 100000)
    l1.daemon.wait_for_log("dev_disconnect")

    # Now complete old channel.
    bitcoind.generate_block(100)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')

    # And crash l1, which is stuck.
    l1.daemon.kill()

    # Now, restart and see if it can connect OK.
    del l1.daemon.opts['dev-disconnect']
    l1.start()

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 200000, wait_for_active=True)


@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_change_chaining(node_factory, bitcoind):
    """Test change chaining of unconfirmed fundings

    Change chaining is the case where one transaction is broadcast but not
    confirmed yet and we already build a followup on top of the change. If the
    first transaction doesn't confirm we may end up creating a series of
    unconfirmable transactions. This is why we generally disallow chaining.
    """
    l1, l2, l3 = node_factory.get_nodes(3)
    l1.fundwallet(10**8)  # This will create an output with 1 confirmation

    # Now fund a channel from l1 to l2, that should succeed, with minconf=1 but not before
    l1.connect(l2)
    with pytest.raises(RpcError):
        l1.rpc.fundchannel(l2.info['id'], 10**7, minconf=2)
    l1.rpc.fundchannel(l2.info['id'], 10**7)  # Defaults to minconf=1

    # We don't have confirmed outputs anymore, so this should fail without minconf=0
    l1.connect(l3)
    with pytest.raises(RpcError):
        l1.rpc.fundchannel(l3.info['id'], 10**7)  # Defaults to minconf=1
    l1.rpc.fundchannel(l3.info['id'], 10**7, minconf=0)


@unittest.skipIf(TEST_NETWORK == 'liquid-regtest', "Fees on elements are different")
def test_feerate_spam(node_factory, chainparams):
    l1, l2 = node_factory.line_graph(2)

    # We constrain the value the opener has at its disposal so we get the
    # REMOTE feerate we are looking for below. This may be fragile and depends
    # on the transactions we generate.
    slack = 45000000

    # Pay almost everything to l2.
    l1.pay(l2, 10**9 - slack)

    # It will send this once (may have happened before line_graph's wait)
    wait_for(lambda: l1.daemon.is_in_log('Setting REMOTE feerate to 11000'))
    wait_for(lambda: l1.daemon.is_in_log('peer_out WIRE_UPDATE_FEE'))

    # Now change feerates to something l1 can't afford.
    l1.set_feerates((100000, 100000, 100000, 100000))

    # It will raise as far as it can (48000) (30000 for option_anchor_outputs)
    maxfeerate = 30000 if EXPERIMENTAL_FEATURES else 48000
    l1.daemon.wait_for_log('Setting REMOTE feerate to {}'.format(maxfeerate))
    l1.daemon.wait_for_log('peer_out WIRE_UPDATE_FEE')

    # But it won't do it again once it's at max.
    with pytest.raises(TimeoutError):
        l1.daemon.wait_for_log('peer_out WIRE_UPDATE_FEE', timeout=5)


@pytest.mark.developer("need dev-feerate")
def test_feerate_stress(node_factory, executor):
    # Third node makes HTLC traffic less predictable.
    l1, l2, l3 = node_factory.line_graph(3, opts={'commit-time': 100,
                                                  'may_reconnect': True})

    l1.pay(l2, 10**9 // 2)
    scid12 = l1.get_channel_scid(l2)
    scid23 = l2.get_channel_scid(l3)

    routel1l3 = [{'amount_msat': '10002msat', 'id': l2.info['id'], 'delay': 11, 'channel': scid12},
                 {'amount_msat': '10000msat', 'id': l3.info['id'], 'delay': 5, 'channel': scid23}]
    routel2l1 = [{'amount_msat': '10000msat', 'id': l1.info['id'], 'delay': 5, 'channel': scid12}]
2019-10-28 04:33:42 +01:00
rate = 1875
NUM_ATTEMPTS = 25
l1done = 0
l2done = 0
prev_log = 0
while l1done < NUM_ATTEMPTS and l2done < NUM_ATTEMPTS :
try :
r = random . randrange ( 6 )
if r == 5 :
l1 . rpc . sendpay ( routel1l3 , " {:064x} " . format ( l1done ) )
l1done + = 1
elif r == 4 :
l2 . rpc . sendpay ( routel2l1 , " {:064x} " . format ( l2done ) )
l2done + = 1
elif r > 0 :
l1 . rpc . call ( ' dev-feerate ' , [ l2 . info [ ' id ' ] , rate ] )
rate + = 5
else :
l2 . rpc . disconnect ( l1 . info [ ' id ' ] , True )
time . sleep ( 1 )
except RpcError :
time . sleep ( 0.01 )
assert not l1 . daemon . is_in_log ( ' Bad.*signature ' , start = prev_log )
prev_log = len ( l1 . daemon . logs )
# Make sure it's reconnected, and wait for last payment.
wait_for ( lambda : l1 . rpc . getpeer ( l2 . info [ ' id ' ] ) [ ' connected ' ] )
2020-05-04 09:32:27 +02:00
# We can get TEMPORARY_CHANNEL_FAILURE due to disconnect, too.
with pytest . raises ( RpcError , match = ' WIRE_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS|WIRE_TEMPORARY_CHANNEL_FAILURE ' ) :
2019-10-28 04:33:42 +01:00
l1 . rpc . waitsendpay ( " {:064x} " . format ( l1done - 1 ) )
2020-05-04 09:32:27 +02:00
with pytest . raises ( RpcError , match = ' WIRE_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS|WIRE_TEMPORARY_CHANNEL_FAILURE ' ) :
2019-10-28 04:33:42 +01:00
l2 . rpc . waitsendpay ( " {:064x} " . format ( l2done - 1 ) )
l1 . rpc . call ( ' dev-feerate ' , [ l2 . info [ ' id ' ] , rate - 5 ] )
assert not l1 . daemon . is_in_log ( ' Bad.*signature ' )
assert not l2 . daemon . is_in_log ( ' Bad.*signature ' )


@pytest.mark.developer("need dev_disconnect")
@pytest.mark.slow_test
def test_pay_disconnect_stress(node_factory, executor):
    """Expose race in htlc restoration in channeld: 50% chance of failure"""
    if node_factory.valgrind:
        NUM_RUNS = 2
    else:
        NUM_RUNS = 5

    for i in range(NUM_RUNS):
        l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True},
                                                  {'may_reconnect': True,
                                                   'disconnect': ['=WIRE_UPDATE_ADD_HTLC',
                                                                  '-WIRE_COMMITMENT_SIGNED']}])

        scid12 = l1.get_channel_scid(l2)
        routel2l1 = [{'amount_msat': '10000msat', 'id': l1.info['id'], 'delay': 5, 'channel': scid12}]

        # Get invoice from l1 to pay.
        inv = l1.rpc.invoice(10000, "invoice", "invoice")
        payhash1 = inv['payment_hash']

        # Start balancing payment.
        fut = executor.submit(l1.pay, l2, 10**9 // 2)

        # As soon as reverse payment is accepted, reconnect.
        while True:
            l2.rpc.sendpay(routel2l1, payhash1, payment_secret=inv['payment_secret'])
            try:
                # This will usually fail with Capacity exceeded
                l2.rpc.waitsendpay(payhash1, timeout=TIMEOUT)
                break
            except RpcError:
                pass

        fut.result()


@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_wumbo_channels(node_factory, bitcoind):
    l1, l2, l3 = node_factory.get_nodes(3,
                                        opts=[{'large-channels': None},
                                              {'large-channels': None},
                                              {}])
    conn = l1.rpc.connect(l2.info['id'], 'localhost', port=l2.port)

    expected_features = expected_peer_features(wumbo_channels=True)
    if l1.config('experimental-dual-fund'):
        expected_features = expected_peer_features(wumbo_channels=True,
                                                   extra=[21, 29])

    assert conn['features'] == expected_features
    assert only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['features'] == expected_features

    # Now, can we open a giant channel?
    l1.fundwallet(1 << 26)
    l1.rpc.fundchannel(l2.info['id'], 1 << 24)

    # Get that mined, and announced.
    bitcoind.generate_block(6, wait_for_mempool=1)

    # Connect l3, get gossip.
    l3.rpc.connect(l1.info['id'], 'localhost', port=l1.port)
    wait_for(lambda: len(l3.rpc.listnodes(l1.info['id'])['nodes']) == 1)
    wait_for(lambda: 'features' in only_one(l3.rpc.listnodes(l1.info['id'])['nodes']))

    # Make sure channel capacity is what we expected.
    assert ([c['amount_msat'] for c in l3.rpc.listchannels()['channels']]
            == [Millisatoshi(str(1 << 24) + "sat")] * 2)

    # Make sure channel features are right from channel_announcement
    assert ([c['features'] for c in l3.rpc.listchannels()['channels']]
            == [expected_channel_features(wumbo_channels=True)] * 2)

    # Make sure we can't open a wumbo channel if we don't agree.
    with pytest.raises(RpcError, match='Amount exceeded'):
        l1.rpc.fundchannel(l3.info['id'], 1 << 24)

    # But we can open and announce a normal one.
    l1.rpc.fundchannel(l3.info['id'], 'all')
    bitcoind.generate_block(6, wait_for_mempool=1)
    wait_for(lambda: l1.channel_state(l3) == 'CHANNELD_NORMAL')

    # Make sure l2 sees correct size.
    wait_for(lambda: [c['amount_msat'] for c in l2.rpc.listchannels(l1.get_channel_scid(l3))['channels']]
             == [Millisatoshi(str((1 << 24) - 1) + "sat")] * 2)

    # Make sure 'all' works with wumbo peers.
    l1.rpc.close(l2.info['id'])
    bitcoind.generate_block(1, wait_for_mempool=1)
    wait_for(lambda: l1.channel_state(l2) == 'ONCHAIN')

    l1.rpc.connect(l2.info['id'], 'localhost', port=l2.port)
    l1.rpc.fundchannel(l2.info['id'], 'all')
    bitcoind.generate_block(1, wait_for_mempool=1)
    wait_for(lambda: 'CHANNELD_NORMAL' in [c['state'] for c in only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels']])

    # Exact amount depends on fees, but it will be wumbo!
    amount = [c['funding']['local_msat'] for c in only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'] if c['state'] == 'CHANNELD_NORMAL'][0]
    assert amount > Millisatoshi(str((1 << 24) - 1) + "sat")


@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_channel_features(node_factory, bitcoind):
    l1, l2 = node_factory.line_graph(2, fundchannel=False)

    bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'], 0.1)
    bitcoind.generate_block(1)
    wait_for(lambda: l1.rpc.listfunds()['outputs'] != [])

    l1.rpc.fundchannel(l2.info['id'], 'all')

    # We should see features in unconfirmed channels.
    chan = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])
    assert 'option_static_remotekey' in chan['features']
    if EXPERIMENTAL_FEATURES or l1.config('experimental-dual-fund'):
        assert 'option_anchor_outputs' in chan['features']

    # l2 should agree.
    assert only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['features'] == chan['features']

    # Confirm it.
    bitcoind.generate_block(1)
    wait_for(lambda: only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CHANNELD_NORMAL')
    wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'CHANNELD_NORMAL')

    chan = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])
    assert 'option_static_remotekey' in chan['features']
    if EXPERIMENTAL_FEATURES or l1.config('experimental-dual-fund'):
        assert 'option_anchor_outputs' in chan['features']

    # l2 should agree.
    assert only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['features'] == chan['features']
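

# Sketch: the feature names above are already decoded by lightningd; a raw
# BOLT #9 feature map (as seen in gossip) is a big-endian hex string where
# (1 << bit) tests a feature, e.g. bits 12/13 for option_static_remotekey.
def _has_feature_bit(hexmap, bit):
    return bool(int(hexmap or '0', 16) & (1 << bit))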


@pytest.mark.developer("need dev-force-features")
def test_nonstatic_channel(node_factory, bitcoind):
    """Smoke test for a channel without option_static_remotekey"""
    l1, l2 = node_factory.line_graph(2,
                                     opts=[{},
                                           # needs at least 15 to connect
                                           # (and 9 is a dependent)
                                           {'dev-force-features': '9,15/////'}])

    chan = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])
    assert 'option_static_remotekey' not in chan['features']
    assert 'option_anchor_outputs' not in chan['features']

    l1.pay(l2, 1000)
    l1.rpc.close(l2.info['id'])


@pytest.mark.skip('needs blackhole support')
@pytest.mark.developer("need --dev-timeout-secs")
@pytest.mark.openchannel('v1')
def test_connection_timeout(node_factory):
    # l1 hears nothing back after sending INIT, should time out.
    l1, l2 = node_factory.get_nodes(2,
                                    opts=[{'dev-timeout-secs': 1,
                                           'disconnect': ['0WIRE_INIT', '0WIRE_INIT']},
                                          {}])

    with pytest.raises(RpcError, match='timed out'):
        l1.rpc.connect(l2.info['id'], 'localhost', port=l2.port)
    l1.daemon.wait_for_log('conn timed out')

    with pytest.raises(RpcError, match=r'(reset by peer|peer closed connection)'):
        l2.rpc.connect(l1.info['id'], 'localhost', port=l1.port)
    l1.daemon.wait_for_log('conn timed out')


@pytest.mark.developer("needs --dev-disconnect")
def test_htlc_retransmit_order(node_factory, executor):
    NUM_HTLCS = 10
    l1, l2 = node_factory.line_graph(2,
                                     opts=[{'may_reconnect': True,
                                            'feerates': (7500, 7500, 7500, 7500),
                                            'disconnect': ['=WIRE_UPDATE_ADD_HTLC*' + str(NUM_HTLCS),
                                                           '-WIRE_COMMITMENT_SIGNED'],
                                            'dev-disable-commit-after': 0},
                                           {'may_reconnect': True}])
    invoices = [l2.rpc.invoice(1000, str(x), str(x)) for x in range(NUM_HTLCS)]

    routestep = {
        'amount_msat': 1000,
        'id': l2.info['id'],
        'delay': 5,
        'channel': '1x1x1'  # note: can be bogus for 1-hop direct payments
    }
    for inv in invoices:
        executor.submit(l1.rpc.sendpay, [routestep], inv['payment_hash'], payment_secret=inv['payment_secret'])

    l1.daemon.wait_for_log('dev_disconnect')
    l1.rpc.call('dev-reenable-commit', [l2.info['id']])
    l1.daemon.wait_for_log('dev_disconnect')

    # Now reconnect.
    l1.rpc.connect(l2.info['id'], 'localhost', port=l2.port)

    for inv in invoices:
        result = l1.rpc.waitsendpay(inv['payment_hash'])
        assert(result['status'] == 'complete')

    # If order was wrong, we'll get a LOG_BROKEN and fixtures will complain.
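

# Sketch of the ordering rule the test checks (per BOLT #2, simplified): on
# reconnect, uncommitted updates must be replayed in their original order
# before commitment_signed, otherwise the peers would sign different
# commitment transactions and we'd see the bad-signature LOG_BROKEN.
def _replay_updates(queued_updates, send):
    for upd in queued_updates:  # order must be preserved
        send(upd)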


@unittest.skipIf(True, "Currently failing, see tracking issue #4265")
@pytest.mark.openchannel('v1')
def test_fundchannel_start_alternate(node_factory, executor):
    ''' Test to see what happens if two nodes start channeling to
    each other alternately.
    Issue #4108
    '''
    l1, l2 = node_factory.get_nodes(2)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.fundchannel_start(l2.info['id'], 100000)

    fut = executor.submit(l2.rpc.fundchannel_start, l1.info['id'], 100000)
    with pytest.raises(RpcError):
        fut.result(10)


@pytest.mark.openchannel('v2')
def test_openchannel_init_alternate(node_factory, executor):
    ''' Test to see what happens if two nodes start channeling to
    each other alternately.
    '''
    l1, l2 = node_factory.get_nodes(2)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundwallet(2000000)
    l2.fundwallet(2000000)

    psbt1 = l1.rpc.fundpsbt('1000000msat', '253perkw', 250)['psbt']
    psbt2 = l2.rpc.fundpsbt('1000000msat', '253perkw', 250)['psbt']
    init = l1.rpc.openchannel_init(l2.info['id'], 100000, psbt1)

    fut = executor.submit(l2.rpc.openchannel_init, l1.info['id'], '1000000msat', psbt2)
    with pytest.raises(RpcError):
        fut.result(10)

    # FIXME: Clean up so it doesn't hang. Ok if these fail.
    for node in [l1, l2]:
        try:
            node.rpc.openchannel_abort(init['channel_id'])
        except RpcError:
            # Ignoring all errors
            print("nothing to do")


@unittest.skipIf(not EXPERIMENTAL_FEATURES, "upgrade protocol not available")
@pytest.mark.developer("dev-force-features required")
def test_upgrade_statickey(node_factory, executor):
    """l1 doesn't have option_static_remotekey, l2 offers it."""
    l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True,
                                               'dev-force-features': ["-13", "-21"]},
                                              {'may_reconnect': True}])

    l1.rpc.disconnect(l2.info['id'], force=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    l1.daemon.wait_for_logs([r"They sent current_channel_type \[\]",
                             r"They offered upgrade to \[12\]"])
    l2.daemon.wait_for_log(r"They sent desired_channel_type \[12\]")

    l1.daemon.wait_for_log('option_static_remotekey enabled at 1/1')
    l2.daemon.wait_for_log('option_static_remotekey enabled at 1/1')

    # Make sure it's committed to db!
    wait_for(lambda: l1.db_query('SELECT local_static_remotekey_start, remote_static_remotekey_start FROM channels;') == [{'local_static_remotekey_start': 1, 'remote_static_remotekey_start': 1}])

    # They will consider themselves upgraded.
    l1.rpc.disconnect(l2.info['id'], force=True)
    # They won't offer upgrade!
    assert not l1.daemon.is_in_log("They offered upgrade",
                                   start=l1.daemon.logsearch_start)
    l1.daemon.wait_for_log(r"They sent current_channel_type \[12\]")
    l2.daemon.wait_for_log(r"They sent desired_channel_type \[12\]")


@unittest.skipIf(not EXPERIMENTAL_FEATURES, "upgrade protocol not available")
@pytest.mark.developer("dev-force-features required")
def test_upgrade_statickey_onchaind(node_factory, executor, bitcoind):
    """We test penalty before/after, and unilateral before/after"""
    l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True,
                                               'dev-no-reconnect': None,
                                               'dev-force-features': ["-13", "-21"],
                                               # We try to cheat!
                                               'allow_broken_log': True},
                                              {'may_reconnect': True,
                                               'dev-no-reconnect': None}])

    # TEST 1: Cheat from pre-upgrade.
    tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']

    l1.rpc.disconnect(l2.info['id'], force=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.daemon.wait_for_log('option_static_remotekey enabled at 1/1')

    # Make sure another commitment happens, sending failed payment.
    routestep = {
        'amount_msat': 1,
        'id': l2.info['id'],
        'delay': 5,
        'channel': '1x1x1'  # note: can be bogus for 1-hop direct payments
    }
    l1.rpc.sendpay([routestep], '00' * 32, payment_secret='00' * 32)
    with pytest.raises(RpcError, match=r'WIRE_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
        l1.rpc.waitsendpay('00' * 32)

    # Make sure l2 gets REVOKE_AND_ACK from previous.
    l2.daemon.wait_for_log('peer_in WIRE_UPDATE_ADD_HTLC')
    l2.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
    l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')

    # Pre-statickey penalty works.
    bitcoind.rpc.sendrawtransaction(tx)
    bitcoind.generate_block(1)

    l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
                                   'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM')
    bitcoind.generate_block(100)
    wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)

    # TEST 2: Cheat from post-upgrade.
    node_factory.join_nodes([l1, l2])
    l1.rpc.disconnect(l2.info['id'], force=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    l1.daemon.wait_for_log('option_static_remotekey enabled at 1/1')
    l2.daemon.wait_for_log('option_static_remotekey enabled at 1/1')

    l1.pay(l2, 1000000)

    # We will try to cheat later.
    tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']

    l1.pay(l2, 1000000)

    # Post-statickey penalty works.
    bitcoind.rpc.sendrawtransaction(tx)
    bitcoind.generate_block(1)
    l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
                                   'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM')
    bitcoind.generate_block(100)
    wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)

    # TEST 3: Unilateral close from pre-upgrade
    node_factory.join_nodes([l1, l2])

    # Give them both something for onchain close.
    l1.pay(l2, 1000000)

    # Make sure it's completely quiescent.
    l1.daemon.wait_for_log("chan#3: Removing out HTLC 0 state RCVD_REMOVE_ACK_REVOCATION FULFILLED")

    l1.rpc.disconnect(l2.info['id'], force=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.daemon.wait_for_log('option_static_remotekey enabled at 3/3')

    # But this is the *pre*-update commit tx!
    l2.stop()
    l1.rpc.close(l2.info['id'], unilateraltimeout=1)
    bitcoind.generate_block(1, wait_for_mempool=1)
    l2.start()

    # They should both handle it fine.
    l1.daemon.wait_for_log('Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
    l2.daemon.wait_for_logs(['Ignoring output .*: THEIR_UNILATERAL/OUTPUT_TO_US',
                             'Ignoring output .*: THEIR_UNILATERAL/DELAYED_OUTPUT_TO_THEM'])

    bitcoind.generate_block(5)
    bitcoind.generate_block(100, wait_for_mempool=1)
    wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)

    # TEST 4: Unilateral close from post-upgrade
    node_factory.join_nodes([l1, l2])

    l1.rpc.disconnect(l2.info['id'], force=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.daemon.wait_for_log('option_static_remotekey enabled at 1/1')

    # Move to static_remotekey.
    l1.pay(l2, 1000000)

    l2.stop()
    l1.rpc.close(l2.info['id'], unilateraltimeout=1)
    bitcoind.generate_block(1, wait_for_mempool=1)
    l2.start()

    # They should both handle it fine.
    l1.daemon.wait_for_log('Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
    l2.daemon.wait_for_logs(['Ignoring output .*: THEIR_UNILATERAL/OUTPUT_TO_US',
                             'Ignoring output .*: THEIR_UNILATERAL/DELAYED_OUTPUT_TO_THEM'])

    bitcoind.generate_block(5)
    bitcoind.generate_block(100, wait_for_mempool=1)
    wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)


@unittest.skipIf(not EXPERIMENTAL_FEATURES, "upgrade protocol not available")
@pytest.mark.developer("dev-force-features, dev-disconnect required")
def test_upgrade_statickey_fail(node_factory, executor, bitcoind):
    """We reconnect at all points during retransmit, and we won't upgrade."""
    l1_disconnects = ['-WIRE_COMMITMENT_SIGNED',
                      '-WIRE_REVOKE_AND_ACK']
    l2_disconnects = ['-WIRE_REVOKE_AND_ACK',
                      '-WIRE_COMMITMENT_SIGNED']

    l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True,
                                               'dev-no-reconnect': None,
                                               'disconnect': l1_disconnects,
                                               'dev-force-features': ["-13", "-21"],
                                               # Don't have feerate changes!
                                               'feerates': (7500, 7500, 7500, 7500)},
                                              {'may_reconnect': True,
                                               'dev-no-reconnect': None,
                                               'disconnect': l2_disconnects,
                                               'plugin': os.path.join(os.getcwd(), 'tests/plugins/hold_htlcs.py'),
                                               'hold-time': 10000,
                                               'hold-result': 'fail'}])

    # This HTLC will fail
    l1.rpc.sendpay([{'amount_msat': 1000, 'id': l2.info['id'], 'delay': 5, 'channel': '1x1x1'}], '00' * 32, payment_secret='00' * 32)

    # Each one should cause one disconnection, no upgrade.
    for d in l1_disconnects + l2_disconnects:
        l1.daemon.wait_for_log('Peer connection lost')
        l2.daemon.wait_for_log('Peer connection lost')
        assert not l1.daemon.is_in_log('option_static_remotekey enabled')
        assert not l2.daemon.is_in_log('option_static_remotekey enabled')
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    line1 = l1.daemon.wait_for_log('No upgrade')
    line2 = l2.daemon.wait_for_log('No upgrade')

    # On the last reconnect, it retransmitted revoke_and_ack.
    assert re.search('No upgrade: we retransmitted', line1)
    assert re.search('No upgrade: pending changes', line2)

    # Make sure we already skip the first of these.
    l1.daemon.wait_for_log('billboard perm: Reconnected, and reestablished.')
    assert 'option_static_remotekey' not in only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['features']
    assert 'option_static_remotekey' not in only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['features']

    sleeptime = 1
    while True:
        # Now when we reconnect, despite having an HTLC, we're quiescent.
        l1.rpc.disconnect(l2.info['id'], force=True)
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

        oldstart = l1.daemon.logsearch_start
        l1.daemon.wait_for_log('billboard perm: Reconnected, and reestablished.')
        if not l1.daemon.is_in_log('No upgrade:', start=oldstart):
            break

        # Give it some processing time before reconnect...
        time.sleep(sleeptime)
        sleeptime += 1

    l1.daemon.logsearch_start = oldstart
    assert l1.daemon.wait_for_log('option_static_remotekey enabled at 2/2')
    assert l2.daemon.wait_for_log('option_static_remotekey enabled at 2/2')
    assert 'option_static_remotekey' in only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['features']
    assert 'option_static_remotekey' in only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['features']
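

# Sketch of the upgrade gate implied by the 'No upgrade:' logs above (an
# inference from this test, not the daemon's code): the channel type is only
# upgraded on reestablish when neither side retransmits and no changes are
# pending, i.e. the channel is effectively quiescent.
def _can_upgrade_on_reestablish(retransmitted, pending_changes):
    return not retransmitted and not pending_changes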


@unittest.skipIf(not EXPERIMENTAL_FEATURES, "quiescence is experimental")
@pytest.mark.developer("quiescence triggering is dev only")
def test_quiescence(node_factory, executor):
    l1, l2 = node_factory.line_graph(2)

    # Works fine.
    l1.pay(l2, 1000)

    assert l1.rpc.call('dev-quiesce', [l2.info['id']]) == {}

    # Both should consider themselves quiescent.
    l1.daemon.wait_for_log("STFU complete: we are quiescent")
    l2.daemon.wait_for_log("STFU complete: we are quiescent")

    # Should not be able to increase fees.
    l1.rpc.call('dev-feerate', [l2.info['id'], 9999])

    try:
        l1.daemon.wait_for_log('peer_out WIRE_UPDATE_FEE', 5)
        assert False
    except TimeoutError:
        pass
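

# Sketch of the quiescence rule the test checks (a simplification of the
# protocol draft, not lightningd's implementation): once both sides have
# exchanged stfu, no further update_* messages (like update_fee) may be sent.
def _may_send_update(we_sent_stfu, they_sent_stfu):
    return not (we_sent_stfu and they_sent_stfu)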


def test_htlc_failed_noclose(node_factory):
    """Test a bug where the htlc timeout would kick in even if the HTLC failed"""
    l1, l2 = node_factory.line_graph(2)

    inv = l2.rpc.invoice(1000, "test", "test")
    routestep = {
        'amount_msat': FUNDAMOUNT * 1000,
        'id': l2.info['id'],
        'delay': 5,
        'channel': '1x1x1'  # note: can be bogus for 1-hop direct payments
    }

    # This fails at channeld
    l1.rpc.sendpay([routestep], inv['payment_hash'], payment_secret=inv['payment_secret'])
    with pytest.raises(RpcError, match="Capacity exceeded"):
        l1.rpc.waitsendpay(inv['payment_hash'])

    # Send a second one, too: make sure we don't crash.
    l1.rpc.sendpay([routestep], inv['payment_hash'], payment_secret=inv['payment_secret'])
    with pytest.raises(RpcError, match="Capacity exceeded"):
        l1.rpc.waitsendpay(inv['payment_hash'])

    time.sleep(35)
    assert l1.rpc.getpeer(l2.info['id'])['connected']


# The scenario below was reported by @fiatjaf and observed in the wild: a
# peer that negotiated a high feerate reconnects after our acceptable range
# has dropped, and retransmits its old update_fee, e.g. "update_fee 17055
# outside range 253-7210" in the billboard. connectd now tolerates such
# out-of-bounds retransmitted fees unless they are actually getting worse.
@pytest.mark.developer ( " dev-no-reconnect required " )
def test_old_feerate ( node_factory ) :
""" Test retransmission of old, now-unacceptable, feerate """
l1 , l2 = node_factory . line_graph ( 2 , opts = { ' feerates ' : ( 75000 , 75000 , 75000 , 75000 ) ,
' may_reconnect ' : True ,
' dev-no-reconnect ' : None } )
l1 . pay ( l2 , 1000 )
l1 . rpc . disconnect ( l2 . info [ ' id ' ] , force = True )
# Drop acceptable feerate by l2
l2 . set_feerates ( ( 7000 , 7000 , 7000 , 7000 ) )
l2 . restart ( )
# Minor change to l1, so it sends update_fee
l1 . set_feerates ( ( 74900 , 74900 , 74900 , 74900 ) )
l1 . restart ( )
l1 . rpc . connect ( l2 . info [ ' id ' ] , ' localhost ' , l2 . port )
# This will timeout if l2 didn't accept fee.
l1 . pay ( l2 , 1000 )


@pytest.mark.developer("needs --dev-allow-localhost")
def test_websocket(node_factory):
    ws_port = reserve()
    port1, port2 = reserve(), reserve()
    # We need a wildcard to show the websocket bug, but we need a real
    # address to give us something to announce.
    l1, l2 = node_factory.line_graph(2,
                                     opts=[{'experimental-websocket-port': ws_port,
                                            'addr': [':' + str(port1),
                                                     '127.0.0.1:' + str(port2)],
                                            'dev-allow-localhost': None},
                                           {'dev-allow-localhost': None}],
                                     wait_for_announce=True)
    assert l1.rpc.listconfigs()['experimental-websocket-port'] == ws_port

    # Adapter to turn websocket into a stream "connection"
    class BinWebSocket(object):
        def __init__(self, hostname, port):
            self.ws = websocket.WebSocket()
            self.ws.connect("ws://" + hostname + ":" + str(port))
            self.recvbuf = bytes()

        def send(self, data):
            self.ws.send(data, websocket.ABNF.OPCODE_BINARY)

        def recv(self, maxlen):
            while len(self.recvbuf) < maxlen:
                self.recvbuf += self.ws.recv()

            ret = self.recvbuf[:maxlen]
            self.recvbuf = self.recvbuf[maxlen:]
            return ret

    ws = BinWebSocket('localhost', ws_port)
    lconn = wire.LightningConnection(ws,
                                     wire.PublicKey(bytes.fromhex(l1.info['id'])),
                                     wire.PrivateKey(bytes([1] * 32)),
                                     is_initiator=True)

    l1.daemon.wait_for_log('Websocket connection in from')

    # Perform handshake.
    lconn.shake()

    # Expect to receive init msg (BOLT #1: type 16 == init).
    msg = lconn.read_message()
    assert int.from_bytes(msg[0:2], 'big') == 16

    # Echo same message back.
    lconn.send_message(msg)

    # Now try sending a ping, ask for 50 bytes
    # (type=18 is ping, then num_pong_bytes=50, byteslen=0).
    msg = bytes((0, 18, 0, 50, 0, 0))
    lconn.send_message(msg)

    # Could actually reply with some gossip msg!
    while True:
        msg = lconn.read_message()
        if int.from_bytes(msg[0:2], 'big') == 19:  # 19 == pong
            break

    # Check node_announcement has websocket
    ws_address = {'type': 'websocket', 'port': ws_port}
    assert ws_address in only_one(l2.rpc.listnodes(l1.info['id'])['nodes'])['addresses']
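

# Sketch: the raw ping sent above, built per BOLT #1 framing (u16 type=18,
# u16 num_pong_bytes, u16 byteslen, then the ignored bytes); _make_ping(50)
# reproduces bytes((0, 18, 0, 50, 0, 0)).
import struct


def _make_ping(num_pong_bytes, ignored=b''):
    return struct.pack('!HHH', 18, num_pong_bytes, len(ignored)) + ignored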


@pytest.mark.developer("dev-disconnect required")
def test_ping_timeout(node_factory):
    # Disconnects after this, but doesn't know it.
    l1_disconnects = ['xWIRE_PING']

    l1, l2 = node_factory.line_graph(2, opts=[{'dev-no-reconnect': None,
                                               'disconnect': l1_disconnects},
                                              {}])
    # Takes 15-45 seconds, then another to try second ping
    # Because of ping timer randomness we don't know which side hangs up first
    wait_for(lambda: l1.rpc.getpeer(l2.info['id'])['connected'] is False,
             timeout=45 + 45 + 5)
    wait_for(lambda: (l1.daemon.is_in_log('Last ping unreturned: hanging up')
                      or l2.daemon.is_in_log('Last ping unreturned: hanging up')))
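

# Sketch of the timing bound used above (an assumption drawn from the
# comment, not the daemon's code): each side pings at a random point in a
# roughly 15-45s window, so two full windows plus slack bounds the hangup.
def _ping_timeout_bound(window_hi=45, slack=5):
    return window_hi + window_hi + slack  # matches timeout=45 + 45 + 5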


@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_multichan(node_factory, executor, bitcoind):
    """Test multiple channels between same nodes"""
    l1, l2, l3 = node_factory.line_graph(3, opts={'may_reconnect': True})

    scid12 = l1.get_channel_scid(l2)
    scid23a = l2.get_channel_scid(l3)

    # Now fund *second* channel l2->l3 (slightly larger)
    bitcoind.rpc.sendtoaddress(l2.rpc.newaddr()['bech32'], 0.1)
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [l2])
    l2.rpc.fundchannel(l3.info['id'], '0.01001btc')
    assert(len(only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['channels']) == 2)
    assert(len(only_one(l3.rpc.listpeers(l2.info['id'])['peers'])['channels']) == 2)

    bitcoind.generate_block(1, wait_for_mempool=1)

    # Dance around to get the *other* scid.
    wait_for(lambda: all(['short_channel_id' in c for c in l3.rpc.listpeers()['peers'][0]['channels']]))
    scids = [c['short_channel_id'] for c in l3.rpc.listpeers()['peers'][0]['channels']]
    assert len(scids) == 2

    if scids[0] == scid23a:
        scid23b = scids[1]
    else:
        assert scids[1] == scid23a
        scid23b = scids[0]

    # Test paying by each,
    route = [{'amount_msat': 100001001,
              'id': l2.info['id'],
              'delay': 11,
              # Unneeded
              'channel': scid12},
             {'amount_msat': 100000000,
              'id': l3.info['id'],
              'delay': 5,
              'channel': scid23a}]

    before = only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['channels']
    inv = l3.rpc.invoice(100000000, "invoice", "invoice")
    l1.rpc.sendpay(route, inv['payment_hash'], payment_secret=inv['payment_secret'])
    l1.rpc.waitsendpay(inv['payment_hash'])
    # Wait until HTLCs fully settled
    wait_for(lambda: [c['htlcs'] for c in only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['channels']] == [[], []])
    after = only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['channels']

    if before[0]['short_channel_id'] == scid23a:
        chan23a_idx = 0
        chan23b_idx = 1
    else:
        chan23a_idx = 1
        chan23b_idx = 0

    # Gratuitous reconnect
    with pytest.raises(RpcError, match=r"Peer has \(at least one\) channel"):
        l3.rpc.disconnect(l2.info['id'])
    l3.rpc.disconnect(l2.info['id'], force=True)
    l3.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # Check it used the larger channel!
    assert before[chan23a_idx]['to_us_msat'] == after[chan23a_idx]['to_us_msat']
    assert before[chan23b_idx]['to_us_msat'] != after[chan23b_idx]['to_us_msat']

    before = only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['channels']
    route[1]['channel'] = scid23b
    inv = l3.rpc.invoice(100000000, "invoice2", "invoice2")
    l1.rpc.sendpay(route, inv['payment_hash'], payment_secret=inv['payment_secret'])
    l1.rpc.waitsendpay(inv['payment_hash'])
    # Wait until HTLCs fully settled
    wait_for(lambda: [c['htlcs'] for c in only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['channels']] == [[], []])
    after = only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['channels']

    # Now the first channel is larger!
    assert before[chan23a_idx]['to_us_msat'] != after[chan23a_idx]['to_us_msat']
    assert before[chan23b_idx]['to_us_msat'] == after[chan23b_idx]['to_us_msat']

    # Make sure gossip works.
    bitcoind.generate_block(5)
    wait_for(lambda: len(l1.rpc.listchannels(source=l3.info['id'])['channels']) == 2)

    chans = l1.rpc.listchannels(source=l3.info['id'])['channels']
    if chans[0]['short_channel_id'] == scid23a:
        chan23a = chans[0]
        chan23b = chans[1]
    else:
        chan23a = chans[1]
        chan23b = chans[0]

    assert chan23a['amount_msat'] == Millisatoshi(1000000000)
    assert chan23a['short_channel_id'] == scid23a
    assert chan23b['amount_msat'] == Millisatoshi(1001000000)
    assert chan23b['short_channel_id'] == scid23b

    # We can close one, other one is still fine.
    with pytest.raises(RpcError, match="Peer has multiple channels"):
        l2.rpc.close(l3.info['id'])
    l2.rpc.close(scid23b)
    bitcoind.generate_block(1, wait_for_mempool=1)

    # Gossip works as expected.
    wait_for(lambda: len(l1.rpc.listchannels(source=l3.info['id'])['channels']) == 1)
    assert only_one(l1.rpc.listchannels(source=l3.info['id'])['channels'])['short_channel_id'] == scid23a

    # We can actually pay by *closed* scid (at least until it's completely forgotten)
    route[1]['channel'] = scid23b
    inv = l3.rpc.invoice(100000000, "invoice3", "invoice3")
    l1.rpc.sendpay(route, inv['payment_hash'], payment_secret=inv['payment_secret'])
    l1.rpc.waitsendpay(inv['payment_hash'])

    # Restart with multiple channels works.
    l3.restart()

    # FIXME: race against autoconnect can cause spurious failure (but we connect!)
    try:
        l3.rpc.connect(l2.info['id'], 'localhost', l2.port)
    except RpcError:
        wait_for(lambda: only_one(l3.rpc.listpeers(l2.info['id'])['peers'])['connected'])

    inv = l3.rpc.invoice(100000000, "invoice4", "invoice4")
    l1.rpc.pay(inv['bolt11'])