from bitcoin.core import COIN  # type: ignore
from bitcoin.rpc import RawProxy as BitcoinProxy  # type: ignore
from bitcoin.rpc import JSONRPCError
from contextlib import contextmanager
from pathlib import Path
from pyln.client import RpcError
from pyln.testing.btcproxy import BitcoinRpcProxy
from pyln.testing.gossip import GossipStore
from collections import OrderedDict
from decimal import Decimal
from pyln.client import LightningRpc
from pyln.client import Millisatoshi
from pyln.testing import grpc

import ephemeral_port_reserve  # type: ignore
import json
import logging
import lzma
import math
import os
import psutil  # type: ignore
import random
import re
import shutil
import sqlite3
import string
import struct
import subprocess
import sys
import threading
import time
import warnings

BITCOIND_CONFIG = {
    "regtest": 1,
    "rpcuser": "rpcuser",
    "rpcpassword": "rpcpass",
    "fallbackfee": Decimal(1000) / COIN,
}

LIGHTNINGD_CONFIG = OrderedDict({
    "log-level": "debug",
    "cltv-delta": 6,
    "cltv-final": 5,
    "watchtime-blocks": 5,
    "rescan": 1,
    'disable-dns': None,
})

FUNDAMOUNT = 10**6


def env(name, default=None):
    """Access to environment variables

    Allows access to environment variables, falling back to config.vars (part
    of Core Lightning's `./configure` output), and finally falling back to a
    default value.
    """
    fname = 'config.vars'
    if os.path.exists(fname):
        lines = open(fname, 'r').readlines()
        config = dict([(line.rstrip().split('=', 1)) for line in lines])
    else:
        config = {}

    if name in os.environ:
        return os.environ[name]
    elif name in config:
        return config[name]
    else:
        return default
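
# A minimal sketch of the resolution order: the environment wins, then
# config.vars (from `./configure`), then the default, e.g.:
#   env("TEST_NETWORK", "regtest")  # exported value, config.vars value, or "regtest"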

VALGRIND = env("VALGRIND") == "1"
TEST_NETWORK = env("TEST_NETWORK", 'regtest')
DEVELOPER = env("DEVELOPER", "0") == "1"
TEST_DEBUG = env("TEST_DEBUG", "0") == "1"
SLOW_MACHINE = env("SLOW_MACHINE", "0") == "1"
DEPRECATED_APIS = env("DEPRECATED_APIS", "0") == "1"
TIMEOUT = int(env("TIMEOUT", 180 if SLOW_MACHINE else 60))
EXPERIMENTAL_DUAL_FUND = env("EXPERIMENTAL_DUAL_FUND", "0") == "1"


def wait_for(success, timeout=TIMEOUT):
    start_time = time.time()
    interval = 0.25
    while not success():
        time_left = start_time + timeout - time.time()
        if time_left <= 0:
            raise ValueError("Timeout while waiting for {}".format(success))
        time.sleep(min(interval, time_left))
        interval *= 2
        if interval > 5:
            interval = 5
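
# Example (a sketch): poll with exponential backoff (0.25s doubling, capped
# at 5s) until a predicate holds, e.g. waiting for a node to see a block:
#   wait_for(lambda: node.rpc.getinfo()['blockheight'] == height)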


def write_config(filename, opts, regtest_opts=None, section_name='regtest'):
    with open(filename, 'w') as f:
        for k, v in opts.items():
            f.write("{}={}\n".format(k, v))
        if regtest_opts:
            f.write("[{}]\n".format(section_name))
            for k, v in regtest_opts.items():
                f.write("{}={}\n".format(k, v))
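
# For instance, write_config('bitcoin.conf', {'rpcuser': 'rpcuser'},
# {'rpcport': 18332}) produces:
#   rpcuser=rpcuser
#   [regtest]
#   rpcport=18332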


def only_one(arr):
    """Many JSON RPC calls return an array; often we only expect a single entry
    """
    assert len(arr) == 1
    return arr[0]


def sync_blockheight(bitcoind, nodes):
    height = bitcoind.rpc.getblockchaininfo()['blocks']
    for n in nodes:
        wait_for(lambda: n.rpc.getinfo()['blockheight'] == height)


def mine_funding_to_announce(bitcoind, nodes, num_blocks=5, wait_for_mempool=0):
    """Mine blocks so a channel can be announced (5, if it's already
    mined), but make sure we don't leave nodes behind who will reject the
    announcement.  Not needed if there are only two nodes.
    """
    bitcoind.generate_block(num_blocks - 1, wait_for_mempool)
    sync_blockheight(bitcoind, nodes)
    bitcoind.generate_block(1)
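
# The split above matters: announcements need the funding tx buried (6
# confirmations by default), and a node still syncing when the final block
# arrives would reject the announcement as premature.  A usage sketch,
# assuming nodes l1/l2/l3 and a funding tx already in the mempool:
#   mine_funding_to_announce(bitcoind, [l1, l2, l3], wait_for_mempool=1)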


def wait_channel_quiescent(n1, n2):
    wait_for(lambda: only_one(only_one(n1.rpc.listpeers(n2.info['id'])['peers'])['channels'])['htlcs'] == [])
    wait_for(lambda: only_one(only_one(n2.rpc.listpeers(n1.info['id'])['peers'])['channels'])['htlcs'] == [])


def get_tx_p2wsh_outnum(bitcoind, tx, amount):
    """Get output number of this tx which is p2wsh of amount"""
    decoded = bitcoind.rpc.decoderawtransaction(tx, True)

    for out in decoded['vout']:
        if out['scriptPubKey']['type'] == 'witness_v0_scripthash':
            if out['value'] == Decimal(amount) / 10**8:
                return out['n']

    return None


unused_port_lock = threading.Lock()
unused_port_set = set()


def reserve_unused_port():
    """Get an unused port: avoids handing out the same port unless it's been
    returned"""
    with unused_port_lock:
        while True:
            port = ephemeral_port_reserve.reserve()
            if port not in unused_port_set:
                break
        unused_port_set.add(port)

    return port


def drop_unused_port(port):
    unused_port_set.remove(port)
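
# Usage sketch: reserve a port for a daemon, then return it to the pool once
# that daemon is gone, so later reservations may hand it out again:
#   port = reserve_unused_port()
#   ...  # start something listening on `port`
#   drop_unused_port(port)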


class TailableProc(object):
    """A monitorable process that we can start, stop and tail.

    This is the base class for the daemons. It allows us to directly
    tail the processes and react to their output.
    """

    def __init__(self, outputDir, verbose=True):
        self.logs = []
        self.env = os.environ.copy()
        self.proc = None
        self.outputDir = outputDir
        if not os.path.exists(outputDir):
            os.makedirs(outputDir)
        # Create and open them.
        self.stdout_filename = os.path.join(outputDir, "log")
        self.stderr_filename = os.path.join(outputDir, "errlog")
        self.stdout_write = open(self.stdout_filename, "wt")
        self.stderr_write = open(self.stderr_filename, "wt")
        self.stdout_read = open(self.stdout_filename, "rt")
        self.stderr_read = open(self.stderr_filename, "rt")
        self.logsearch_start = 0
        self.err_logs = []
        self.prefix = ""

        # Should we be logging lines we read from stdout?
        self.verbose = verbose

        # A filter function that'll tell us whether to filter out the line (not
        # pass it to the log matcher and not print it to stdout).
        self.log_filter = lambda line: False

    def start(self, stdin=None, stdout_redir=True, stderr_redir=True):
        """Start the underlying process and start monitoring it. If
        stdout_redir is false, you have to make sure logs go into
        outputDir/log
        """
        logging.debug("Starting '%s'", " ".join(self.cmd_line))
        if stdout_redir:
            stdout = self.stdout_write
        else:
            stdout = None
        if stderr_redir:
            stderr = self.stderr_write
            self.stderr_redir = True
        else:
            stderr = None
            self.stderr_redir = False

        self.proc = subprocess.Popen(self.cmd_line,
                                     stdin=stdin,
                                     stdout=stdout,
                                     stderr=stderr,
                                     env=self.env)

    def stop(self, timeout=10):
        self.proc.terminate()

        # Now give it some time to react to the signal.  Note that wait()
        # raises TimeoutExpired rather than returning None on timeout.
        try:
            rc = self.proc.wait(timeout)
        except subprocess.TimeoutExpired:
            rc = None

        if rc is None:
            self.proc.kill()
            self.proc.wait()

        return self.proc.returncode

    def kill(self):
        """Kill process without giving it warning."""
        self.proc.kill()
        self.proc.wait()

    def logs_catchup(self):
        """Save the latest stdout/stderr contents; return true if we got anything.
        """
        new_stdout = self.stdout_read.readlines()
        if self.verbose:
            for line in new_stdout:
                sys.stdout.write("{}: {}".format(self.prefix, line))
        self.logs += [l.rstrip() for l in new_stdout]
        new_stderr = self.stderr_read.readlines()
        if self.verbose:
            for line in new_stderr:
                sys.stderr.write("{}-stderr: {}".format(self.prefix, line))
        self.err_logs += [l.rstrip() for l in new_stderr]
        return len(new_stdout) > 0 or len(new_stderr) > 0

    def is_in_log(self, regex, start=0):
        """Look for `regex` in the logs."""
        self.logs_catchup()
        ex = re.compile(regex)
        for l in self.logs[start:]:
            if ex.search(l):
                logging.debug("Found '%s' in logs", regex)
                return l

        logging.debug("Did not find '%s' in logs", regex)
        return None

    def is_in_stderr(self, regex):
        """Look for `regex` in stderr."""
        assert self.stderr_redir
        self.logs_catchup()
        ex = re.compile(regex)
        for l in self.err_logs:
            if ex.search(l):
                logging.debug("Found '%s' in stderr", regex)
                return l

        logging.debug("Did not find '%s' in stderr", regex)
        return None

    def wait_for_logs(self, regexs, timeout=TIMEOUT):
        """Look for `regexs` in the logs.

        The logs contain tailed stdout of the process. We look for each regex
        in `regexs`, starting from `logsearch_start` which normally is the
        position of the last found entry of a previous wait-for logs call.
        The ordering inside `regexs` doesn't matter.

        We fail if the timeout is exceeded or if the underlying process
        exits before all the `regexs` were found.

        If timeout is None, no time-out is applied.
        """
        logging.debug("Waiting for {} in the logs".format(regexs))
        exs = [re.compile(r) for r in regexs]
        start_time = time.time()
        while True:
            if self.logsearch_start >= len(self.logs):
                if not self.logs_catchup():
                    time.sleep(0.25)

                if timeout is not None and time.time() > start_time + timeout:
                    print("Time-out: can't find {} in logs".format(exs))
                    for r in exs:
                        if self.is_in_log(r):
                            print("({} was previously in logs!)".format(r))
                    raise TimeoutError('Unable to find "{}" in logs.'.format(exs))
                continue

            line = self.logs[self.logsearch_start]
            self.logsearch_start += 1
            for r in exs.copy():
                if r.search(line):
                    logging.debug("Found '%s' in logs", r)
                    exs.remove(r)
                    if len(exs) == 0:
                        return line
                    # Don't match same line with different regexs!
                    break

    def wait_for_log(self, regex, timeout=TIMEOUT):
        """Look for `regex` in the logs.

        Convenience wrapper for the common case of only seeking a single entry.
        """
        return self.wait_for_logs([regex], timeout)


class SimpleBitcoinProxy:
    """Wrapper for BitcoinProxy to reconnect.

    Long wait times between calls to the Bitcoin RPC could result in
    `bitcoind` closing the connection, so here we just create
    throwaway connections. This is easier than to reach into the RPC
    library to close, reopen and reauth upon failure.
    """
    def __init__(self, btc_conf_file, *args, **kwargs):
        self.__btc_conf_file__ = btc_conf_file

    def __getattr__(self, name):
        if name.startswith('__') and name.endswith('__'):
            # Python internal stuff
            raise AttributeError

        # Create a callable to do the actual call
        proxy = BitcoinProxy(btc_conf_file=self.__btc_conf_file__)

        def f(*args):
            logging.debug("Calling {name} with arguments {args}".format(
                name=name,
                args=args
            ))
            res = proxy._call(name, *args)
            logging.debug("Result for {name} call: {res}".format(
                name=name,
                res=res,
            ))
            return res

        # Make debuggers show <function bitcoin.rpc.name> rather than <function
        # bitcoin.rpc.<lambda>>
        f.__name__ = name
        return f
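

# A usage sketch: every attribute access builds a fresh throwaway proxy, so
# RPC methods can be called directly without worrying about stale connections:
#   rpc = SimpleBitcoinProxy(btc_conf_file='/tmp/bitcoind-test/bitcoin.conf')
#   height = rpc.getblockcount()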


class BitcoinD(TailableProc):

    def __init__(self, bitcoin_dir="/tmp/bitcoind-test", rpcport=None):
        TailableProc.__init__(self, bitcoin_dir, verbose=False)

        if rpcport is None:
            self.reserved_rpcport = reserve_unused_port()
            rpcport = self.reserved_rpcport
        else:
            self.reserved_rpcport = None

        self.bitcoin_dir = bitcoin_dir
        self.rpcport = rpcport
        self.prefix = 'bitcoind'

        regtestdir = os.path.join(bitcoin_dir, 'regtest')
        if not os.path.exists(regtestdir):
            os.makedirs(regtestdir)

        self.cmd_line = [
            'bitcoind',
            '-datadir={}'.format(bitcoin_dir),
            '-printtoconsole',
            '-server',
            '-logtimestamps',
            '-nolisten',
            '-txindex',
            '-nowallet',
            '-addresstype=bech32'
        ]
        # For up to and including 0.16.1, this needs to be in main section.
        BITCOIND_CONFIG['rpcport'] = rpcport
        # For after 0.16.1 (eg. 3f398d7a17f136cd4a67998406ca41a124ae2966), this
        # needs its own [regtest] section.
        BITCOIND_REGTEST = {'rpcport': rpcport}
        self.conf_file = os.path.join(bitcoin_dir, 'bitcoin.conf')
        write_config(self.conf_file, BITCOIND_CONFIG, BITCOIND_REGTEST)
        self.rpc = SimpleBitcoinProxy(btc_conf_file=self.conf_file)
        self.proxies = []

    def __del__(self):
        if self.reserved_rpcport is not None:
            drop_unused_port(self.reserved_rpcport)

    def start(self):
        TailableProc.start(self)
        self.wait_for_log("Done loading", timeout=TIMEOUT)

        logging.info("BitcoinD started")
        # Newer bitcoinds no longer create a default wallet, so create ours
        # here, or load it if a previous start already created it.
        try:
            self.rpc.createwallet("lightningd-tests")
        except JSONRPCError:
            self.rpc.loadwallet("lightningd-tests")

    def stop(self):
        for p in self.proxies:
            p.stop()
        self.rpc.stop()
        return TailableProc.stop(self)

    def get_proxy(self):
        proxy = BitcoinRpcProxy(self)
        self.proxies.append(proxy)
        proxy.start()
        return proxy

    # wait_for_mempool can be used to wait for the mempool before generating blocks:
    # True := wait for at least 1 transaction
    # int > 0 := wait for at least N transactions
    # 'tx_id' := wait for one transaction id given as a string
    # ['tx_id1', 'tx_id2'] := wait until all of the specified transaction IDs are in the mempool
    def generate_block(self, numblocks=1, wait_for_mempool=0, to_addr=None):
        if wait_for_mempool:
            if isinstance(wait_for_mempool, str):
                wait_for_mempool = [wait_for_mempool]
            if isinstance(wait_for_mempool, list):
                wait_for(lambda: all(txid in self.rpc.getrawmempool() for txid in wait_for_mempool))
            else:
                wait_for(lambda: len(self.rpc.getrawmempool()) >= wait_for_mempool)

        mempool = self.rpc.getrawmempool()
        logging.debug("Generating {numblocks}, confirming {lenmempool} transactions: {mempool}".format(
            numblocks=numblocks,
            mempool=mempool,
            lenmempool=len(mempool),
        ))

        # As of 0.16, generate() is removed; use generatetoaddress.
        if to_addr is None:
            to_addr = self.rpc.getnewaddress()
        return self.rpc.generatetoaddress(numblocks, to_addr)
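
    # For example (a sketch of the forms documented above; `funding_txid` is
    # a hypothetical txid string):
    #   bitcoind.generate_block(1, wait_for_mempool=True)   # at least 1 tx
    #   bitcoind.generate_block(1, wait_for_mempool=3)      # at least 3 txs
    #   bitcoind.generate_block(6, wait_for_mempool=funding_txid)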

    def simple_reorg(self, height, shift=0):
        """
        Reorganize chain by creating a fork at height=[height] and re-mine all mempool
        transactions into [height + shift], where shift >= 0. Returns hashes of generated
        blocks.

        Note that tx's that become invalid at [height] (because coin maturity, locktime
        etc.) are removed from mempool. The length of the new chain will be original + 1
        OR original + [shift], whichever is larger.

        For example: to push tx's backward from height h1 to h2 < h1, use [height]=h2.

        Or to change the txindex of tx's at height h1:
        1. A block at height h2 < h1 should contain a non-coinbase tx that can be pulled
           forward to h1.
        2. Set [height]=h2 and [shift]=h1-h2
        """
        hashes = []
        fee_delta = 1000000
        orig_len = self.rpc.getblockcount()
        old_hash = self.rpc.getblockhash(height)
        final_len = height + shift if height + shift > orig_len else 1 + orig_len
        # TODO: raise error for insane args?

        self.rpc.invalidateblock(old_hash)
        self.wait_for_log(r'InvalidChainFound: invalid block=.* height={}'.format(height))
        memp = self.rpc.getrawmempool()

        if shift == 0:
            hashes += self.generate_block(1 + final_len - height)
        else:
            for txid in memp:
                # lower priority (to effective feerate=0) so they are not mined
                self.rpc.prioritisetransaction(txid, None, -fee_delta)
            hashes += self.generate_block(shift)

            for txid in memp:
                # restore priority so they are mined
                self.rpc.prioritisetransaction(txid, None, fee_delta)
            hashes += self.generate_block(1 + final_len - (height + shift))
        self.wait_for_log(r'UpdateTip: new best=.* height={}'.format(final_len))
        return hashes
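
    # A sketch: on a 105-block chain, fork off the tip block and re-mine its
    # evicted transactions one block later, leaving the new chain longer:
    #   bitcoind.simple_reorg(105, shift=1)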

    def getnewaddress(self):
        return self.rpc.getnewaddress()


class ElementsD(BitcoinD):
    def __init__(self, bitcoin_dir="/tmp/bitcoind-test", rpcport=None):
        config = BITCOIND_CONFIG.copy()
        if 'regtest' in config:
            del config['regtest']
        config['chain'] = 'liquid-regtest'
        BitcoinD.__init__(self, bitcoin_dir, rpcport)

        self.cmd_line = [
            'elementsd',
            '-datadir={}'.format(bitcoin_dir),
            '-printtoconsole',
            '-server',
            '-logtimestamps',
            '-nolisten',
            '-nowallet',
            '-validatepegin=0',
            '-con_blocksubsidy=5000000000',
        ]
        conf_file = os.path.join(bitcoin_dir, 'elements.conf')
        config['rpcport'] = self.rpcport
        BITCOIND_REGTEST = {'rpcport': self.rpcport}
        write_config(conf_file, config, BITCOIND_REGTEST, section_name='liquid-regtest')
        self.conf_file = conf_file
        self.rpc = SimpleBitcoinProxy(btc_conf_file=self.conf_file)
        self.prefix = 'elementsd'

    def getnewaddress(self):
        """Need to get an address and then make it unconfidential
        """
        addr = self.rpc.getnewaddress()
        info = self.rpc.getaddressinfo(addr)
        return info['unconfidential']


class LightningD(TailableProc):
    def __init__(
            self,
            lightning_dir,
            bitcoindproxy,
            port=9735,
            random_hsm=False,
            node_id=0,
            grpc_port=None
    ):
        # We handle our own version of verbose, below.
        TailableProc.__init__(self, lightning_dir, verbose=False)
        self.executable = 'lightningd'
        self.lightning_dir = lightning_dir
        self.port = port
        self.cmd_prefix = []
        self.disconnect_file = None

        self.rpcproxy = bitcoindproxy
        self.opts = LIGHTNINGD_CONFIG.copy()
        opts = {
            'lightning-dir': lightning_dir,
            'addr': '127.0.0.1:{}'.format(port),
            'allow-deprecated-apis': '{}'.format("true" if DEPRECATED_APIS
                                                 else "false"),
            'network': TEST_NETWORK,
            'ignore-fee-limits': 'false',
            'bitcoin-rpcuser': BITCOIND_CONFIG['rpcuser'],
            'bitcoin-rpcpassword': BITCOIND_CONFIG['rpcpassword'],
            # Make sure we don't touch any existing config files in the user's $HOME
            'bitcoin-datadir': lightning_dir,
        }

        if grpc_port is not None:
            opts['grpc-port'] = grpc_port

        for k, v in opts.items():
            self.opts[k] = v

        if not os.path.exists(os.path.join(lightning_dir, TEST_NETWORK)):
            os.makedirs(os.path.join(lightning_dir, TEST_NETWORK))

        # Last 32-bytes of final part of dir -> seed.
        seed = (bytes(re.search('([^/]+)/*$', lightning_dir).group(1), encoding='utf-8') + bytes(32))[:32]
        if not random_hsm:
            with open(os.path.join(lightning_dir, TEST_NETWORK, 'hsm_secret'), 'wb') as f:
                f.write(seed)
        if DEVELOPER:
            self.opts['dev-fast-gossip'] = None
            self.opts['dev-bitcoind-poll'] = 1
        self.prefix = 'lightningd-%d' % (node_id)

        # Log to stdout so we see it in failure cases, and log file for TailableProc.
        self.opts['log-file'] = ['-', os.path.join(lightning_dir, "log")]
        self.opts['log-prefix'] = self.prefix + ' '
        # In case you want specific ordering!
        self.early_opts = []

    def cleanup(self):
        # To force blackhole to exit, disconnect file must be truncated!
        if self.disconnect_file:
            with open(self.disconnect_file, "w") as f:
                f.truncate()

    @property
    def cmd_line(self):
        opts = []
        for k, v in self.opts.items():
            if v is None:
                opts.append("--{}".format(k))
            elif isinstance(v, list):
                for i in v:
                    opts.append("--{}={}".format(k, i))
            else:
                opts.append("--{}={}".format(k, v))

        return self.cmd_prefix + [self.executable] + self.early_opts + opts

    def start(self, stdin=None, wait_for_initialized=True, stderr_redir=False):
        self.opts['bitcoin-rpcport'] = self.rpcproxy.rpcport
        TailableProc.start(self, stdin, stdout_redir=False, stderr_redir=stderr_redir)
        if wait_for_initialized:
            self.wait_for_log("Server started with public key")
        logging.info("LightningD started")

    def wait(self, timeout=TIMEOUT):
        """Wait for the daemon to stop for up to timeout seconds

        Returns the returncode of the process, None if the process did
        not return before the timeout triggers.
        """
        # wait() raises TimeoutExpired on timeout; swallow it so we return
        # None (returncode still unset) as the docstring promises.
        try:
            self.proc.wait(timeout)
        except subprocess.TimeoutExpired:
            pass
        return self.proc.returncode


class PrettyPrintingLightningRpc(LightningRpc):
    """A version of the LightningRpc that pretty-prints calls and results.

    Useful when debugging based on logs, and less painful to the
    eyes. It has some overhead since we re-serialize the request and
    result to json in order to pretty print it.

    Also validates (optional) schemas for us.
    """
    def __init__(self, socket_path, executor=None, logger=logging,
                 patch_json=True, jsonschemas={}):
        super().__init__(
            socket_path,
            executor,
            logger,
            patch_json,
        )
        self.jsonschemas = jsonschemas
        self.check_request_schemas = True

    def call(self, method, payload=None):
        id = self.next_id
        schemas = self.jsonschemas.get(method)
        self.logger.debug(json.dumps({
            "id": id,
            "method": method,
            "params": payload
        }, indent=2))

        # We only check payloads which are dicts, which is what we
        # usually use: there are some cases which test [] params,
        # which we ignore.
        if schemas and schemas[0] and isinstance(payload, dict) and self.check_request_schemas:
            # fields which are None are explicitly removed, so do that now
            testpayload = {}
            for k, v in payload.items():
                if v is not None:
                    testpayload[k] = v
            schemas[0].validate(testpayload)

        res = LightningRpc.call(self, method, payload)
        self.logger.debug(json.dumps({
            "id": id,
            "result": res
        }, indent=2))

        if schemas and schemas[1]:
            schemas[1].validate(res)

        return res


class LightningNode(object):
    def __init__(self, node_id, lightning_dir, bitcoind, executor, valgrind, may_fail=False,
                 may_reconnect=False,
                 allow_broken_log=False,
                 allow_warning=False,
                 allow_bad_gossip=False,
                 db=None, port=None, disconnect=None, random_hsm=None, options=None,
                 jsonschemas={},
                 valgrind_plugins=True,
                 **kwargs):
        self.bitcoin = bitcoind
        self.executor = executor
        self.may_fail = may_fail
        self.may_reconnect = may_reconnect
        self.allow_broken_log = allow_broken_log
        self.allow_bad_gossip = allow_bad_gossip
        self.allow_warning = allow_warning
        self.db = db
        self.lightning_dir = Path(lightning_dir)

        # Assume successful exit
        self.rc = 0

        # Ensure we have an RPC we can use to talk to the node
        self._create_rpc(jsonschemas)

        self.gossip_store = GossipStore(Path(lightning_dir, TEST_NETWORK, "gossip_store"))

        self.daemon = LightningD(
            lightning_dir, bitcoindproxy=bitcoind.get_proxy(),
            port=port, random_hsm=random_hsm, node_id=node_id,
            grpc_port=self.grpc_port,
        )

        # If we have a disconnect string, dump it to a file for daemon.
        if disconnect:
            self.daemon.disconnect_file = os.path.join(lightning_dir, TEST_NETWORK, "dev_disconnect")
            with open(self.daemon.disconnect_file, "w") as f:
                f.write("\n".join(disconnect))
            self.daemon.opts["dev-disconnect"] = "dev_disconnect"
        if DEVELOPER:
            self.daemon.opts["dev-fail-on-subdaemon-fail"] = None
            # Don't run --version on every subdaemon if we're valgrinding and slow.
            if SLOW_MACHINE and VALGRIND:
                self.daemon.opts["dev-no-version-checks"] = None
            if os.getenv("DEBUG_SUBD"):
                self.daemon.opts["dev-debugger"] = os.getenv("DEBUG_SUBD")
            if valgrind:
                self.daemon.env["LIGHTNINGD_DEV_NO_BACKTRACE"] = "1"
                self.daemon.opts["dev-no-plugin-checksum"] = None
            else:
                # Under valgrind, scanning can access uninitialized mem.
                self.daemon.env["LIGHTNINGD_DEV_MEMLEAK"] = "1"
            if not may_reconnect:
                self.daemon.opts["dev-no-reconnect"] = None

        if EXPERIMENTAL_DUAL_FUND:
            self.daemon.opts["experimental-dual-fund"] = None

        if options is not None:
            self.daemon.opts.update(options)
        dsn = db.get_dsn()
        if dsn is not None:
            self.daemon.opts['wallet'] = dsn

        if valgrind:
            trace_skip_pattern = '*python*,*bitcoin-cli*,*elements-cli*,*cln-grpc'
            if not valgrind_plugins:
                trace_skip_pattern += ',*plugins*'
            self.daemon.cmd_prefix = [
                'valgrind',
                '-q',
                '--trace-children=yes',
                '--trace-children-skip={}'.format(trace_skip_pattern),
                '--error-exitcode=7',
                '--log-file={}/valgrind-errors.%p'.format(self.daemon.lightning_dir)
            ]
            # Reduce precision of errors, speeding startup and reducing memory greatly:
            if SLOW_MACHINE:
                self.daemon.cmd_prefix += ['--read-inline-info=no']

    def _create_rpc(self, jsonschemas):
        """Prepares anything related to the RPC.
        """
        if os.environ.get('CLN_TEST_GRPC') == '1':
            logging.info("Switching to GRPC based RPC for tests")
            self._create_grpc_rpc()
        else:
            self._create_jsonrpc_rpc(jsonschemas)

    def _create_grpc_rpc(self):
        self.grpc_port = reserve_unused_port()
        d = self.lightning_dir / TEST_NETWORK
        d.mkdir(parents=True, exist_ok=True)

        # Copy all the certificates and keys into place:
        with (d / "ca.pem").open(mode='wb') as f:
            f.write(grpc.DUMMY_CA_PEM)

        with (d / "ca-key.pem").open(mode='wb') as f:
            f.write(grpc.DUMMY_CA_KEY_PEM)

        # Now the node will actually start up and use them, so we can
        # create the RPC instance.
        self.rpc = grpc.LightningGrpc(
            host='localhost',
            port=self.grpc_port,
            root_certificates=grpc.DUMMY_CA_PEM,
            private_key=grpc.DUMMY_CLIENT_KEY_PEM,
            certificate_chain=grpc.DUMMY_CLIENT_PEM
        )

    def _create_jsonrpc_rpc(self, jsonschemas):
        socket_path = self.lightning_dir / TEST_NETWORK / "lightning-rpc"
        self.grpc_port = None
        self.rpc = PrettyPrintingLightningRpc(
            str(socket_path),
            self.executor,
            jsonschemas=jsonschemas
        )

    def connect(self, remote_node):
        self.rpc.connect(remote_node.info['id'], '127.0.0.1', remote_node.daemon.port)

    def is_connected(self, remote_node):
        return remote_node.info['id'] in [p['id'] for p in self.rpc.listpeers()['peers']]

    def openchannel(self, remote_node, capacity=FUNDAMOUNT, addrtype="p2sh-segwit", confirm=True, wait_for_announce=True, connect=True):
        addr, wallettxid = self.fundwallet(10 * capacity, addrtype)

        if connect and not self.is_connected(remote_node):
            self.connect(remote_node)

        res = self.rpc.fundchannel(remote_node.info['id'], capacity)

        if confirm or wait_for_announce:
            self.bitcoin.generate_block(1, wait_for_mempool=res['txid'])

        if wait_for_announce:
            self.bitcoin.generate_block(5)
            wait_for(lambda: ['alias' in e for e in self.rpc.listnodes(remote_node.info['id'])['nodes']])

        return {'address': addr, 'wallettxid': wallettxid, 'fundingtx': res['tx']}

    def fundwallet(self, sats, addrtype="p2sh-segwit", mine_block=True):
        addr = self.rpc.newaddr(addrtype)[addrtype]
        txid = self.bitcoin.rpc.sendtoaddress(addr, sats / 10**8)
        if mine_block:
            self.bitcoin.generate_block(1)
            self.daemon.wait_for_log('Owning output .* txid {} CONFIRMED'.format(txid))
        return addr, txid

    def fundbalancedchannel(self, remote_node, total_capacity, announce=True):
        '''
        Creates a perfectly-balanced channel, as all things should be.
        '''
        if isinstance(total_capacity, Millisatoshi):
            total_capacity = int(total_capacity.to_satoshi())
        else:
            total_capacity = int(total_capacity)

        self.fundwallet(total_capacity + 10000)

        if remote_node.config('experimental-dual-fund'):
            remote_node.fundwallet(total_capacity + 10000)
            # We cut the total_capacity in half, since the peer's
            # expected to contribute that same amount
            chan_capacity = total_capacity // 2
            total_capacity = chan_capacity * 2

            # Tell the node to equally dual-fund the channel
            remote_node.rpc.call('funderupdate', {'policy': 'match',
                                                  'policy_mod': 100,
                                                  'fuzz_percent': 0})
        else:
            chan_capacity = total_capacity

        self.rpc.connect(remote_node.info['id'], 'localhost', remote_node.port)
        res = self.rpc.fundchannel(remote_node.info['id'], chan_capacity, feerate='slow', minconf=0, announce=announce, push_msat=Millisatoshi(chan_capacity * 500))
        blockid = self.bitcoin.generate_block(1, wait_for_mempool=res['txid'])[0]

        # Generate the scid.
        for i, txid in enumerate(self.bitcoin.rpc.getblock(blockid)['tx']):
            if txid == res['txid']:
                txnum = i

        return '{}x{}x{}'.format(self.bitcoin.rpc.getblockcount(), txnum, res['outnum'])
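
    # The returned id follows the short_channel_id convention
    # "<blockheight>x<txindex>x<outnum>": e.g. '107x1x0' for a funding tx
    # mined second in block 107, funding its first output.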

    def getactivechannels(self):
        return [c for c in self.rpc.listchannels()['channels'] if c['active']]

    def db_query(self, query):
        return self.db.query(query)

    # Assumes node is stopped!
    def db_manip(self, query):
        db = sqlite3.connect(os.path.join(self.daemon.lightning_dir, TEST_NETWORK, "lightningd.sqlite3"))
        db.row_factory = sqlite3.Row
        c = db.cursor()
        c.execute(query)
        db.commit()
        c.close()
        db.close()

    def is_synced_with_bitcoin(self, info=None):
        if info is None:
            info = self.rpc.getinfo()
        return 'warning_bitcoind_sync' not in info and 'warning_lightningd_sync' not in info

    def start(self, wait_for_bitcoind_sync=True, stderr_redir=False):
        self.daemon.start(stderr_redir=stderr_redir)
        # Cache `getinfo`, we'll be using it a lot
        self.info = self.rpc.getinfo()
        # This shortcut is sufficient for our simple tests.
        self.port = self.info['binding'][0]['port']
        self.gossip_store.open()  # Reopen the gossip_store now that we should have one
        if wait_for_bitcoind_sync and not self.is_synced_with_bitcoin(self.info):
            wait_for(lambda: self.is_synced_with_bitcoin())

    def stop(self, timeout=10):
        """Attempt to do a clean shutdown, but kill if it hangs
        """

        # Tell the daemon to stop
        try:
            # May fail if the process already died
            self.rpc.stop()
        except Exception:
            pass

        self.rc = self.daemon.wait(timeout)

        # If it did not stop be more insistent
        if self.rc is None:
            self.rc = self.daemon.stop()

        self.daemon.cleanup()

        if self.rc != 0 and not self.may_fail:
            raise ValueError("Node did not exit cleanly, rc={}".format(self.rc))
        else:
            return self.rc

    def restart(self, timeout=10, clean=True):
        """Stop and restart the lightning node.

        Keyword arguments:
        timeout: number of seconds to wait for a shutdown
        clean: whether to issue a `stop` RPC command before killing
        """
        if clean:
            self.stop(timeout)
        else:
            self.daemon.stop()

        self.start()

    def fund_channel(self, l2, amount, wait_for_active=True, announce_channel=True):
        warnings.warn("LightningNode.fund_channel is deprecated in favor of "
                      "LightningNode.fundchannel", category=DeprecationWarning)
        return self.fundchannel(l2, amount, wait_for_active, announce_channel)
    def fundchannel(self, l2, amount=FUNDAMOUNT, wait_for_active=True,
                    announce_channel=True, **kwargs):
        # Give yourself some funds to work with
        addr = self.rpc.newaddr()['bech32']

        def has_funds_on_addr(addr):
            """Check if the given address has funds in the internal wallet.
            """
            outs = self.rpc.listfunds()['outputs']
            addrs = [o['address'] for o in outs]
            return addr in addrs

        # We should not have funds on that address yet, we just generated it.
        assert(not has_funds_on_addr(addr))

        self.bitcoin.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
        self.bitcoin.generate_block(1)

        # Now we should.
        wait_for(lambda: has_funds_on_addr(addr))

        # Now go ahead and open a channel
        res = self.rpc.fundchannel(l2.info['id'], amount,
                                   announce=announce_channel,
                                   **kwargs)
        blockid = self.bitcoin.generate_block(1, wait_for_mempool=res['txid'])[0]

        for i, txid in enumerate(self.bitcoin.rpc.getblock(blockid)['tx']):
            if txid == res['txid']:
                txnum = i

        scid = "{}x{}x{}".format(self.bitcoin.rpc.getblockcount(),
                                 txnum, res['outnum'])

        if wait_for_active:
            self.wait_channel_active(scid)
            l2.wait_channel_active(scid)

        return scid, res

    def subd_pid(self, subd, peerid=None):
        """Get the process id of the given subdaemon, eg channeld or gossipd"""
        if peerid:
            ex = re.compile(r'{}-.*{}.*: pid ([0-9]*),'
                            .format(peerid, subd))
        else:
            ex = re.compile('{}-.*: pid ([0-9]*),'.format(subd))
        # Make sure we get latest one if it's restarted!
        for l in reversed(self.daemon.logs):
            group = ex.search(l)
            if group:
                return group.group(1)

        raise ValueError("No daemon {} found".format(subd))

    def channel_state(self, other):
        """Return the state of the channel to the other node.

        Returns None if there is no such peer, or a channel hasn't been funded
        yet.
        """
        peers = self.rpc.listpeers(other.info['id'])['peers']
        if not peers or 'channels' not in peers[0]:
            return None
        channel = peers[0]['channels'][0]
        return channel['state']

    def get_channel_scid(self, other):
        """Get the short_channel_id for the channel to the other node.
        """
        peers = self.rpc.listpeers(other.info['id'])['peers']
        if not peers or 'channels' not in peers[0]:
            return None
        channel = peers[0]['channels'][0]
        return channel['short_channel_id']

    def get_channel_id(self, other):
        """Get the channel_id for the channel to the other node.
        """
        peers = self.rpc.listpeers(other.info['id'])['peers']
        if not peers or 'channels' not in peers[0]:
            return None
        channel = peers[0]['channels'][0]
        return channel['channel_id']

    def is_channel_active(self, chanid):
        channels = self.rpc.listchannels(chanid)['channels']
        active = [(c['short_channel_id'], c['channel_flags']) for c in channels if c['active']]
        return (chanid, 0) in active and (chanid, 1) in active

    def wait_for_channel_onchain(self, peerid):
        txid = only_one(only_one(self.rpc.listpeers(peerid)['peers'])['channels'])['scratch_txid']
        wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())

    def wait_channel_active(self, chanid):
        wait_for(lambda: self.is_channel_active(chanid))

    # This waits until gossipd sees channel_update in both directions
    # (or for local channels, at least a local announcement)
    def wait_for_channel_updates(self, scids):
        # Could happen in any order...
        self.daemon.wait_for_logs(['Received channel_update for channel {}/0'.format(c)
                                   for c in scids]
                                  + ['Received channel_update for channel {}/1'.format(c)
                                     for c in scids])

    def wait_for_route(self, destination, timeout=TIMEOUT):
        """Wait for a route to the destination to become available.
        """
        start_time = time.time()
        while time.time() < start_time + timeout:
            try:
                self.rpc.getroute(destination.info['id'], 1, 1)
                return True
            except Exception:
                time.sleep(1)
        if time.time() > start_time + timeout:
            raise ValueError("Error waiting for a route to destination {}".format(destination))

    # This helper waits for all HTLCs to settle
    # `scids` can be a list of strings. If unset wait on all channels.
    def wait_for_htlcs(self, scids=None):
        peers = self.rpc.listpeers()['peers']
        for p, peer in enumerate(peers):
            if 'channels' in peer:
                for c, channel in enumerate(peer['channels']):
                    if scids is not None and channel['short_channel_id'] not in scids:
                        continue
                    if 'htlcs' in channel:
                        wait_for(lambda: len(self.rpc.listpeers()['peers'][p]['channels'][c]['htlcs']) == 0)

    # This sends money to a directly connected peer
    def pay(self, dst, amt, label=None):
        if not label:
            label = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20))

        # check we are connected
        dst_id = dst.info['id']
        assert len(self.rpc.listpeers(dst_id).get('peers')) == 1

        # make an invoice
        inv = dst.rpc.invoice(amt, label, label)
        # FIXME: pre 0.10.1 invoice calls didn't have payment_secret field
        psecret = dst.rpc.decodepay(inv['bolt11'])['payment_secret']
        rhash = inv['payment_hash']
        invoices = dst.rpc.listinvoices(label)['invoices']
        assert len(invoices) == 1 and invoices[0]['status'] == 'unpaid'

        routestep = {
            'amount_msat': amt,
            'id': dst_id,
            'delay': 5,
            'channel': '1x1x1'  # note: can be bogus for 1-hop direct payments
        }

        # sendpay is async now
        self.rpc.sendpay([routestep], rhash, payment_secret=psecret)
        # wait for sendpay to comply
        result = self.rpc.waitsendpay(rhash)
        assert(result.get('status') == 'complete')

        # Make sure they're all settled, in case we quickly mine blocks!
        dst.wait_for_htlcs()
    # This helper sends all money to a peer until even 1 msat can't get through.
    def drain(self, peer):
        total = 0
        msat = 4294967295  # Max payment size in some configs
        while msat != 0:
            try:
                logging.debug("Drain step with size={}".format(msat))
                self.pay(peer, msat)
                total += msat
            except RpcError as e:
                logging.debug("Got an exception while draining channel: {}".format(e))
                msat //= 2
        logging.debug("Draining complete after sending a total of {} msats".format(total))
        return total
    # Note: this feeds through the smoother in update_feerate, so changing
    # it on a running daemon may not give expected result!
    def set_feerates(self, feerates, wait_for_effect=True):
        # (bitcoind returns bitcoin per kb, so these are * 4)
        def mock_estimatesmartfee(r):
            params = r['params']
            if params == [2, 'CONSERVATIVE']:
                feerate = feerates[0] * 4
            elif params == [6, 'ECONOMICAL']:
                feerate = feerates[1] * 4
            elif params == [12, 'ECONOMICAL']:
                feerate = feerates[2] * 4
            elif params == [100, 'ECONOMICAL']:
                feerate = feerates[3] * 4
            else:
                warnings.warn("Don't have a feerate set for {}/{}.".format(
                    params[0], params[1],
                ))
                feerate = 42
            return {
                'id': r['id'],
                'error': None,
                'result': {
                    'feerate': Decimal(feerate) / 10**8
                },
            }
        self.daemon.rpcproxy.mock_rpc('estimatesmartfee', mock_estimatesmartfee)

        # Technically, this waits until it's called, not until it's processed.
        # We wait until all four levels have been called.
        if wait_for_effect:
            wait_for(lambda:
                     self.daemon.rpcproxy.mock_counts['estimatesmartfee'] >= 4)
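    # Sketch: pin the four mocked estimatesmartfee levels (block targets 2,
    # 6, 12 and 100) before exercising fee-dependent behaviour; the values
    # here mirror the get_node() defaults:
    #
    #     l1.set_feerates((15000, 11000, 7500, 3750), wait_for_effect=False)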
    # force new feerates by restarting and thus skipping slow smoothed process
    # Note: testnode must be created with: opts={'may_reconnect': True}
    def force_feerates(self, rate):
        assert self.may_reconnect
        self.set_feerates([rate] * 4, False)
        self.restart()
        self.daemon.wait_for_log('peer_out WIRE_UPDATE_FEE')
        assert self.rpc.feerates('perkw')['perkw']['opening'] == rate
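    # Sketch, per the note above:
    #
    #     l1 = node_factory.get_node(may_reconnect=True)
    #     l1.force_feerates(7500)   # all four levels pinned to 7500 perkw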
    def wait_for_onchaind_broadcast(self, name, resolve=None):
        """Wait for onchaind to drop tx name to resolve (if any)"""
        if resolve:
            r = self.daemon.wait_for_log('Broadcasting {} .* to resolve {}'
                                         .format(name, resolve))
        else:
            r = self.daemon.wait_for_log('Broadcasting {} .* to resolve '
                                         .format(name))

        rawtx = re.search(r'.* \(([0-9a-fA-F]*)\)', r).group(1)
        txid = self.bitcoin.rpc.decoderawtransaction(rawtx, True)['txid']

        wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())
    def query_gossip(self, querytype, *args, filters=[]):
        """Generate a gossip query, feed it into this node and get responses
        in hex"""
        query = subprocess.run(['devtools/mkquery',
                                querytype] + [str(a) for a in args],
                               check=True,
                               timeout=TIMEOUT,
                               stdout=subprocess.PIPE).stdout.strip()
        out = subprocess.run(['devtools/gossipwith',
                              '--features=40',  # OPT_GOSSIP_QUERIES
                              '--timeout-after={}'.format(int(math.sqrt(TIMEOUT) + 1)),
                              '{}@localhost:{}'.format(self.info['id'],
                                                       self.port),
                              query],
                             check=True,
                             timeout=TIMEOUT, stdout=subprocess.PIPE).stdout

        def passes_filters(hmsg, filters):
            for f in filters:
                if hmsg.startswith(f):
                    return False
            return True

        msgs = []
        while len(out):
            length = struct.unpack('>H', out[0:2])[0]
            hmsg = out[2:2 + length].hex()
            if passes_filters(hmsg, filters):
                msgs.append(hmsg)
            out = out[2 + length:]
        return msgs
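    # Sketch: each reply is a 2-byte big-endian length followed by the raw
    # message, which the loop above unpacks into hex strings. A typical call
    # (with `chain_hash` supplied by the test's chainparams fixture) asks for
    # the full channel range and filters out type-0109 timestamp_filter
    # replies:
    #
    #     msgs = l1.query_gossip('query_channel_range', chain_hash,
    #                            0, 1000000, filters=['0109'])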
    def config(self, config_name):
        try:
            opt = self.rpc.listconfigs(config_name)
            return opt[config_name]
        except RpcError:
            return None
    def dev_pay(self, bolt11, amount_msat=None, label=None, riskfactor=None,
                maxfeepercent=None, retry_for=None,
                maxdelay=None, exemptfee=None, use_shadow=True, exclude=[]):
        """Wrapper for rpc.dev_pay which suppresses the request schema"""

        # FIXME? dev options are not in schema
        old_check = self.rpc.check_request_schemas
        self.rpc.check_request_schemas = False
        ret = self.rpc.dev_pay(bolt11, amount_msat, label, riskfactor,
                               maxfeepercent, retry_for,
                               maxdelay, exemptfee, use_shadow, exclude)
        self.rpc.check_request_schemas = old_check
        return ret
    def dev_invoice(self, amount_msat, label, description, expiry=None, fallbacks=None, preimage=None, exposeprivatechannels=None, cltv=None, dev_routes=None):
        """Wrapper for rpc.invoice() with dev-routes option"""
        payload = {
" amount_msat " : amount_msat ,
2022-04-01 06:13:33 +02:00
" label " : label ,
" description " : description ,
" expiry " : expiry ,
" fallbacks " : fallbacks ,
" preimage " : preimage ,
" exposeprivatechannels " : exposeprivatechannels ,
" cltv " : cltv ,
" dev-routes " : dev_routes ,
}
2022-04-01 06:13:34 +02:00
# FIXME? dev options are not in schema
old_check = self . rpc . check_request_schemas
self . rpc . check_request_schemas = False
ret = self . rpc . call ( " invoice " , payload )
self . rpc . check_request_schemas = old_check
return ret
2022-04-01 06:13:33 +02:00
2019-10-26 20:27:37 +02:00
2021-01-18 21:05:18 +01:00
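    # Sketch: both wrappers just disable client-side schema checks around a
    # call that carries dev-only parameters, e.g.:
    #
    #     inv = l1.dev_invoice(10000, 'lbl', 'desc', dev_routes=[])
    #     l2.dev_pay(inv['bolt11'], use_shadow=False)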
@contextmanager
def flock(directory: Path):
    """A fair filelock, based on atomic fs operations.
    """
    if not isinstance(directory, Path):
        directory = Path(directory)
    d = directory / Path(".locks")
    os.makedirs(str(d), exist_ok=True)
    fname = None
    while True:
        # Try until we find a filename that doesn't exist yet.
        try:
            fname = d / Path("lock-{}".format(time.time()))
            fd = os.open(str(fname), flags=os.O_CREAT | os.O_EXCL)
            os.close(fd)
            break
        except FileExistsError:
            time.sleep(0.1)

    # So now we have a position in the lock, let's check if we are the
    # next one to go:
    while True:
        files = sorted([f for f in d.iterdir() if f.is_file()])
        # We're queued, so it should at least have us.
        assert len(files) >= 1
        if files[0] == fname:
            break
        time.sleep(0.1)

    # We can continue
    yield fname

    # Remove our file, so the next one can go ahead.
    fname.unlink()
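# A minimal usage sketch (hypothetical path; any shared writable directory
# works). Each waiter drops a timestamp-named file into <directory>/.locks,
# and the owner of the oldest file proceeds first, which is what makes the
# lock fair:
#
#     with flock(Path("/tmp/test-locks")):
#         ...  # critical section, serialized across processes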
class Throttler(object):
    """Throttles the creation of system-processes to avoid overload.

    There is no reason to overload the system with too many processes
    being spawned or run at the same time. It causes timeouts by
    aggressively preempting processes and swapping if the memory limit
    is reached. In order to reduce this loss of performance we provide
    a `wait()` method which will serialize the creation of processes,
    but also delay if the system load is too high.

    Notice that technically we are throttling too late, i.e., we react
    to an overload, but chances are pretty good that some other
    already running process is about to terminate, and so the overload
    is short-lived. We throttle when the process object is first
    created, not when restarted, in order to avoid delaying running
    tests, which could cause more timeouts.
    """
    def __init__(self, directory: str, target: float = 90):
        """If specified we try to stick to a load of target (in percent).
        """
        self.target = target
        self.current_load = self.target  # Start slow
        psutil.cpu_percent()  # Prime the internal load metric
        self.directory = directory

    def wait(self):
        start_time = time.time()
        with flock(self.directory):
            # We just got the lock, assume someone else just released it
            self.current_load = 100
            while self.load() >= self.target:
                time.sleep(1)

            self.current_load = 100  # Back off slightly to avoid triggering right away
        print("Throttler delayed startup for {} seconds".format(time.time() - start_time))

    def load(self):
        """An exponential moving average of the load
        """
        decay = 0.5
        load = psutil.cpu_percent()
        self.current_load = decay * load + (1 - decay) * self.current_load
        return self.current_load
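    # Worked example of the moving average above: with decay = 0.5, a fresh
    # CPU reading of 100% against a current estimate of 60% yields
    # 0.5 * 100 + 0.5 * 60 = 80%, so load spikes register quickly while the
    # estimate settles back down over subsequent calls.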
class NodeFactory(object):
    """A factory to setup and start `lightningd` daemons.
    """
    def __init__(self, request, testname, bitcoind, executor, directory,
                 db_provider, node_cls, throttler, jsonschemas):
        if request.node.get_closest_marker("slow_test") and SLOW_MACHINE:
            self.valgrind = False
        else:
            self.valgrind = VALGRIND
        self.testname = testname
        self.next_id = 1
        self.nodes = []
        self.reserved_ports = []
        self.executor = executor
        self.bitcoind = bitcoind
        self.directory = directory
        self.lock = threading.Lock()
        self.db_provider = db_provider
        self.node_cls = node_cls
        self.throttler = throttler
        self.jsonschemas = jsonschemas
    def split_options(self, opts):
        """Split node options from cli options

        Some options are used to instrument the node wrapper and some are
        passed to the daemon on the command line. Split them so we know
        where to use them.
        """
        node_opt_keys = [
            'disconnect',
            'may_fail',
            'allow_broken_log',
            'allow_warning',
            'may_reconnect',
            'random_hsm',
            'feerates',
            'wait_for_bitcoind_sync',
            'allow_bad_gossip',
            'start',
        ]
        node_opts = {k: v for k, v in opts.items() if k in node_opt_keys}
        cli_opts = {k: v for k, v in opts.items() if k not in node_opt_keys}
        return node_opts, cli_opts
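    # Sketch: a mixed dict is partitioned by key, e.g. (assuming one node
    # opt plus one lightningd CLI option):
    #
    #     node_opts, cli_opts = self.split_options(
    #         {'may_reconnect': True, 'log-level': 'debug'})
    #     # node_opts == {'may_reconnect': True}
    #     # cli_opts == {'log-level': 'debug'}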
    def get_node_id(self):
        """Generate a unique numeric ID for a lightning node
        """
        with self.lock:
            node_id = self.next_id
            self.next_id += 1
            return node_id
    def get_nodes(self, num_nodes, opts=None):
        """Start a number of nodes in parallel, each with its own options
        """
        if opts is None:
            # No opts were passed in, give some dummy opts
            opts = [{} for _ in range(num_nodes)]
        elif isinstance(opts, dict):
            # A single dict was passed in, so we use these opts for all nodes
            opts = [opts] * num_nodes

        assert len(opts) == num_nodes

        # Only trace one random node's plugins, to avoid OOM.
        if SLOW_MACHINE:
            valgrind_plugins = [False] * num_nodes
            valgrind_plugins[random.randint(0, num_nodes - 1)] = True
        else:
            valgrind_plugins = [True] * num_nodes

        jobs = []
        for i in range(num_nodes):
            node_opts, cli_opts = self.split_options(opts[i])
            jobs.append(self.executor.submit(
                self.get_node, options=cli_opts,
                node_id=self.get_node_id(), **node_opts,
                valgrind_plugins=valgrind_plugins[i]
            ))

        return [j.result() for j in jobs]
    def get_node(self, node_id=None, options=None, dbfile=None,
                 feerates=(15000, 11000, 7500, 3750), start=True,
                 wait_for_bitcoind_sync=True, may_fail=False,
                 expect_fail=False, cleandir=True, **kwargs):
        self.throttler.wait()
        node_id = self.get_node_id() if not node_id else node_id
        port = reserve_unused_port()

        lightning_dir = os.path.join(
            self.directory, "lightning-{}/".format(node_id))

        if cleandir and os.path.exists(lightning_dir):
            shutil.rmtree(lightning_dir)

        # Get the DB backend DSN we should be using for this test and this
        # node.
        db = self.db_provider.get_db(os.path.join(lightning_dir, TEST_NETWORK), self.testname, node_id)
        node = self.node_cls(
            node_id, lightning_dir, self.bitcoind, self.executor, self.valgrind, db=db,
            port=port, options=options, may_fail=may_fail or expect_fail,
            jsonschemas=self.jsonschemas,
            **kwargs
        )

        # Regtest estimatefee are unusable, so override.
        node.set_feerates(feerates, False)
        self.nodes.append(node)
        self.reserved_ports.append(port)
        if dbfile:
            out = open(os.path.join(node.daemon.lightning_dir, TEST_NETWORK,
                                    'lightningd.sqlite3'), 'xb')
            with lzma.open(os.path.join('tests/data', dbfile), 'rb') as f:
                out.write(f.read())

        if start:
            try:
                node.start(wait_for_bitcoind_sync)
            except Exception:
                if expect_fail:
                    return node
                node.daemon.stop()
                raise
        return node
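    # Sketch of the common entry points (option values are illustrative):
    #
    #     l1 = node_factory.get_node(options={'log-level': 'debug'})
    #     l1, l2, l3 = node_factory.get_nodes(3)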
    def join_nodes(self, nodes, fundchannel=True, fundamount=FUNDAMOUNT, wait_for_announce=False, announce_channels=True) -> None:
        """Given nodes, connect them in a line, optionally funding channels;
        `wait_for_announce` waits for the channel and node announcements."""
        assert not (wait_for_announce and not announce_channels), "You've asked to wait for an announcement that's not coming. (wait_for_announce=True,announce_channels=False)"
        connections = [(nodes[i], nodes[i + 1]) for i in range(len(nodes) - 1)]

        for src, dst in connections:
            src.rpc.connect(dst.info['id'], 'localhost', dst.port)

        # If we're returning now, make sure dst all show connections in
        # getpeers.
        if not fundchannel:
            for src, dst in connections:
                dst.daemon.wait_for_log(r'{}-connectd: Handed peer, entering loop'.format(src.info['id']))
            return

        bitcoind = nodes[0].bitcoin

        # If we got here, we want to fund channels
        for src, dst in connections:
            addr = src.rpc.newaddr()['bech32']
            bitcoind.rpc.sendtoaddress(addr, (fundamount + 1000000) / 10**8)

        bitcoind.generate_block(1)
        sync_blockheight(bitcoind, nodes)
        txids = []
        for src, dst in connections:
            txids.append(src.rpc.fundchannel(dst.info['id'], fundamount, announce=announce_channels)['txid'])

        # Confirm all channels and wait for them to become usable
        bitcoind.generate_block(1, wait_for_mempool=txids)
        scids = []
        for src, dst in connections:
            wait_for(lambda: src.channel_state(dst) == 'CHANNELD_NORMAL')
            scid = src.get_channel_scid(dst)
            scids.append(scid)

        # Wait for all channels to be active (locally)
        for i, n in enumerate(scids):
            nodes[i].wait_channel_active(scids[i])
            nodes[i + 1].wait_channel_active(scids[i])

        if not wait_for_announce:
            return

        bitcoind.generate_block(5)

        # Make sure everyone sees all channels, all other nodes
        for n in nodes:
            for scid in scids:
                n.wait_channel_active(scid)

        # Make sure we have all node announcements, too
        for n in nodes:
            for n2 in nodes:
                wait_for(lambda: 'alias' in only_one(n.rpc.listnodes(n2.info['id'])['nodes']))
    def line_graph(self, num_nodes, fundchannel=True, fundamount=FUNDAMOUNT, wait_for_announce=False, opts=None, announce_channels=True):
        """Create nodes, connect them and optionally fund channels.
        """
        nodes = self.get_nodes(num_nodes, opts=opts)
        self.join_nodes(nodes, fundchannel, fundamount, wait_for_announce, announce_channels)
        return nodes
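    # Sketch: a funded three-node line l1 <-> l2 <-> l3 with channels
    # announced and gossip propagated:
    #
    #     l1, l2, l3 = node_factory.line_graph(3, wait_for_announce=True)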
    def killall(self, expected_successes):
        """Returns true if every node we expected to succeed actually succeeded"""
        unexpected_fail = False
        err_msgs = []
        for i in range(len(self.nodes)):
            leaks = None
            # leak detection upsets VALGRIND by reading uninitialized mem.
            # If it's dead, we'll catch it below.
            if not self.valgrind and DEVELOPER:
                try:
                    # This also puts leaks in log.
                    leaks = self.nodes[i].rpc.dev_memleak()['leaks']
                except Exception:
                    pass

            try:
                self.nodes[i].stop()
            except Exception:
                if expected_successes[i]:
                    unexpected_fail = True

            if leaks is not None and len(leaks) != 0:
                unexpected_fail = True
                err_msgs.append("Node {} has memory leaks: {}".format(
                    self.nodes[i].daemon.lightning_dir,
                    json.dumps(leaks, sort_keys=True, indent=4)
                ))

        for p in self.reserved_ports:
            drop_unused_port(p)

        return not unexpected_fail, err_msgs