2019-08-10 12:47:32 +02:00
|
|
|
from bitcoin.rpc import RawProxy
|
2019-05-09 21:28:21 +02:00
|
|
|
from decimal import Decimal
|
2018-08-04 12:31:31 +02:00
|
|
|
from fixtures import * # noqa: F401,F403
|
2019-12-16 14:54:22 +01:00
|
|
|
from fixtures import LightningNode, TEST_NETWORK
|
2019-05-22 14:42:35 +02:00
|
|
|
from flaky import flaky # noqa: F401
|
2020-01-06 12:20:12 +01:00
|
|
|
from pyln.client import RpcError
|
2019-08-10 12:47:32 +02:00
|
|
|
from threading import Event
|
2020-01-06 12:20:12 +01:00
|
|
|
from pyln.testing.utils import (
|
2020-04-02 15:12:46 +02:00
|
|
|
DEVELOPER, TIMEOUT, VALGRIND, DEPRECATED_APIS, sync_blockheight, only_one,
|
2020-12-18 20:00:02 +01:00
|
|
|
wait_for, TailableProc, env
|
2020-01-06 12:20:12 +01:00
|
|
|
)
|
2020-04-02 05:08:22 +02:00
|
|
|
from utils import (
|
2022-01-25 21:24:31 +01:00
|
|
|
account_balance, scriptpubkey_addr, check_coin_moves
|
2020-04-02 05:08:22 +02:00
|
|
|
)
|
2018-08-06 06:57:30 +02:00
|
|
|
from ephemeral_port_reserve import reserve
|
2020-04-01 05:53:22 +02:00
|
|
|
from utils import EXPERIMENTAL_FEATURES
|
2018-08-04 12:31:31 +02:00
|
|
|
|
|
|
|
import json
|
|
|
|
import os
|
|
|
|
import pytest
|
2019-01-08 01:53:25 +01:00
|
|
|
import re
|
2018-08-22 12:06:40 +02:00
|
|
|
import shutil
|
2018-08-04 12:31:31 +02:00
|
|
|
import signal
|
|
|
|
import socket
|
|
|
|
import subprocess
|
|
|
|
import time
|
|
|
|
import unittest
|
|
|
|
|
|
|
|
|
|
|
|
def test_names(node_factory):
    """Check each node logs its deterministic alias and color at startup.

    The node ids are fixed by the test fixtures' deterministic HSM seeds,
    so the derived aliases/colors are stable across runs.
    """
    # Note:
    # private keys:
    # l1: 41bfd2660762506c9933ade59f1debf7e6495b10c14a92dbcd2d623da2507d3d01,
    # l2: c4a813f81ffdca1da6864db81795ad2d320add274452cafa1fb2ac2d07d062bd01
    # l3: dae24b3853e1443a176daba5544ee04f7db33ebe38e70bdfdb1da34e89512c1001
    expected = [
        ('0266e4598d1d3c415f572a8488830b60f7e744ed9235eb0b1ba93283b315c03518', 'JUNIORBEAM', '0266e4'),
        ('022d223620a359a47ff7f7ac447c85c46c923da53389221a0054c11c1e3ca31d59', 'SILENTARTIST', '022d22'),
        ('035d2b1192dfba134e10e540875d366ebc8bc353d5aa766b80c090b39c3a5d885d', 'HOPPINGFIRE', '035d2b'),
        ('0382ce59ebf18be7d84677c2e35f23294b9992ceca95491fcf8a56c6cb2d9de199', 'JUNIORFELONY', '0382ce'),
        ('032cf15d1ad9c4a08d26eab1918f732d8ef8fdc6abb9640bf3db174372c491304e', 'SOMBERFIRE', '032cf1'),
        ('0265b6ab5ec860cd257865d61ef0bbf5b3339c36cbda8b26b74e7f1dca490b6518', 'LOUDPHOTO', '0265b6')
    ]

    nodes = node_factory.get_nodes(len(expected))
    # Each daemon announces "public key <id>, alias <alias> ... (color #<rgb>)".
    pattern = r'public key {}, alias {}.* \(color #{}\)'
    for node, (nodeid, alias, color) in zip(nodes, expected):
        assert node.daemon.is_in_log(pattern.format(nodeid, alias, color))
|
|
|
|
|
|
|
|
|
2019-09-12 22:49:42 +02:00
|
|
|
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "This migration is based on a sqlite3 snapshot")
def test_db_upgrade(node_factory):
    """Check that db_upgrades records the migration origin and daemon version.

    A fresh node records upgrade_from == -1 (brand-new DB); a node restarted
    on a hand-crafted version-1 database records upgrade_from == 1.
    """
    l1 = node_factory.get_node()
    l1.stop()

    # The version string the daemon reports; db_upgrades must match it.
    version = subprocess.check_output(['lightningd/lightningd',
                                       '--version']).decode('utf-8').splitlines()[0]

    upgrades = l1.db_query("SELECT * from db_upgrades;")
    assert len(upgrades) == 1
    # -1 marks "database created from scratch", not an actual migration.
    assert(upgrades[0]['upgrade_from'] == -1)
    assert(upgrades[0]['lightning_version'] == version)

    # Try resetting to earlier db state.
    os.unlink(os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, "lightningd.sqlite3"))
    l1.db_manip("CREATE TABLE version (version INTEGER);")
    l1.db_manip("INSERT INTO version VALUES (1);")

    # Restart: the daemon should migrate from schema version 1 and say so.
    l1.start()
    upgrades = l1.db_query("SELECT * from db_upgrades;")
    assert len(upgrades) == 1
    assert(upgrades[0]['upgrade_from'] == 1)
    assert(upgrades[0]['lightning_version'] == version)
|
|
|
|
|
|
|
|
|
|
|
|
def test_bitcoin_failure(node_factory, bitcoind):
    """Check that lightningd retries failing bitcoind RPCs and recovers,
    and that it refuses to start against a `-blocksonly` bitcoind.
    """
    l1 = node_factory.get_node()

    # Make sure we're not failing it between getblockhash and getblock.
    sync_blockheight(bitcoind, [l1])

    def crash_bitcoincli(r):
        # Malformed reply: makes the bitcoin-cli wrapper exit non-zero.
        return {'error': 'go away'}

    # This is not a JSON-RPC response by purpose
    l1.daemon.rpcproxy.mock_rpc('estimatesmartfee', crash_bitcoincli)
    l1.daemon.rpcproxy.mock_rpc('getblockhash', crash_bitcoincli)

    # This should cause both estimatefee and getblockhash fail
    l1.daemon.wait_for_logs(['Unable to estimate .* fee',
                             'getblockhash .* exited with status 1'])

    # And they should retry!
    l1.daemon.wait_for_logs(['Unable to estimate .* fee',
                             'getblockhash .* exited with status 1'])

    # Restore, then it should recover and get blockheight.
    l1.daemon.rpcproxy.mock_rpc('estimatesmartfee', None)
    l1.daemon.rpcproxy.mock_rpc('getblockhash', None)

    bitcoind.generate_block(5)
    sync_blockheight(bitcoind, [l1])

    # We refuse to start if bitcoind is in `blocksonly`
    l1.stop()
    bitcoind.stop()
    bitcoind.cmd_line += ["-blocksonly"]
    bitcoind.start()

    # Ignore BROKEN log message about blocksonly mode.
    l2 = node_factory.get_node(start=False, expect_fail=True,
                               allow_broken_log=True)
    with pytest.raises(ValueError):
        l2.start(stderr=subprocess.PIPE)
    assert l2.daemon.is_in_stderr(r".*deactivating transaction relay is not"
                                  " supported.") is not None
    # wait_for_log gets upset since daemon is not running.
    wait_for(lambda: l2.daemon.is_in_log('deactivating transaction'
                                         ' relay is not supported'))
|
2020-07-29 19:38:31 +02:00
|
|
|
|
2018-08-04 12:31:31 +02:00
|
|
|
|
2019-08-09 04:38:59 +02:00
|
|
|
def test_bitcoin_ibd(node_factory, bitcoind):
    """Test that we recognize bitcoin in initial download mode"""
    # Fake IBD by mocking getblockchaininfo before the node starts.
    info = bitcoind.rpc.getblockchaininfo()
    info['initialblockdownload'] = True

    l1 = node_factory.get_node(start=False)
    l1.daemon.rpcproxy.mock_rpc('getblockchaininfo', info)

    # Don't block startup on sync, or we'd never get past the mock.
    l1.start(wait_for_bitcoind_sync=False)

    # This happens before the Starting message start() waits for.
    assert l1.daemon.is_in_log('Waiting for initial block download')
    assert 'warning_bitcoind_sync' in l1.rpc.getinfo()

    # "Finish" IDB.
    l1.daemon.rpcproxy.mock_rpc('getblockchaininfo', None)

    l1.daemon.wait_for_log('Bitcoin backend now synced')
    # Warning should clear once the backend reports synced.
    assert 'warning_bitcoind_sync' not in l1.rpc.getinfo()
|
|
|
|
|
|
|
|
|
2021-05-07 20:39:23 +02:00
|
|
|
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_lightningd_still_loading(node_factory, bitcoind, executor):
    """Test that we recognize we haven't got all blocks from bitcoind"""

    # Released at the end to let the stalled getblock complete.
    mock_release = Event()

    # This is slow enough that we're going to notice.
    def mock_getblock(r):
        conf_file = os.path.join(bitcoind.bitcoin_dir, 'bitcoin.conf')
        brpc = RawProxy(btc_conf_file=conf_file)
        # Stall only on the designated block (bound later, see below).
        if r['params'][0] == slow_blockid:
            mock_release.wait(TIMEOUT)
        return {
            "result": brpc._call(r['method'], *r['params']),
            "error": None,
            "id": r['id']
        }

    # Start it, establish channel, get extra funds.
    l1, l2, l3 = node_factory.get_nodes(3, opts=[{'may_reconnect': True,
                                                  'wait_for_bitcoind_sync': False},
                                                 {'may_reconnect': True,
                                                  'wait_for_bitcoind_sync': False},
                                                 {}])
    node_factory.join_nodes([l1, l2])

    # Balance l1<->l2 channel
    l1.pay(l2, 10**9 // 2)

    l1.stop()

    # Now make sure l2 is behind.
    bitcoind.generate_block(2)
    # Make sure l2/l3 are synced
    sync_blockheight(bitcoind, [l2, l3])

    # Make it slow grabbing the final block.
    # NOTE: mock_getblock closes over this name; it must be bound before
    # l1 restarts and starts fetching blocks.
    slow_blockid = bitcoind.rpc.getblockhash(bitcoind.rpc.getblockcount())
    l1.daemon.rpcproxy.mock_rpc('getblock', mock_getblock)

    l1.start(wait_for_bitcoind_sync=False)

    # It will warn about being out-of-sync.
    assert 'warning_bitcoind_sync' not in l1.rpc.getinfo()
    assert 'warning_lightningd_sync' in l1.rpc.getinfo()

    # Make sure it's connected to l2 (otherwise we get TEMPORARY_CHANNEL_FAILURE)
    wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])

    # Payments will fail. FIXME: More informative msg?
    with pytest.raises(RpcError, match=r'TEMPORARY_NODE_FAILURE'):
        l1.pay(l2, 1000)

    # Can't fund a new channel.
    l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
    with pytest.raises(RpcError, match=r'304'):
        if l1.config('experimental-dual-fund'):
            psbt = l1.rpc.fundpsbt('10000sat', '253perkw', 250)['psbt']
            l1.rpc.openchannel_init(l3.info['id'], '10000sat', psbt)
        else:
            l1.rpc.fundchannel_start(l3.info['id'], '10000sat')

    # Attempting to fund an extremely large transaction should fail
    # with a 'unsynced' error
    with pytest.raises(RpcError, match=r'304'):
        l1.rpc.txprepare([{l1.rpc.newaddr()['bech32']: '200000000sat'}])

    # This will work, but will be delayed until synced.
    fut = executor.submit(l2.pay, l1, 1000)
    l1.daemon.wait_for_log("Deferring incoming commit until we sync")

    # Release the mock.
    mock_release.set()
    fut.result()

    assert 'warning_lightningd_sync' not in l1.rpc.getinfo()

    # Now we get insufficient funds error
    with pytest.raises(RpcError, match=r'301'):
        l1.rpc.txprepare([{l1.rpc.newaddr()['bech32']: '200000000sat'}])

    # This will now work normally.
    l1.pay(l2, 1000)
|
2019-08-10 12:47:32 +02:00
|
|
|
|
|
|
|
|
2018-08-04 12:31:31 +02:00
|
|
|
def test_ping(node_factory):
    """Exercise BOLT #1 ping/pong over a channel, including size limits."""
    l1, l2 = node_factory.line_graph(2)

    def ping_tests(l1, l2):
        # 0-byte pong gives just type + length field.
        ret = l1.rpc.ping(l2.info['id'], 0, 0)
        assert ret['totlen'] == 4

        # 1000-byte ping, 0-byte pong.
        ret = l1.rpc.ping(l2.info['id'], 1000, 0)
        assert ret['totlen'] == 4

        # 1000 byte pong.
        ret = l1.rpc.ping(l2.info['id'], 1000, 1000)
        assert ret['totlen'] == 1004

        # Maximum length pong.
        ret = l1.rpc.ping(l2.info['id'], 1000, 65531)
        assert ret['totlen'] == 65535

        # Overlength -> no reply.
        for s in range(65532, 65536):
            ret = l1.rpc.ping(l2.info['id'], 1000, s)
            assert ret['totlen'] == 0

        # 65535 - type(2 bytes) - num_pong_bytes(2 bytes) - byteslen(2 bytes)
        # = 65529 max.
        with pytest.raises(RpcError, match=r'oversize ping'):
            l1.rpc.ping(l2.info['id'], 65530, 1)

    # channeld pinging
    ping_tests(l1, l2)
    if DEVELOPER:
        l1.daemon.wait_for_log(r'Got pong 1000 bytes \({}\.\.\.\)'
                               .format(l2.info['version']))
|
|
|
|
|
|
|
|
|
2021-04-26 21:58:58 +02:00
|
|
|
@pytest.mark.developer("needs --dev-disconnect")
def test_htlc_sig_persistence(node_factory, bitcoind, executor):
    """Interrupt a payment between two peers, then fail and recover funds using the HTLC sig.
    """
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(options={'dev-no-reconnect': None},
                               feerates=(7500, 7500, 7500, 7500))
    # l2 drops the connection right after sending COMMITMENT_SIGNED.
    l2 = node_factory.get_node(disconnect=['+WIRE_COMMITMENT_SIGNED'])

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 10**6)
    f = executor.submit(l1.pay, l2, 31337000)
    l1.daemon.wait_for_log(r'HTLC out 0 RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')
    l1.stop()

    # `pay` call is lost
    with pytest.raises(RpcError):
        f.result()

    # We should have the HTLC sig
    assert(len(l1.db_query("SELECT * FROM htlc_sigs;")) == 1)

    # This should reload the htlc_sig
    l2.rpc.dev_fail(l1.info['id'])
    # Make sure it broadcasts to chain.
    l2.wait_for_channel_onchain(l1.info['id'])
    l2.stop()
    bitcoind.generate_block(1)
    l1.start()

    assert l1.daemon.is_in_log(r'Loaded 1 HTLC signatures from DB')
    l1.daemon.wait_for_logs([
        r'Peer permanent failure in CHANNELD_NORMAL: Funding transaction spent',
        r'Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US'
    ])
    bitcoind.generate_block(5)
    l1.daemon.wait_for_log("Broadcasting OUR_HTLC_TIMEOUT_TO_US")
    # Give the broadcast a moment to reach bitcoind's mempool.
    time.sleep(3)
    bitcoind.generate_block(1)
    l1.daemon.wait_for_logs([
        r'Owning output . (\d+)sat .SEGWIT. txid',
    ])

    # We should now have a) the change from funding, b) the
    # unilateral to us, and c) the HTLC respend to us
    assert len(l1.rpc.listfunds()['outputs']) == 3
|
|
|
|
|
|
|
|
|
2021-04-26 21:58:58 +02:00
|
|
|
@pytest.mark.developer("needs to deactivate shadow routing")
def test_htlc_out_timeout(node_factory, bitcoind, executor):
    """Test that we drop onchain if the peer doesn't time out HTLC"""

    # HTLC 1->2, 1 fails after it's irrevocably committed, can't reconnect
    disconnects = ['-WIRE_REVOKE_AND_ACK']
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(disconnect=disconnects,
                               options={'dev-no-reconnect': None},
                               feerates=(7500, 7500, 7500, 7500))
    l2 = node_factory.get_node()

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    chanid, _ = l1.fundchannel(l2, 10**6)

    # Wait for route propagation.
    l1.wait_channel_active(chanid)

    amt = 200000000
    inv = l2.rpc.invoice(amt, 'test_htlc_out_timeout', 'desc')['bolt11']
    assert only_one(l2.rpc.listinvoices('test_htlc_out_timeout')['invoices'])['status'] == 'unpaid'

    executor.submit(l1.rpc.dev_pay, inv, use_shadow=False)

    # l1 will disconnect, and not reconnect.
    l1.daemon.wait_for_log('dev_disconnect: -WIRE_REVOKE_AND_ACK')

    # Takes 6 blocks to timeout (cltv-final + 1), but we also give grace period of 1 block.
    # shadow route can add extra blocks!
    status = only_one(l1.rpc.call('paystatus')['pay'])
    if 'shadow' in status:
        shadowlen = 6 * status['shadow'].count('Added 6 cltv delay for shadow')
    else:
        shadowlen = 0

    bitcoind.generate_block(5 + 1 + shadowlen)
    # One block short of the deadline: nothing should happen yet.
    time.sleep(3)
    assert not l1.daemon.is_in_log('hit deadline')
    bitcoind.generate_block(1)

    l1.daemon.wait_for_log('Offered HTLC 0 SENT_ADD_ACK_REVOCATION cltv .* hit deadline')
    l1.daemon.wait_for_log('sendrawtx exit 0')
    l1.bitcoin.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')

    # L1 will timeout HTLC immediately
    l1.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX .* after 0 blocks',
                             'Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks'])

    l1.daemon.wait_for_log('sendrawtx exit 0')
    bitcoind.generate_block(1)

    l1.daemon.wait_for_log('Propose handling OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
    bitcoind.generate_block(4)
    # It should now claim both the to-local and htlc-timeout-tx outputs.
    l1.daemon.wait_for_logs(['Broadcasting OUR_DELAYED_RETURN_TO_WALLET',
                             'Broadcasting OUR_DELAYED_RETURN_TO_WALLET',
                             'sendrawtx exit 0',
                             'sendrawtx exit 0'])

    # Now, 100 blocks it should be done.
    bitcoind.generate_block(100)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')
|
|
|
|
|
|
|
|
|
2021-04-26 21:58:58 +02:00
|
|
|
@pytest.mark.developer("needs to deactivate shadow routing")
def test_htlc_in_timeout(node_factory, bitcoind, executor):
    """Test that we drop onchain if the peer doesn't accept fulfilled HTLC"""

    # HTLC 1->2, 1 fails after 2 has sent committed the fulfill
    disconnects = ['-WIRE_REVOKE_AND_ACK*2']
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(disconnect=disconnects,
                               options={'dev-no-reconnect': None},
                               feerates=(7500, 7500, 7500, 7500))
    l2 = node_factory.get_node()

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    chanid, _ = l1.fundchannel(l2, 10**6)

    l1.wait_channel_active(chanid)
    sync_blockheight(bitcoind, [l1, l2])

    amt = 200000000
    inv = l2.rpc.invoice(amt, 'test_htlc_in_timeout', 'desc')['bolt11']
    assert only_one(l2.rpc.listinvoices('test_htlc_in_timeout')['invoices'])['status'] == 'unpaid'

    executor.submit(l1.rpc.dev_pay, inv, use_shadow=False)

    # l1 will disconnect and not reconnect.
    l1.daemon.wait_for_log('dev_disconnect: -WIRE_REVOKE_AND_ACK')

    # Deadline HTLC expiry minus 1/2 cltv-expiry delta (rounded up) (== cltv - 3). cltv is 5+1.
    # shadow route can add extra blocks!
    status = only_one(l1.rpc.call('paystatus')['pay'])
    if 'shadow' in status:
        shadowlen = 6 * status['shadow'].count('Added 6 cltv delay for shadow')
    else:
        shadowlen = 0
    # One block short of the deadline: l2 must not act yet.
    bitcoind.generate_block(2 + shadowlen)
    assert not l2.daemon.is_in_log('hit deadline')
    bitcoind.generate_block(1)

    l2.daemon.wait_for_log('Fulfilled HTLC 0 SENT_REMOVE_COMMIT cltv .* hit deadline')
    l2.daemon.wait_for_log('sendrawtx exit 0')
    l2.bitcoin.generate_block(1)
    l2.daemon.wait_for_log(' to ONCHAIN')
    l1.daemon.wait_for_log(' to ONCHAIN')

    # L2 will collect HTLC (iff no shadow route)
    l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by OUR_HTLC_SUCCESS_TX .* after 0 blocks')
    l2.daemon.wait_for_log('sendrawtx exit 0')
    bitcoind.generate_block(1)
    l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
    bitcoind.generate_block(4)
    l2.daemon.wait_for_log('Broadcasting OUR_DELAYED_RETURN_TO_WALLET')
    l2.daemon.wait_for_log('sendrawtx exit 0')

    # Now, 100 blocks it should be both done.
    bitcoind.generate_block(100)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')
|
|
|
|
|
|
|
|
|
2020-06-23 03:11:11 +02:00
|
|
|
@unittest.skipIf(not TEST_NETWORK == 'regtest', 'must be on bitcoin network')
@pytest.mark.developer("needs DEVELOPER=1")
def test_bech32_funding(node_factory, chainparams):
    """Fund the wallet via a bech32 (P2WPKH) address and open a channel,
    then verify the funding tx actually spends that wallet output.
    """
    # Don't get any funds from previous runs.
    l1, l2 = node_factory.line_graph(2, opts={'random_hsm': True}, fundchannel=False)

    # fund a bech32 address and then open a channel with it
    res = l1.openchannel(l2, 25000, 'bech32')
    address = res['address']
    assert address.startswith(chainparams['bip173_prefix'])

    # probably overly paranoid checking
    wallettxid = res['wallettxid']

    wallettx = l1.bitcoin.rpc.getrawtransaction(wallettxid, True)
    fundingtx = l1.bitcoin.rpc.decoderawtransaction(res['fundingtx']['tx'])

    def is_p2wpkh(output):
        # True iff this scriptPubKey is native segwit v0 keyhash paying
        # the address we funded.
        return output['type'] == 'witness_v0_keyhash' and \
            address == scriptpubkey_addr(output)

    assert any(is_p2wpkh(output['scriptPubKey']) for output in wallettx['vout'])
    # The channel funding must spend the wallet tx we just created.
    assert only_one(fundingtx['vin'])['txid'] == res['wallettxid']
|
|
|
|
|
|
|
|
|
2020-04-02 05:08:22 +02:00
|
|
|
def test_withdraw_misc(node_factory, bitcoind, chainparams):
    """Exercise the `withdraw` RPC: valid/invalid addresses and amounts,
    BIP173 failure cases, 'all' withdrawals, and coin-movement accounting.
    """
    def dont_spend_outputs(n, txid):
        """Reserve both outputs (we assume there are two!) in case any our ours, so we don't spend change: wrecks accounting checks"""
        n.rpc.reserveinputs(bitcoind.rpc.createpsbt([{'txid': txid,
                                                      'vout': 0},
                                                     {'txid': txid,
                                                      'vout': 1}], []))

    # We track channel balances, to verify that accounting is ok.
    coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')

    # Per-deposit amount in satoshis.
    amount = 2000000
    # Don't get any funds from previous runs.
    l1 = node_factory.get_node(random_hsm=True,
                               options={'plugin': coin_mvt_plugin},
                               feerates=(7500, 7500, 7500, 7500))
    l2 = node_factory.get_node(random_hsm=True)
    addr = l1.rpc.newaddr()['bech32']

    # Add some funds to withdraw later
    for i in range(10):
        l1.bitcoin.rpc.sendtoaddress(addr, amount / 10**8)

    bitcoind.generate_block(1)
    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10)

    # Reach around into the db to check that outputs were added
    assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 10

    waddr = l1.bitcoin.getnewaddress()
    # Now attempt to withdraw some (making sure we collect multiple inputs)
    with pytest.raises(RpcError):
        l1.rpc.withdraw('not an address', amount)
    with pytest.raises(RpcError):
        l1.rpc.withdraw(waddr, 'not an amount')
    with pytest.raises(RpcError):
        l1.rpc.withdraw(waddr, -amount)
    with pytest.raises(RpcError, match=r'Could not afford'):
        l1.rpc.withdraw(waddr, amount * 100)

    out = l1.rpc.withdraw(waddr, amount)

    # Make sure bitcoind received the withdrawal
    unspent = l1.bitcoin.rpc.listunspent(0)
    withdrawal = [u for u in unspent if u['txid'] == out['txid']]

    # 2000000 sat == 0.02 BTC.
    assert(withdrawal[0]['amount'] == Decimal('0.02'))

    bitcoind.generate_block(1, wait_for_mempool=1)
    sync_blockheight(bitcoind, [l1])

    # Now make sure two of them were marked as spent
    assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 2

    dont_spend_outputs(l1, out['txid'])

    # Now send some money to l2.
    # lightningd uses P2SH-P2WPKH
    waddr = l2.rpc.newaddr('bech32')['bech32']
    out = l1.rpc.withdraw(waddr, amount)
    bitcoind.generate_block(1)

    # Make sure l2 received the withdrawal.
    wait_for(lambda: len(l2.rpc.listfunds()['outputs']) == 1)
    outputs = l2.db_query('SELECT value FROM outputs WHERE status=0;')
    assert only_one(outputs)['value'] == amount

    # Now make sure an additional two of them were marked as spent
    sync_blockheight(bitcoind, [l1])
    dont_spend_outputs(l1, out['txid'])
    assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 4

    # Address-format checks below are regtest-specific (bcrt1 prefixes).
    if chainparams['name'] != 'regtest':
        return

    # Simple test for withdrawal to P2WPKH
    # Address from: https://bc-2.jp/tools/bech32demo/index.html
    waddr = 'bcrt1qw508d6qejxtdg4y5r3zarvary0c5xw7kygt080'
    with pytest.raises(RpcError):
        l1.rpc.withdraw('xx1qw508d6qejxtdg4y5r3zarvary0c5xw7kxpjzsx', amount)
    with pytest.raises(RpcError):
        l1.rpc.withdraw('tb1pw508d6qejxtdg4y5r3zarvary0c5xw7kdl9fad', amount)
    with pytest.raises(RpcError):
        l1.rpc.withdraw('tb1qw508d6qejxtdg4y5r3zarvary0c5xw7kxxxxxx', amount)
    out = l1.rpc.withdraw(waddr, amount)
    bitcoind.generate_block(1, wait_for_mempool=1)
    sync_blockheight(bitcoind, [l1])
    dont_spend_outputs(l1, out['txid'])

    # Now make sure additional two of them were marked as spent
    assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 6

    # Simple test for withdrawal to P2WSH
    # Address from: https://bc-2.jp/tools/bech32demo/index.html
    waddr = 'bcrt1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qzf4jry'
    with pytest.raises(RpcError):
        l1.rpc.withdraw('xx1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7', amount)
    with pytest.raises(RpcError):
        l1.rpc.withdraw('tb1prp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qsm03tq', amount)
    with pytest.raises(RpcError):
        l1.rpc.withdraw('tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qxxxxxx', amount)
    out = l1.rpc.withdraw(waddr, amount)
    bitcoind.generate_block(1, wait_for_mempool=1)
    sync_blockheight(bitcoind, [l1])
    dont_spend_outputs(l1, out['txid'])
    # Now make sure additional two of them were marked as spent
    assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 8

    # failure testing for invalid SegWit addresses, from BIP173
    # HRP character out of range
    with pytest.raises(RpcError):
        l1.rpc.withdraw(' 1nwldj5', amount)
    # overall max length exceeded
    with pytest.raises(RpcError):
        l1.rpc.withdraw('an84characterslonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1569pvx', amount)
    # No separator character
    with pytest.raises(RpcError):
        l1.rpc.withdraw('pzry9x0s0muk', amount)
    # Empty HRP
    with pytest.raises(RpcError):
        l1.rpc.withdraw('1pzry9x0s0muk', amount)
    # Invalid witness version
    with pytest.raises(RpcError):
        l1.rpc.withdraw('BC13W508D6QEJXTDG4Y5R3ZARVARY0C5XW7KN40WF2', amount)
    # Invalid program length for witness version 0 (per BIP141)
    with pytest.raises(RpcError):
        l1.rpc.withdraw('BC1QR508D6QEJXTDG4Y5R3ZARVARYV98GJ9P', amount)
    # Mixed case
    with pytest.raises(RpcError):
        l1.rpc.withdraw('tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sL5k7', amount)
    # Non-zero padding in 8-to-5 conversion
    with pytest.raises(RpcError):
        l1.rpc.withdraw('tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3pjxtptv', amount)

    # Should have 2 outputs available.
    assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 2

    # Unreserve everything.
    inputs = []
    for out in l1.rpc.listfunds()['outputs']:
        if out['reserved']:
            inputs += [{'txid': out['txid'], 'vout': out['output']}]
            assert out['reserved_to_block'] > bitcoind.rpc.getblockchaininfo()['blocks']
    l1.rpc.unreserveinputs(bitcoind.rpc.createpsbt(inputs, []))

    # Test withdrawal to self.
    l1.rpc.withdraw(l1.rpc.newaddr('bech32')['bech32'], 'all', minconf=0)
    bitcoind.generate_block(1)
    assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 1

    l1.rpc.withdraw(waddr, 'all', minconf=0)
    assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 0

    # This should fail, can't even afford fee.
    with pytest.raises(RpcError, match=r'Could not afford'):
        l1.rpc.withdraw(waddr, 'all')

    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [l1])
    assert account_balance(l1, 'wallet') == 0

    # Values in millisatoshi; the last entry is the final 'all' sweep.
    external_moves = [
        {'type': 'chain_mvt', 'credit': 2000000000, 'debit': 0, 'tags': ['deposit']},
        {'type': 'chain_mvt', 'credit': 2000000000, 'debit': 0, 'tags': ['deposit']},
        {'type': 'chain_mvt', 'credit': 2000000000, 'debit': 0, 'tags': ['deposit']},
        {'type': 'chain_mvt', 'credit': 2000000000, 'debit': 0, 'tags': ['deposit']},
        {'type': 'chain_mvt', 'credit': 11957603000, 'debit': 0, 'tags': ['deposit']},
    ]

    check_coin_moves(l1, 'external', external_moves, chainparams)
|
|
|
|
|
2019-05-09 21:28:21 +02:00
|
|
|
|
2018-08-04 12:31:31 +02:00
|
|
|
def test_io_logging(node_factory, executor):
    """Check raw wire-message ('io' level) logging.

    With log-level=io, channeld logs every wire message as hex; SIGUSR1
    toggles that logging at runtime, and io entries must not leak into a
    peer's per-peer log unless enabled on that node.
    """
    l1 = node_factory.get_node(options={'log-level': 'io'})
    l2 = node_factory.get_node()
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # Fundchannel manually so we get channeld pid.
    l1.fundwallet(10**6 + 1000000)
    l1.rpc.fundchannel(l2.info['id'], 10**6)['tx']

    l1.daemon.wait_for_log('sendrawtx exit 0')
    l1.bitcoin.generate_block(1)
    l1.daemon.wait_for_log(' to CHANNELD_NORMAL')
    l2.daemon.wait_for_log(' to CHANNELD_NORMAL')

    # Pay in the background so we can watch the wire messages in the log.
    fut = executor.submit(l1.pay, l2, 200000000)

    # WIRE_UPDATE_ADD_HTLC = 128 = 0x0080
    l1.daemon.wait_for_log(r'channeld.*: \[OUT\] 0080')
    # WIRE_UPDATE_FULFILL_HTLC = 130 = 0x0082
    l1.daemon.wait_for_log(r'channeld.*: \[IN\] 0082')
    fut.result(10)

    # Send it sigusr1: should turn off logging.
    pid1 = l1.subd_pid('channeld')
    subprocess.run(['kill', '-USR1', pid1])

    l1.pay(l2, 200000000)

    # Search only from logsearch_start so the io lines from the first
    # payment (before the toggle) cannot match.
    assert not l1.daemon.is_in_log(r'channeld.*: \[OUT\] 0080',
                                   start=l1.daemon.logsearch_start)
    assert not l1.daemon.is_in_log(r'channeld.*: \[IN\] 0082',
                                   start=l1.daemon.logsearch_start)

    # IO logs should not appear in peer logs.
    peerlog = only_one(l2.rpc.listpeers(l1.info['id'], "io")['peers'])['log']
    assert not any(l['type'] == 'IO_OUT' or l['type'] == 'IO_IN'
                   for l in peerlog)

    # Turn on in l2 channel logging.
    pid2 = l2.subd_pid('channeld')
    subprocess.run(['kill', '-USR1', pid2])
    l1.pay(l2, 200000000)

    # Now it should find it.
    peerlog = only_one(l2.rpc.listpeers(l1.info['id'], "io")['peers'])['log']
    assert any(l['type'] == 'IO_OUT' for l in peerlog)
    assert any(l['type'] == 'IO_IN' for l in peerlog)
|
|
|
|
|
def test_address(node_factory):
    """Check getinfo 'address' and 'binding' fields.

    Also exercises binding to (and connecting over) a UNIX-domain socket,
    both via --bind-addr and via --addr.
    """
    if DEVELOPER:
        # dev-allow-localhost makes the node announce its localhost address.
        opts = {'dev-allow-localhost': None}
    else:
        opts = None
    l1 = node_factory.get_node(options=opts)
    addr = l1.rpc.getinfo()['address']
    if DEVELOPER:
        assert len(addr) == 1
        assert addr[0]['type'] == 'ipv4'
        assert addr[0]['address'] == '127.0.0.1'
        assert int(addr[0]['port']) == l1.port
    else:
        # Without dev-allow-localhost nothing is announced.
        assert len(addr) == 0

    bind = l1.rpc.getinfo()['binding']
    assert len(bind) == 1
    assert bind[0]['type'] == 'ipv4'
    assert bind[0]['address'] == '127.0.0.1'
    assert int(bind[0]['port']) == l1.port

    # Now test UNIX domain binding.
    l1.stop()
    l1.daemon.opts['bind-addr'] = os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, "sock")
    l1.start()

    l2 = node_factory.get_node()
    ret = l2.rpc.connect(l1.info['id'], l1.daemon.opts['bind-addr'])
    assert ret['address'] == {'type': 'local socket', 'socket': l1.daemon.opts['bind-addr']}

    # 'addr' with local socket works too.
    l1.stop()
    del l1.daemon.opts['bind-addr']
    l1.daemon.opts['addr'] = os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, "sock")
    # start expects a port, so we open-code here.
    l1.daemon.start()

    l2 = node_factory.get_node()
    l2.rpc.connect(l1.info['id'], l1.daemon.opts['addr'])
2018-08-04 12:31:31 +02:00
|
|
|
|
2020-04-02 15:12:46 +02:00
|
|
|
@unittest.skipIf(DEPRECATED_APIS, "Tests the --allow-deprecated-apis config")
def test_listconfigs(node_factory, bitcoind, chainparams):
    """Sanity-check listconfigs output, including per-option queries.

    Fix: the 'ignore-fee-limits' assertion was duplicated verbatim; the
    redundant copy is removed.  Also uses statement-form `assert`.
    """
    # Make extremely long entry, check it works
    l1 = node_factory.get_node(options={'log-prefix': 'lightning1-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'})

    configs = l1.rpc.listconfigs()
    # See utils.py
    assert configs['allow-deprecated-apis'] is False
    assert configs['network'] == chainparams['name']
    assert configs['ignore-fee-limits'] is False
    # Overlong prefixes are truncated with an ellipsis.
    assert configs['log-prefix'] == 'lightning1-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx...'

    # These are aliases, but we don't print the (unofficial!) wumbo.
    assert 'wumbo' not in configs
    assert configs['large-channels'] is False

    # Test one at a time: each option queried alone must match the full dump.
    for c in configs.keys():
        # Skip pseudo-entries and plugin lists, which aren't single options.
        if c.startswith('#') or c.startswith('plugins') or c == 'important-plugins':
            continue
        oneconfig = l1.rpc.listconfigs(config=c)
        assert oneconfig[c] == configs[c]
|
2019-11-21 14:51:18 +01:00
|
|
|
def test_listconfigs_plugins(node_factory, bitcoind, chainparams):
    """Check the 'important-plugins' entries in listconfigs output."""
    l1 = node_factory.get_node()

    # assert that we have pay plugin and that plugins have a name and path
    configs = l1.rpc.listconfigs()
    assert configs['important-plugins']
    assert len([p for p in configs['important-plugins'] if p['name'] == "pay"]) == 1
    for p in configs['important-plugins']:
        # Each entry must name an existing, executable plugin binary.
        assert p['name'] and len(p['name']) > 0
        assert p['path'] and len(p['path']) > 0
        assert os.path.isfile(p['path']) and os.access(p['path'], os.X_OK)
2018-08-04 12:31:31 +02:00
|
|
|
def test_multirpc(node_factory):
    """Test that we can do multiple RPC without waiting for response"""
    l1 = node_factory.get_node()

    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect(l1.rpc.socket_path)

    commands = [
        b'{"id":1,"jsonrpc":"2.0","method":"listpeers","params":[]}',
        b'{"id":2,"jsonrpc":"2.0","method":"listpeers","params":[]}',
        b'{"id":3,"jsonrpc":"2.0","method":"listpeers","params":[]}',
        b'{"id":4,"jsonrpc":"2.0","method":"listpeers","params":[]}',
        b'{"id":5,"jsonrpc":"2.0","method":"listpeers","params":[]}',
        b'{"id":6,"jsonrpc":"2.0","method":"listpeers","params":[]}',
        b'{"method": "invoice", "params": [100, "foo", "foo"], "jsonrpc": "2.0", "id": 7 }',
        b'{"method": "waitinvoice", "params": ["foo"], "jsonrpc" : "2.0", "id": 8 }',
        b'{"method": "delinvoice", "params": ["foo", "unpaid"], "jsonrpc" : "2.0", "id": 9 }',
    ]

    # Fire all requests in one write: lightningd must handle pipelining.
    sock.sendall(b'\n'.join(commands))

    # Drain one response per request; we only care that they all arrive
    # (the loop variable is unused, hence `_`).
    buff = b''
    for _ in commands:
        _, buff = l1.rpc._readobj(sock, buff)
    sock.close()
|
2021-04-26 21:58:58 +02:00
|
|
|
@pytest.mark.developer("needs DEVELOPER=1")
def test_multiplexed_rpc(node_factory):
    """Test that we can do multiple RPCs which exit in different orders"""
    l1 = node_factory.get_node()

    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect(l1.rpc.socket_path)

    # Neighbouring ones may be in or out of order.
    # NOTE(review): each request appears twice back-to-back; presumably
    # deliberate (duplicate ids with identical delays keep the response
    # order matching the request order) -- confirm against upstream.
    commands = [
        b'{"id":1,"jsonrpc":"2.0","method":"dev","params":["slowcmd",2000]}',
        b'{"id":1,"jsonrpc":"2.0","method":"dev","params":["slowcmd",2000]}',
        b'{"id":2,"jsonrpc":"2.0","method":"dev","params":["slowcmd",1500]}',
        b'{"id":2,"jsonrpc":"2.0","method":"dev","params":["slowcmd",1500]}',
        b'{"id":3,"jsonrpc":"2.0","method":"dev","params":["slowcmd",1000]}',
        b'{"id":3,"jsonrpc":"2.0","method":"dev","params":["slowcmd",1000]}',
        b'{"id":4,"jsonrpc":"2.0","method":"dev","params":["slowcmd",500]}',
        b'{"id":4,"jsonrpc":"2.0","method":"dev","params":["slowcmd",500]}'
    ]

    sock.sendall(b'\n'.join(commands))

    buff = b''

    # They will return in the same order, since they start immediately
    # (delaying completion should mean we don't see the other commands intermingled).
    for i in commands:
        obj, buff = l1.rpc._readobj(sock, buff)
        assert obj['id'] == l1.rpc.decoder.decode(i.decode("UTF-8"))['id']
    sock.close()
|
2018-11-20 02:53:16 +01:00
|
|
|
def test_malformed_rpc(node_factory):
    """Test that we get a correct response to malformed RPC commands"""
    l1 = node_factory.get_node()

    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect(l1.rpc.socket_path)

    # Each malformed request paired with the JSON-RPC error code it must
    # produce: -32600 "invalid request", -32601 "method not found".
    cases = [
        # No ID
        (b'{"jsonrpc":"2.0","method":"getinfo","params":[]}', -32600),
        # No method
        (b'{"id":1, "jsonrpc":"2.0","params":[]}', -32600),
        # Complete crap
        (b'[]', -32600),
        # Bad ID
        (b'{"id":{}, "jsonrpc":"2.0","method":"getinfo","params":[]}', -32600),
        # Bad method
        (b'{"id":1, "method": 12, "jsonrpc":"2.0","params":[]}', -32600),
        # Unknown method
        (b'{"id":1, "method": "unknown", "jsonrpc":"2.0","params":[]}', -32601),
    ]

    for request, expected_code in cases:
        sock.sendall(request)
        reply, _ = l1.rpc._readobj(sock, b'')
        assert reply['error']['code'] == expected_code

    sock.close()
|
2018-08-04 12:31:31 +02:00
|
|
|
def test_cli(node_factory):
    """Drive cli/lightning-cli against a running node.

    Covers: plain help output, -J JSON mode, keyword vs ordered parameter
    parsing (autodetected and forced with -k/-o), missing parameters,
    JSON escaping of method/params, whitespace-stable JSON output,
    -H human mode and -F flat mode.

    Fix: the common ['cli/lightning-cli', '--network=…', '--lightning-dir=…']
    prefix was repeated at every call site; extracted into a local helper.
    """
    l1 = node_factory.get_node()

    def cli(*args):
        # Common command-line prefix for every lightning-cli call below.
        return ['cli/lightning-cli',
                '--network={}'.format(TEST_NETWORK),
                '--lightning-dir={}'.format(l1.daemon.lightning_dir)] + list(args)

    out = subprocess.check_output(cli('help')).decode('utf-8')
    # Test some known output.
    assert 'help [command]\n    List available commands, or give verbose help on one {command}' in out

    # Test JSON output.
    out = subprocess.check_output(cli('-J', 'help')).decode('utf-8')
    j, _ = json.JSONDecoder().raw_decode(out)
    assert j['help'][0]['command'] is not None
    assert j['help'][0]['description'] is not None

    # Test keyword input (autodetect)
    out = subprocess.check_output(cli('-J', 'help', 'command=help')).decode('utf-8')
    j, _ = json.JSONDecoder().raw_decode(out)
    assert 'help [command]' in j['help'][0]['verbose']

    # Test keyword input (forced)
    out = subprocess.check_output(cli('-J', '-k', 'help', 'command=help')).decode('utf-8')
    j, _ = json.JSONDecoder().raw_decode(out)
    assert 'help [command]' in j['help'][0]['verbose']

    # Test ordered input (autodetect)
    out = subprocess.check_output(cli('-J', 'help', 'help')).decode('utf-8')
    j, _ = json.JSONDecoder().raw_decode(out)
    assert 'help [command]' in j['help'][0]['verbose']

    # Test ordered input (forced)
    out = subprocess.check_output(cli('-J', '-o', 'help', 'help')).decode('utf-8')
    j, _ = json.JSONDecoder().raw_decode(out)
    assert 'help [command]' in j['help'][0]['verbose']

    # Test missing parameters.
    try:
        # This will error due to missing parameters.
        # We want to check if lightningd will crash.
        subprocess.check_output(cli('-J', '-o', 'sendpay')).decode('utf-8')
    except Exception:
        # Deliberate best-effort: only the crash check above matters.
        pass

    # Test it escapes JSON completely in both method and params.
    # cli turns " into \", reply turns that into \\\".
    out = subprocess.run(cli('x"[]{}'), stdout=subprocess.PIPE)
    assert 'Unknown command \'x\\\\\\"[]{}\'' in out.stdout.decode('utf-8')

    subprocess.check_output(cli('invoice', '123000', 'l"[]{}', 'd"[]{}')).decode('utf-8')
    # Check label is correct, and also that cli's keyword parsing works.
    out = subprocess.check_output(cli('-k',
                                      'listinvoices', 'label=l"[]{}')).decode('utf-8')
    j = json.loads(out)
    assert only_one(j['invoices'])['label'] == 'l"[]{}'

    # For those using shell scripts (you know who you are Rene), make sure we're maintaining whitespace
    lines = [l for l in out.splitlines() if '"bolt11"' not in l and '"payment_hash"' not in l and '"expires_at"' not in l]
    assert lines == ['{',
                     '   "invoices": [',
                     '      {',
                     r'         "label": "l\"[]{}",',
                     '         "msatoshi": 123000,',
                     '         "amount_msat": "123000msat",',
                     '         "status": "unpaid",',
                     r'         "description": "d\"[]{}",',
                     '      }',
                     '   ]',
                     '}']

    # Make sure we omit top-levels and don't include format hint, when -H forced
    out = subprocess.check_output(cli('-H', 'help')).decode('utf-8')
    lines = out.splitlines()
    assert [l for l in lines if l.startswith('help=')] == []
    assert [l for l in lines if l.startswith('format-hint=')] == []

    # Flat format is great for grep. LONG LIVE UNIX!
    out = subprocess.check_output(cli('-F', 'help')).decode('utf-8')
    lines = out.splitlines()
    # Everything is a help[XX]= line, except format-hint.
    assert [l for l in lines if not re.search(r'^help\[[0-9]*\].', l)] == ['format-hint=simple']
2018-08-04 12:31:31 +02:00
|
|
|
|
2019-02-18 02:58:24 +01:00
|
|
|
def test_daemon_option(node_factory):
    """
    Make sure --daemon at least vaguely works!
    """
    # Lazy way to set up command line and env, plus do VALGRIND checks
    l1 = node_factory.get_node()
    l1.stop()

    # Remove the stale socket so the wait_for below really waits for the
    # daemonized process to create a fresh one.
    os.unlink(l1.rpc.socket_path)
    logfname = os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, "log-daemon")
    subprocess.run(l1.daemon.cmd_line + ['--daemon', '--log-file={}'.format(logfname)], env=l1.daemon.env,
                   check=True)

    # Test some known output (wait for rpc to be ready)
    wait_for(lambda: os.path.exists(l1.rpc.socket_path))
    out = subprocess.check_output(['cli/lightning-cli',
                                   '--network={}'.format(TEST_NETWORK),
                                   '--lightning-dir={}'
                                   .format(l1.daemon.lightning_dir),
                                   'help']).decode('utf-8')
    assert 'help [command]\n    List available commands, or give verbose help on one {command}' in out

    # Shut the daemonized node down via the cli.
    subprocess.run(['cli/lightning-cli',
                    '--network={}'.format(TEST_NETWORK),
                    '--lightning-dir={}'.format(l1.daemon.lightning_dir),
                    'stop'], check=True)

    # It should not complain that subdaemons aren't children.
    with open(logfname, 'r') as f:
        assert 'No child process' not in f.read()
|
2018-08-04 12:31:31 +02:00
|
|
|
@flaky
@pytest.mark.developer("needs DEVELOPER=1")
def test_blockchaintrack(node_factory, bitcoind):
    """Check that we track the blockchain correctly across reorgs
    """
    l1 = node_factory.get_node(random_hsm=True)
    addr = l1.rpc.newaddr(addresstype='all')['p2sh-segwit']

    ######################################################################
    # First failure scenario: rollback on startup doesn't work,
    # and we try to add a block twice when rescanning:
    l1.restart()

    height = bitcoind.rpc.getblockcount()   # 101

    # At height 111 we receive an incoming payment
    hashes = bitcoind.generate_block(9)     # 102-110
    bitcoind.rpc.sendtoaddress(addr, 1)
    time.sleep(1)  # mempool is still unpredictable
    bitcoind.generate_block(1)

    l1.daemon.wait_for_log(r'Owning output.* \(P2SH\).* CONFIRMED')
    outputs = l1.rpc.listfunds()['outputs']
    assert len(outputs) == 1

    ######################################################################
    # Second failure scenario: perform a 20 block reorg
    bitcoind.generate_block(10)
    l1.daemon.wait_for_log('Adding block {}: '.format(height + 20))

    # Now reorg out with a longer fork of 21 blocks
    bitcoind.rpc.invalidateblock(hashes[0])
    bitcoind.wait_for_log(r'InvalidChainFound: invalid block=.* height={}'
                          .format(height + 1))
    hashes = bitcoind.generate_block(30)
    time.sleep(1)

    bitcoind.rpc.getblockcount()
    l1.daemon.wait_for_log('Adding block {}: '.format(height + 30))

    # Our funds got reorged out, we should not have any funds that are confirmed
    # NOTE: sendtoaddress() sets locktime=103 and the reorg at 102 invalidates that tx
    # and deletes it from mempool
    assert [o for o in l1.rpc.listfunds()['outputs'] if o['status'] != "unconfirmed"] == []
|
2021-07-20 18:00:18 +02:00
|
|
|
def chan_active(node, scid, is_active):
    """Return True iff both directions of channel `scid` report `is_active`.

    Expects listchannels to return exactly two half-channel entries; any
    other count (e.g. none yet gossiped) yields False.
    """
    channels = node.rpc.listchannels(scid)['channels']
    print(channels)
    flags = [channel['active'] for channel in channels]
    return flags == [is_active, is_active]
|
2021-04-26 21:58:58 +02:00
|
|
|
@pytest.mark.developer("needs DEVELOPER=1")
@pytest.mark.openchannel('v2')
@pytest.mark.openchannel('v1')
def test_funding_reorg_private(node_factory, bitcoind):
    """Change funding tx height after lockin, between node restart.
    """
    # Rescan to detect reorg at restart and may_reconnect so channeld
    # will restart. Reorg can cause bad gossip msg.
    opts = {'funding-confirms': 2, 'rescan': 10, 'may_reconnect': True,
            'allow_bad_gossip': True,
            # gossipd send lightning update for original channel.
            'allow_broken_log': True}
    l1, l2 = node_factory.line_graph(2, fundchannel=False, opts=opts)
    l1.fundwallet(10000000)
    sync_blockheight(bitcoind, [l1])  # height 102
    bitcoind.generate_block(3)  # heights 103-105

    l1.rpc.fundchannel(l2.info['id'], "all", announce=False)
    bitcoind.generate_block(1)  # height 106

    # Channel subdaemon name differs between open protocols.
    daemon = 'DUALOPEND' if l1.config('experimental-dual-fund') else 'CHANNELD'
    wait_for(lambda: only_one(l1.rpc.listpeers()['peers'][0]['channels'])['status']
             == ['{}_AWAITING_LOCKIN:Funding needs 1 more confirmations for lockin.'.format(daemon)])
    bitcoind.generate_block(1)  # height 107
    l1.wait_channel_active('106x1x0')
    l2.wait_channel_active('106x1x0')
    l1.stop()

    # Create a fork that changes short_channel_id from 106x1x0 to 108x1x0
    bitcoind.simple_reorg(106, 2)  # heights 106-108
    bitcoind.generate_block(1)     # height 109 (to reach minimum_depth=2 again)
    l1.start()

    # l2 was running, sees last stale block being removed
    l2.daemon.wait_for_logs([r'Removing stale block {}'.format(106),
                             r'Got depth change .->{} for .* REORG'.format(0)])

    # New one should replace old.
    wait_for(lambda: chan_active(l2, '108x1x0', True))
    assert l2.rpc.listchannels('106x1x0')['channels'] == []

    l1.rpc.close(l2.info['id'])
    bitcoind.generate_block(1, True)
    l1.daemon.wait_for_log(r'Deleting channel')
    l2.daemon.wait_for_log(r'Deleting channel')
|
2021-04-26 21:58:58 +02:00
|
|
|
@pytest.mark.developer("needs DEVELOPER=1")
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_funding_reorg_remote_lags(node_factory, bitcoind):
    """Nodes may disagree about short_channel_id before channel announcement
    """
    # may_reconnect so channeld will restart; bad gossip can happen due to reorg
    opts = {'funding-confirms': 1, 'may_reconnect': True, 'allow_bad_gossip': True}
    l1, l2 = node_factory.line_graph(2, fundchannel=False, opts=opts)
    l1.fundwallet(10000000)
    sync_blockheight(bitcoind, [l1])  # height 102

    l1.rpc.fundchannel(l2.info['id'], "all")
    bitcoind.generate_block(5)  # heights 103 - 107
    l1.wait_channel_active('103x1x0')
    l2.wait_channel_active('103x1x0')

    # Make l2 temporary blind for blocks > 107
    def no_more_blocks(req):
        # Mimics bitcoind's getblockhash failure for an unknown height.
        return {"result": None,
                "error": {"code": -8, "message": "Block height out of range"}, "id": req['id']}

    l2.daemon.rpcproxy.mock_rpc('getblockhash', no_more_blocks)

    # Reorg changes short_channel_id 103x1x0 to 104x1x0, l1 sees it, restarts channeld
    bitcoind.simple_reorg(103, 1)  # heights 103 - 108
    # But now it's height 104, we need another block to make it announcable.
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log(r'Peer transient failure .* short_channel_id changed to 104x1x0 \(was 103x1x0\)')

    # l2 is still blind, so it keeps waiting for announcement signatures.
    wait_for(lambda: only_one(l2.rpc.listpeers()['peers'][0]['channels'])['status'] == [
        'CHANNELD_NORMAL:Reconnected, and reestablished.',
        'CHANNELD_NORMAL:Funding transaction locked. They need our announcement signatures.'])

    # Unblinding l2 brings it back in sync, restarts channeld and sends its announce sig
    l2.daemon.rpcproxy.mock_rpc('getblockhash', None)

    wait_for(lambda: chan_active(l2, '104x1x0', True))
    assert l2.rpc.listchannels('103x1x0')['channels'] == []

    wait_for(lambda: only_one(l2.rpc.listpeers()['peers'][0]['channels'])['status'] == [
        'CHANNELD_NORMAL:Reconnected, and reestablished.',
        'CHANNELD_NORMAL:Funding transaction locked. Channel announced.'])

    l1.rpc.close(l2.info['id'])
    bitcoind.generate_block(1, True)
    l1.daemon.wait_for_log(r'Deleting channel')
    l2.daemon.wait_for_log(r'Deleting channel')
|
2018-08-04 12:31:31 +02:00
|
|
|
def test_rescan(node_factory, bitcoind):
    """Test the rescan option
    """
    l1 = node_factory.get_node()

    # The first start should start at current_height - 30 = 71, make sure
    # it's not earlier
    l1.daemon.wait_for_log(r'Adding block 101')
    assert not l1.daemon.is_in_log(r'Adding block 70')

    # Restarting with a higher rescan should go back further
    l1.daemon.opts['rescan'] = 50
    l1.restart()
    l1.daemon.wait_for_log(r'Adding block 101')
    assert l1.daemon.is_in_log(r'Adding block 51')
    assert not l1.daemon.is_in_log(r'Adding block 50')

    # Restarting with an absolute rescan should start from there
    # (negative values mean an absolute blockheight).
    l1.daemon.opts['rescan'] = -31
    l1.restart()
    l1.daemon.wait_for_log(r'Adding block 101')
    assert l1.daemon.is_in_log(r'Adding block 31')
    assert not l1.daemon.is_in_log(r'Adding block 30')

    # Restarting with a future absolute blockheight should *fail* if we
    # can't find that height
    l1.daemon.opts['rescan'] = -500000
    l1.stop()
    bitcoind.generate_block(4)
    with pytest.raises(ValueError):
        l1.start()

    # Restarting with future absolute blockheight is fine if we can find it.
    l1.daemon.opts['rescan'] = -105
    oldneedle = l1.daemon.logsearch_start
    l1.start()
    # This could occur before pubkey msg, so move search needle back.
    l1.daemon.logsearch_start = oldneedle
    l1.daemon.wait_for_log(r'Adding block 105')
    assert not l1.daemon.is_in_log(r'Adding block 102')
|
2019-11-20 01:09:09 +01:00
|
|
|
def test_bitcoind_goes_backwards(node_factory, bitcoind):
    """Check that we refuse to acknowledge bitcoind giving a shorter chain without explicit rescan"""
    l1 = node_factory.get_node(may_fail=True, allow_broken_log=True)

    bitcoind.generate_block(10)
    sync_blockheight(bitcoind, [l1])
    l1.stop()

    # Now shrink chain (invalidateblock leaves 'headers' field until restart)
    bitcoind.rpc.invalidateblock(bitcoind.rpc.getblockhash(105))
    # Restart without killing proxies
    bitcoind.rpc.stop()
    TailableProc.stop(bitcoind)
    bitcoind.start()

    # Will simply refuse to start.
    with pytest.raises(ValueError):
        l1.start()

    # Nor will it start with if we ask for a reindex of fewer blocks.
    l1.daemon.opts['rescan'] = 3

    with pytest.raises(ValueError):
        l1.start()

    # This will force it, however.
    l1.daemon.opts['rescan'] = -100
    l1.start()

    # Now mess with bitcoind at runtime.
    bitcoind.generate_block(6)
    sync_blockheight(bitcoind, [l1])

    l1.daemon.wait_for_log('Adding block 110')

    # Shrink the chain again while l1 keeps running.
    bitcoind.rpc.invalidateblock(bitcoind.rpc.getblockhash(105))
    bitcoind.rpc.stop()
    TailableProc.stop(bitcoind)
    bitcoind.start()
    bitcoind.generate_block(5)

    # It will ignore bitcoind and keep asking for block 110.
    time.sleep(5)
    assert l1.rpc.getinfo()['blockheight'] == 110
    assert not l1.daemon.is_in_log('Adding block 109',
                                   start=l1.daemon.logsearch_start)

    # Get past that, and it will suddenly read new blocks
    bitcoind.generate_block(2)
    l1.daemon.wait_for_log('Adding block 109')
    l1.daemon.wait_for_log('Adding block 110')
    l1.daemon.wait_for_log('Adding block 111')
|
|
pytest: Mark test_reserve_enforcement as flaky
It has been causing a lot of Travis failures due to a presumed memory
leak:
```
Exception: Node /tmp/ltests-fhjg26a2/test_reserve_enforcement_1/lightning-2/ has memory leaks: [{'label': 'lightningd/json_stream.c:48:struct json_stream', 'backtrace': ['ccan/ccan/tal/tal.c:435 (tal_alloc_)', 'lightningd/json_stream.c:48 (new_json_stream)', 'lightningd/jsonrpc.c:90 (jcon_new_json_stream)', 'lightningd/jsonrpc.c:444 (attach_json_stream)', 'lightningd/jsonrpc.c:455 (json_start)', 'lightningd/jsonrpc.c:464 (json_stream_success)', 'lightningd/pay.c:932 (json_sendpay_on_resolve)', 'lightningd/pay.c:444 (payment_store)', 'lightningd/pay.c:540 (payment_failed)', 'lightningd/peer_htlcs.c:143 (fail_out_htlc)', 'lightningd/peer_htlcs.c:360 (destroy_hout_subd_died)', 'ccan/ccan/tal/tal.c:235 (notify)', 'ccan/ccan/tal/tal.c:395 (del_tree)', 'ccan/ccan/tal/tal.c:405 (del_tree)', 'ccan/ccan/tal/tal.c:405 (del_tree)', 'ccan/ccan/tal/tal.c:479 (tal_free)', 'ccan/ccan/io/io.c:451 (io_close)', 'lightningd/subd.c:500 (sd_msg_read)', 'lightningd/subd.c:302 (read_fds)', 'ccan/ccan/io/io.c:59 (next_plan)', 'ccan/ccan/io/io.c:395 (do_plan)', 'ccan/ccan/io/io.c:405 (io_ready)', 'ccan/ccan/io/poll.c:310 (io_loop)', 'lightningd/lightningd.c:769 (main)'], 'parents': ['lightningd/jsonrpc.c:681:struct json_connection', 'common/configdir.c:29:char[]'], 'value': '0x125be08'}]
```
Signed-off-by: Christian Decker <@cdecker>
2018-11-21 13:31:59 +01:00
|
|
|
@flaky
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_reserve_enforcement(node_factory, executor):
    """Channeld should disallow you spending into your reserve"""
    # allow_warning: l1 is expected to send a warning when l2 tries to
    # violate the reserve below.
    l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True, 'allow_warning': True})

    # Pay 1000 satoshi to l2.
    l1.pay(l2, 1000000)
    # Stop l2 so we can safely edit its database on disk.
    l2.stop()

    # They should both aim for 1%.
    reserves = l2.db.query('SELECT channel_reserve_satoshis FROM channel_configs')
    assert reserves == [{'channel_reserve_satoshis': 10**6 // 100}] * 2

    # Edit db to reduce reserve to 0 so it will try to violate it.
    l2.db.execute('UPDATE channel_configs SET channel_reserve_satoshis=0')

    l2.start()
    wait_for(lambda: only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])

    # This should be impossible to pay entire thing back: l1 should warn and
    # close connection for trying to violate reserve.
    # Submit asynchronously: the pay itself will not complete.
    executor.submit(l2.pay, l1, 1000000)
    l1.daemon.wait_for_log(
        'Peer transient failure in CHANNELD_NORMAL: channeld.*'
        ' CHANNEL_ERR_CHANNEL_CAPACITY_EXCEEDED'
    )
    # l1 drops the connection rather than accept the reserve violation.
    assert only_one(l1.rpc.listpeers()['peers'])['connected'] is False
|
2018-08-06 06:57:30 +02:00
|
|
|
|
|
|
|
|
|
|
|
def test_ipv4_and_ipv6(node_factory):
    """Test we can bind to both IPv4 and IPv6 addresses (if supported)"""
    port = reserve()
    l1 = node_factory.get_node(options={'addr': ':{}'.format(port)})
    bindings = l1.rpc.getinfo()['binding']

    if len(bindings) == 2:
        # Dual-stack host: wildcard IPv6 binding is listed first,
        # then the wildcard IPv4 one.
        expected = [('ipv6', '::'), ('ipv4', '0.0.0.0')]
    else:
        # Assume we're IPv4 only...
        assert len(bindings) == 1
        expected = [('ipv4', '0.0.0.0')]

    for binding, (addrtype, wildcard) in zip(bindings, expected):
        assert binding['type'] == addrtype
        assert binding['address'] == wildcard
        assert int(binding['port']) == port
|
2018-08-22 12:06:40 +02:00
|
|
|
|
|
|
|
|
2020-08-26 19:51:47 +02:00
|
|
|
@unittest.skipIf(TEST_NETWORK == 'liquid-regtest', "Fees on elements are different")
@unittest.skipIf(
    not DEVELOPER or DEPRECATED_APIS, "Without DEVELOPER=1 we snap to "
    "FEERATE_FLOOR on testnets, and we test the new API."
)
def test_feerates(node_factory):
    """Exercise the `feerates` and `parsefeerate` RPCs.

    Starts with bitcoind unable to estimate fees, then injects feerates
    one at a time via dev overrides and checks the perkw/perkb views,
    the acceptable bounds, and the derived onchain fee estimates.
    """
    # dev-no-fake-fees: don't substitute fake feerates when estimation fails,
    # so we really see the "no estimates" state below.
    l1 = node_factory.get_node(options={'log-level': 'io',
                                        'dev-no-fake-fees': True}, start=False)
    # Make estimatesmartfee fail before the node first asks.
    l1.daemon.rpcproxy.mock_rpc('estimatesmartfee', {
        'error': {"errors": ["Insufficient data or no feerate found"], "blocks": 0}
    })
    l1.start()

    # All estimation types
    types = ["opening", "mutual_close", "unilateral_close", "delayed_to_us",
             "htlc_resolution", "penalty"]

    # Try parsing the feerates, won't work because can't estimate
    for t in types:
        with pytest.raises(RpcError, match=r'Cannot estimate fees'):
            feerate = l1.rpc.parsefeerate(t)

    # Query feerates (shouldn't give any!)
    # Only min/max acceptable (2 entries) appear; no per-type estimates.
    wait_for(lambda: len(l1.rpc.feerates('perkw')['perkw']) == 2)
    feerates = l1.rpc.feerates('perkw')
    assert feerates['warning_missing_feerates'] == 'Some fee estimates unavailable: bitcoind startup?'
    assert 'perkb' not in feerates
    assert feerates['perkw']['max_acceptable'] == 2**32 - 1
    assert feerates['perkw']['min_acceptable'] == 253
    for t in types:
        assert t not in feerates['perkw']

    # Same story in perkb units (perkb == 4 * perkw).
    wait_for(lambda: len(l1.rpc.feerates('perkb')['perkb']) == 2)
    feerates = l1.rpc.feerates('perkb')
    assert feerates['warning_missing_feerates'] == 'Some fee estimates unavailable: bitcoind startup?'
    assert 'perkw' not in feerates
    assert feerates['perkb']['max_acceptable'] == (2**32 - 1)
    assert feerates['perkb']['min_acceptable'] == 253 * 4
    for t in types:
        assert t not in feerates['perkb']

    # Now try setting them, one at a time.
    # Set CONSERVATIVE/2 feerate, for max
    l1.set_feerates((15000, 0, 0, 0), True)
    wait_for(lambda: len(l1.rpc.feerates('perkw')['perkw']) == 2)
    feerates = l1.rpc.feerates('perkw')
    assert feerates['warning_missing_feerates'] == 'Some fee estimates unavailable: bitcoind startup?'
    assert 'perkb' not in feerates
    # max_acceptable is 10x the highest estimate.
    assert feerates['perkw']['max_acceptable'] == 15000 * 10
    assert feerates['perkw']['min_acceptable'] == 253

    # Set ECONOMICAL/6 feerate, for unilateral_close and htlc_resolution
    l1.set_feerates((15000, 11000, 0, 0), True)
    wait_for(lambda: len(l1.rpc.feerates('perkw')['perkw']) == 4)
    feerates = l1.rpc.feerates('perkw')
    assert feerates['perkw']['unilateral_close'] == 11000
    assert feerates['perkw']['htlc_resolution'] == 11000
    assert feerates['warning_missing_feerates'] == 'Some fee estimates unavailable: bitcoind startup?'
    assert 'perkb' not in feerates
    assert feerates['perkw']['max_acceptable'] == 15000 * 10
    assert feerates['perkw']['min_acceptable'] == 253

    # Set ECONOMICAL/12 feerate, for all but min (so, no mutual_close feerate)
    l1.set_feerates((15000, 11000, 6250, 0), True)
    wait_for(lambda: len(l1.rpc.feerates('perkb')['perkb']) == len(types) - 1 + 2)
    feerates = l1.rpc.feerates('perkb')
    assert feerates['perkb']['unilateral_close'] == 11000 * 4
    assert feerates['perkb']['htlc_resolution'] == 11000 * 4
    assert 'mutual_close' not in feerates['perkb']
    for t in types:
        if t not in ("unilateral_close", "htlc_resolution", "mutual_close"):
            # 6250 perkw == 25000 perkb.
            assert feerates['perkb'][t] == 25000
    assert feerates['warning_missing_feerates'] == 'Some fee estimates unavailable: bitcoind startup?'
    assert 'perkw' not in feerates
    assert feerates['perkb']['max_acceptable'] == 15000 * 4 * 10
    assert feerates['perkb']['min_acceptable'] == 253 * 4

    # Set ECONOMICAL/100 feerate for min and mutual_close
    l1.set_feerates((15000, 11000, 6250, 5000), True)
    wait_for(lambda: len(l1.rpc.feerates('perkw')['perkw']) >= len(types) + 2)
    feerates = l1.rpc.feerates('perkw')
    assert feerates['perkw']['unilateral_close'] == 11000
    assert feerates['perkw']['htlc_resolution'] == 11000
    assert feerates['perkw']['mutual_close'] == 5000
    for t in types:
        if t not in ("unilateral_close", "htlc_resolution", "mutual_close"):
            assert feerates['perkw'][t] == 25000 // 4
    # All estimates present now: no warning.
    assert 'warning' not in feerates
    assert 'perkb' not in feerates
    assert feerates['perkw']['max_acceptable'] == 15000 * 10
    # min_acceptable is half the lowest estimate.
    assert feerates['perkw']['min_acceptable'] == 5000 // 2

    # Derived tx-cost estimates: feerate * weight // 1000.
    assert len(feerates['onchain_fee_estimates']) == 5
    assert feerates['onchain_fee_estimates']['opening_channel_satoshis'] == feerates['perkw']['opening'] * 702 // 1000
    assert feerates['onchain_fee_estimates']['mutual_close_satoshis'] == feerates['perkw']['mutual_close'] * 673 // 1000
    assert feerates['onchain_fee_estimates']['unilateral_close_satoshis'] == feerates['perkw']['unilateral_close'] * 598 // 1000
    htlc_feerate = feerates["perkw"]["htlc_resolution"]
    htlc_timeout_cost = feerates["onchain_fee_estimates"]["htlc_timeout_satoshis"]
    htlc_success_cost = feerates["onchain_fee_estimates"]["htlc_success_satoshis"]

    # Try parsing the feerates again: now that estimates exist it succeeds.
    for t in types:
        feerate = l1.rpc.parsefeerate(t)
        assert feerate['perkw']
        assert 'perkb' not in feerate

    if EXPERIMENTAL_FEATURES:
        # option_anchor_outputs
        # Anchor outputs change the HTLC tx weights slightly.
        assert htlc_timeout_cost == htlc_feerate * 666 // 1000
        assert htlc_success_cost == htlc_feerate * 706 // 1000
    else:
        assert htlc_timeout_cost == htlc_feerate * 663 // 1000
        assert htlc_success_cost == htlc_feerate * 703 // 1000
|
2018-08-24 04:22:48 +02:00
|
|
|
|
2018-08-24 04:22:02 +02:00
|
|
|
|
2018-08-22 12:06:40 +02:00
|
|
|
def test_logging(node_factory):
    """SIGHUP should close the current log file and open a fresh one.

    Moves the live log file aside, sends SIGHUP, and checks that the old
    file ends with the 'Ending log' marker while the new one begins with
    the 'Started log' marker.  Done twice to cover issue #4240 (a repeated
    SIGHUP must re-open the log again, not kill the daemon).
    """
    # Since we redirect, node.start() will fail: do manually.
    l1 = node_factory.get_node(options={'log-file': 'logfile'}, start=False)
    logpath = os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'logfile')
    logpath_moved = os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'logfile_moved')
    l1.daemon.start(wait_for_initialized=False)
    wait_for(lambda: os.path.exists(logpath))

    def check_new_log():
        # True once the freshly-opened log starts with the SIGHUP marker.
        log2 = open(logpath).readlines()
        return len(log2) > 0 and log2[0].endswith("Started log due to SIGHUP\n")

    def rotate(move_to):
        # Move the live log aside, ask lightningd to re-open it, and wait
        # until both the moved file and a new live file exist.
        shutil.move(logpath, move_to)
        l1.daemon.proc.send_signal(signal.SIGHUP)
        wait_for(lambda: os.path.exists(move_to))
        wait_for(lambda: os.path.exists(logpath))

    rotate(logpath_moved)

    log1 = open(logpath_moved).readlines()
    assert log1[-1].endswith("Ending log due to SIGHUP\n")
    wait_for(check_new_log)

    # Issue #4240
    # Repeated SIGHUP should just re-open the log file
    # and not terminate the daemon.
    logpath_moved_2 = os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'logfile_moved_2')
    rotate(logpath_moved_2)
    wait_for(check_new_log)
|
|
|
|
|
2018-08-22 12:06:42 +02:00
|
|
|
|
2018-09-25 22:50:47 +02:00
|
|
|
@unittest.skipIf(VALGRIND,
                 "Valgrind sometimes fails assert on injected SEGV")
def test_crashlog(node_factory):
    """A SIGSEGV must produce a crash.log file in the network directory."""
    l1 = node_factory.get_node(may_fail=True, allow_broken_log=True)

    def crash_files(n):
        # All files in the node's network dir whose name contains 'crash.log'.
        netdir = os.path.join(n.daemon.lightning_dir, TEST_NETWORK)
        return [name for name in os.listdir(netdir) if 'crash.log' in name]

    # No crash log before the injected fault...
    assert crash_files(l1) == []
    # ...then force a segfault and wait for one to appear.
    l1.daemon.proc.send_signal(signal.SIGSEGV)
    wait_for(lambda: len(crash_files(l1)) > 0)
|
2018-11-20 06:24:17 +01:00
|
|
|
|
|
|
|
|
|
|
|
def test_configfile_before_chdir(node_factory):
    """Must read config file before chdir into lightning dir"""
    l1 = node_factory.get_node()
    l1.stop()

    olddir = os.getcwd()
    try:
        # as lightning_dir ends in /, basename and dirname don't work as expected.
        os.chdir(os.path.dirname(l1.daemon.lightning_dir[:-1]))
        config = os.path.join(os.path.basename(l1.daemon.lightning_dir[:-1]), TEST_NETWORK, "test_configfile")
        # Test both an early arg and a normal arg.
        with open(config, 'wb') as f:
            f.write(b'always-use-proxy=true\n')
            f.write(b'proxy=127.0.0.1:100\n')
        l1.daemon.opts['conf'] = config

        # Update executable to point to right place
        l1.daemon.executable = os.path.join(olddir, l1.daemon.executable)
        l1.start()
        assert l1.rpc.listconfigs()['always-use-proxy']
        assert l1.rpc.listconfigs()['proxy'] == '127.0.0.1:100'
    finally:
        # Restore cwd even if start() or an assertion above fails, so
        # later tests in the same process run from the expected directory.
        os.chdir(olddir)
|
2018-12-03 00:00:39 +01:00
|
|
|
|
|
|
|
|
|
|
|
def test_json_error(node_factory):
    """Must return valid json even if it quotes our weirdness"""
    l1 = node_factory.get_node()

    # Deliberately malformed `close` arguments: a dict where a channel id
    # (or short channel id) string is expected.
    bad_close_args = {
        "tx": "020000000001011490f737edd2ea2175a032b58ea7cd426dfc244c339cd044792096da3349b18a0100000000ffffffff021c900300000000001600140e64868e2f752314bc82a154c8c5bf32f3691bb74da00b00000000002200205b8cd3b914cf67cdd8fa6273c930353dd36476734fbd962102c2df53b90880cd0247304402202b2e3195a35dc694bbbc58942dc9ba59cc01d71ba55c9b0ad0610ccd6a65633702201a849254453d160205accc00843efb0ad1fe0e186efa6a7cee1fb6a1d36c736a012103d745445c9362665f22e0d96e9e766f273f3260dea39c8a76bfa05dd2684ddccf00000000",
        "txid": "2128c10f0355354479514f4a23eaa880d94e099406d419bbb0d800143accddbb",
        "channel_id": "bbddcc3a1400d8b0bb19d40694094ed980a8ea234a4f5179443555030fc12820",
    }
    with pytest.raises(RpcError, match=r'id: should be a channel ID or short channel ID: invalid token'):
        l1.rpc.close(bad_close_args)

    # Should not corrupt following RPC
    l1.rpc.getinfo()
|
2018-12-05 03:02:07 +01:00
|
|
|
|
|
|
|
|
|
|
|
def test_check_command(node_factory):
    """The `check` RPC validates a command's form without executing it."""
    l1 = node_factory.get_node()

    l1.rpc.check(command_to_check='help')
    l1.rpc.check(command_to_check='help', command='check')
    # Note: this just checks form, not whether it's valid!
    l1.rpc.check(command_to_check='help', command='badcommand')
    with pytest.raises(RpcError, match=r'Unknown command'):
        l1.rpc.check(command_to_check='badcommand')
    with pytest.raises(RpcError, match=r'unknown parameter'):
        l1.rpc.check(command_to_check='help', badarg='x')

    # Ensures we have compulsory parameters.
    with pytest.raises(RpcError, match=r'missing required parameter'):
        l1.rpc.check(command_to_check='connect')
    # Even with optional parameters.
    with pytest.raises(RpcError, match=r'missing required parameter'):
        l1.rpc.check(command_to_check='connect', host='x', port=77)
    # Makes sure parameter types are correct.
    with pytest.raises(RpcError, match=r'should be an integer'):
        l1.rpc.check(command_to_check='connect', id='test', host='x', port="abcd")

    # FIXME: python wrapper doesn't let us test array params.
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect(l1.rpc.socket_path)

    def check_raw(params, expect_success):
        """Send a raw JSON-RPC `check` with positional params over the
        socket and assert whether it succeeds or returns an error."""
        req = {"id": 1, "jsonrpc": "2.0", "method": "check", "params": params}
        sock.sendall(json.dumps(req).encode())
        obj, _ = l1.rpc._readobj(sock, b'')
        assert obj['id'] == 1
        if expect_success:
            assert 'result' in obj
            assert 'error' not in obj
        else:
            assert 'result' not in obj
            assert 'error' in obj

    check_raw(["help"], True)
    check_raw(["help", "check"], True)
    # Too many positional arguments.
    check_raw(["help", "a", "b"], False)
    check_raw(["badcommand"], False)
    # Missing required parameter.
    check_raw(["connect"], False)
    # Wrong parameter type (port should be an integer).
    check_raw(["connect", "test", "x", "abcd"], False)

    sock.close()
|
2019-01-08 01:17:50 +01:00
|
|
|
|
|
|
|
|
2021-04-26 21:58:58 +02:00
|
|
|
@pytest.mark.developer("FIXME: without DEVELOPER=1 we timeout")
def test_bad_onion(node_factory, bitcoind):
    """Test that we get a reasonable error from sendpay when an onion is bad"""
    # log-level io so we can grep raw wire messages from l4's log below.
    l1, l2, l3, l4 = node_factory.line_graph(4, wait_for_announce=True,
                                             opts={'log-level': 'io'})

    inv = l4.rpc.invoice(123000, 'test_bad_onion', 'description')
    route = l1.rpc.getroute(l4.info['id'], 123000, 1)['route']
    # Three hops: l2, l3, l4.
    assert len(route) == 3

    # A valid pubkey, but not the real node at that hop.
    mangled_nodeid = '0265b6ab5ec860cd257865d61ef0bbf5b3339c36cbda8b26b74e7f1dca490b6518'

    # Replace id with a different pubkey, so onion encoded badly at third hop.
    route[2]['id'] = mangled_nodeid
    l1.rpc.sendpay(route, inv['payment_hash'], payment_secret=inv['payment_secret'])
    with pytest.raises(RpcError) as err:
        l1.rpc.waitsendpay(inv['payment_hash'])

    # FIXME: #define PAY_TRY_OTHER_ROUTE 204
    PAY_TRY_OTHER_ROUTE = 204
    assert err.value.error['code'] == PAY_TRY_OTHER_ROUTE
    # FIXME: WIRE_INVALID_ONION_HMAC = BADONION|PERM|5
    WIRE_INVALID_ONION_HMAC = 0x8000 | 0x4000 | 5
    assert err.value.error['data']['failcode'] == WIRE_INVALID_ONION_HMAC
    assert err.value.error['data']['erring_node'] == mangled_nodeid
    assert err.value.error['data']['erring_channel'] == route[2]['channel']

    # We should see a WIRE_UPDATE_FAIL_MALFORMED_HTLC from l4.
    line = l4.daemon.is_in_log(r'\[OUT\] 0087')
    # 008739d3149a5c37e95f9dae718ce46efc60248e110e10117d384870a6762e8e33030000000000000000d7fc52f6c32773aabca55628fe616058aecc44a384e0abfa85c0c48b449dd38dc005
    # type<--------------channelid---------------------------------------><--htlc-id-----><--------------------------------------------- sha_of_onion --->code
    # Extract the sha-of-onion field from the raw hex message.
    sha = re.search(r' 0087.{64}.{16}(.{64})', line).group(1)

    # Should see same sha in onionreply
    l1.daemon.wait_for_log(r'failcode .* from onionreply .*{sha}'.format(sha=sha))

    # Replace id with a different pubkey, so onion encoded badly at second hop.
    route[1]['id'] = mangled_nodeid
    l1.rpc.sendpay(route, inv['payment_hash'], payment_secret=inv['payment_secret'])
    with pytest.raises(RpcError) as err:
        l1.rpc.waitsendpay(inv['payment_hash'])

    # FIXME: #define PAY_TRY_OTHER_ROUTE 204
    PAY_TRY_OTHER_ROUTE = 204
    assert err.value.error['code'] == PAY_TRY_OTHER_ROUTE
    assert err.value.error['data']['failcode'] == WIRE_INVALID_ONION_HMAC
    assert err.value.error['data']['erring_node'] == mangled_nodeid
    assert err.value.error['data']['erring_channel'] == route[1]['channel']
|
2019-03-04 04:13:20 +01:00
|
|
|
|
|
|
|
|
2021-04-26 21:58:58 +02:00
|
|
|
@pytest.mark.developer("Needs DEVELOPER=1 to force onion fail")
def test_bad_onion_immediate_peer(node_factory, bitcoind):
    """Test that we handle the malformed msg when we're the origin"""
    # dev option forces the peer to fail processing our onion packet.
    l1, l2 = node_factory.line_graph(2, opts={'dev-fail-process-onionpacket': None})

    inv = l2.rpc.invoice(123000, 'test_bad_onion_immediate_peer', 'description')
    route = l1.rpc.getroute(l2.info['id'], 123000, 1)['route']
    # Direct peer: a single hop.
    assert len(route) == 1

    l1.rpc.sendpay(route, inv['payment_hash'], payment_secret=inv['payment_secret'])
    with pytest.raises(RpcError) as excinfo:
        l1.rpc.waitsendpay(inv['payment_hash'])
    failure = excinfo.value.error

    # FIXME: #define PAY_UNPARSEABLE_ONION 202
    PAY_UNPARSEABLE_ONION = 202
    assert failure['code'] == PAY_UNPARSEABLE_ONION
    # FIXME: WIRE_INVALID_ONION_HMAC = BADONION|PERM|5
    WIRE_INVALID_ONION_HMAC = 0x8000 | 0x4000 | 5
    assert failure['data']['failcode'] == WIRE_INVALID_ONION_HMAC
|
|
|
|
|
|
|
|
|
2019-05-06 20:50:11 +02:00
|
|
|
def test_newaddr(node_factory, chainparams):
    """newaddr returns only the requested address type; 'all' returns both."""
    l1 = node_factory.get_node()

    # Requesting one type must not leak the other into the result.
    p2sh = l1.rpc.newaddr('p2sh-segwit')
    assert 'bech32' not in p2sh
    assert p2sh['p2sh-segwit'].startswith(chainparams['p2sh_prefix'])

    bech32 = l1.rpc.newaddr('bech32')
    assert 'p2sh-segwit' not in bech32
    assert bech32['bech32'].startswith(chainparams['bip173_prefix'])

    # 'all' yields both flavors, each with its network-specific prefix.
    both = l1.rpc.newaddr('all')
    assert both['p2sh-segwit'].startswith(chainparams['p2sh_prefix'])
    assert both['bech32'].startswith(chainparams['bip173_prefix'])
|
2019-05-31 14:40:32 +02:00
|
|
|
|
|
|
|
|
|
|
|
def test_bitcoind_fail_first(node_factory, bitcoind, executor):
    """Make sure we handle spurious bitcoin-cli failures during startup

    See [#2687](https://github.com/ElementsProject/lightning/issues/2687) for
    details
    """
    # Do not start the lightning node since we need to instrument bitcoind
    # first.
    l1 = node_factory.get_node(start=False)

    # Instrument bitcoind to fail some queries first.
    def mock_fail(*args):
        raise ValueError()

    l1.daemon.rpcproxy.mock_rpc('getblockhash', mock_fail)
    l1.daemon.rpcproxy.mock_rpc('estimatesmartfee', mock_fail)

    # Startup blocks until bitcoind answers, so run it in a worker thread.
    f = executor.submit(l1.start)

    wait_for(lambda: l1.daemon.running)
    # Make sure it fails on the first `getblock` call (need to use `is_in_log`
    # since the `wait_for_log` in `start` sets the offset)
    wait_for(lambda: l1.daemon.is_in_log(
        r'getblockhash [a-z0-9]* exited with status 1'))
    wait_for(lambda: l1.daemon.is_in_log(
        r'Unable to estimate opening fees'))

    # Now unset the mock, so calls go through again
    l1.daemon.rpcproxy.mock_rpc('getblockhash', None)
    l1.daemon.rpcproxy.mock_rpc('estimatesmartfee', None)

    # Startup should now complete; re-raises if l1.start() failed.
    f.result()
|
2019-07-16 03:41:51 +02:00
|
|
|
|
|
|
|
|
2021-04-26 21:58:58 +02:00
|
|
|
@pytest.mark.developer("needs --dev-force-bip32-seed")
@unittest.skipIf(TEST_NETWORK != 'regtest', "Addresses are network specific")
def test_dev_force_bip32_seed(node_factory):
    """With a forced BIP32 seed, fresh addresses are fully deterministic."""
    l1 = node_factory.get_node(options={'dev-force-bip32-seed': '0000000000000000000000000000000000000000000000000000000000000001'})
    # First is m/0/0/1 ..
    expected_addresses = [
        "bcrt1qsdzqt93xsyewdjvagndw9523m27e52er5ca7hm",
        "bcrt1qlkt93775wmf33uacykc49v2j4tayn0yj25msjn",
        "bcrt1q2ng546gs0ylfxrvwx0fauzcvhuz655en4kwe2c",
        "bcrt1qrdpwrlrmrnvn535l5eldt64lxm8r2nwkv0ruxq",
        "bcrt1q622lwmdzxxterumd746eu3d3t40pq53p62zhlz",
    ]
    # Each newaddr call must hand out the next address in the chain.
    for expected in expected_addresses:
        assert l1.rpc.newaddr('bech32')['bech32'] == expected
|
|
|
|
|
|
|
|
|
2021-04-26 21:58:58 +02:00
|
|
|
@pytest.mark.developer("needs dev command")
def test_dev_demux(node_factory):
    """Exercise parameter demuxing of the multiplexed `dev` command.

    Checks `check` validation, real calls in both object and array form,
    help output, and the (intentionally crashing) `crash` subcommand.
    """
    # `dev crash` will be invoked at the end, hence may_fail/allow_broken_log.
    l1 = node_factory.get_node(may_fail=True, allow_broken_log=True)

    # Check should work.
    l1.rpc.check(command_to_check='dev', subcommand='crash')
    l1.rpc.check(command_to_check='dev', subcommand='slowcmd', msec=1000)
    l1.rpc.check(command_to_check='dev', subcommand='rhash', secret='00' * 32)
    with pytest.raises(RpcError, match=r'Unknown subcommand'):
        l1.rpc.check(command_to_check='dev', subcommand='foobar')
    with pytest.raises(RpcError, match=r'unknown parameter'):
        l1.rpc.check(command_to_check='dev', subcommand='crash', unk=1)
    with pytest.raises(RpcError, match=r"msec: should be an integer: invalid token"):
        l1.rpc.check(command_to_check='dev', subcommand='slowcmd', msec='aaa')
    with pytest.raises(RpcError, match=r'missing required parameter'):
        l1.rpc.check(command_to_check='dev', subcommand='rhash')
    with pytest.raises(RpcError, match=r'missing required parameter'):
        l1.rpc.check(command_to_check='dev')

    # Non-check failures should fail, in both object and array form.
    with pytest.raises(RpcError, match=r'Unknown subcommand'):
        l1.rpc.call('dev', {'subcommand': 'foobar'})
    with pytest.raises(RpcError, match=r'Unknown subcommand'):
        l1.rpc.call('dev', ['foobar'])
    with pytest.raises(RpcError, match=r'unknown parameter'):
        l1.rpc.call('dev', {'subcommand': 'crash', 'unk': 1})
    with pytest.raises(RpcError, match=r'too many parameters'):
        l1.rpc.call('dev', ['crash', 1])
    with pytest.raises(RpcError, match=r"msec: should be an integer: invalid token"):
        l1.rpc.call('dev', {'subcommand': 'slowcmd', 'msec': 'aaa'})
    with pytest.raises(RpcError, match=r"msec: should be an integer: invalid token"):
        l1.rpc.call('dev', ['slowcmd', 'aaa'])
    with pytest.raises(RpcError, match=r'missing required parameter'):
        l1.rpc.call('dev', {'subcommand': 'rhash'})
    with pytest.raises(RpcError, match=r'missing required parameter'):
        l1.rpc.call('dev', ['rhash'])
    with pytest.raises(RpcError, match=r'missing required parameter'):
        l1.rpc.call('dev')

    # Help should list them all.
    assert 'subcommand=crash|rhash|slowcmd' in l1.rpc.help('dev')['help'][0]['command']

    # These work
    assert l1.rpc.call('dev', ['slowcmd', '7'])['msec'] == 7
    assert l1.rpc.call('dev', {'subcommand': 'slowcmd', 'msec': '7'})['msec'] == 7
    assert l1.rpc.call('dev', {'subcommand': 'rhash', 'secret': '00' * 32})['rhash'] == '66687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f2925'

    # `crash` deliberately takes the daemon down; any RpcError is acceptable.
    with pytest.raises(RpcError):
        l1.rpc.call('dev', {'subcommand': 'crash'})
|
2019-08-29 03:01:55 +02:00
|
|
|
|
|
|
|
|
2021-05-07 20:39:23 +02:00
|
|
|
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_list_features_only(node_factory):
    """`--list-features-only` prints the exact expected feature list.

    Must be kept in sync whenever a feature bit is added or its parity
    (odd = optional, even = compulsory) changes.
    """
    features = subprocess.check_output(['lightningd/lightningd',
                                        '--list-features-only']).decode('utf-8').splitlines()
    expected = ['option_data_loss_protect/odd',
                'option_upfront_shutdown_script/odd',
                'option_gossip_queries/odd',
                'option_var_onion_optin/odd',
                'option_gossip_queries_ex/odd',
                'option_static_remotekey/odd',
                'option_payment_secret/even',
                'option_basic_mpp/odd',
                ]
    # Extra features only advertised on experimental builds.
    if EXPERIMENTAL_FEATURES:
        expected += ['option_anchor_outputs/odd']
        expected += ['option_shutdown_anysegwit/odd']
        expected += ['option_quiesce/odd']
        expected += ['option_onion_messages/odd']
        expected += ['supports_open_accept_channel_type']
    else:
        expected += ['option_shutdown_anysegwit/odd']
    assert features == expected
|
2019-10-08 10:58:46 +02:00
|
|
|
|
|
|
|
|
|
|
|
def test_relative_config_dir(node_factory):
    """A relative --lightning-dir is canonicalized to an absolute path.

    We chdir into the parent of the node's lightning-dir and pass only the
    last path component as the option, then check listconfigs reports an
    absolute path.
    """
    l1 = node_factory.get_node(start=False)
    initial_dir = os.getcwd()
    # Strip the trailing '/' so the final split component is the dir name.
    lndir = l1.daemon.opts.get("lightning-dir")[:-1]
    # Keep only the last component as the (relative) lightning-dir option.
    *root_dir, l1.daemon.opts["lightning-dir"] = lndir.split('/')
    # Fix: restore the cwd even if start() or an assertion fails, so a
    # failure here cannot poison every subsequent test in the process.
    try:
        os.chdir('/'.join(root_dir))
        # The executable path was relative to the original cwd.
        l1.daemon.executable = os.path.join(initial_dir, l1.daemon.executable)
        l1.start()
        assert os.path.isabs(l1.rpc.listconfigs()["lightning-dir"])
        l1.stop()
    finally:
        os.chdir(initial_dir)
|
2019-10-12 10:11:25 +02:00
|
|
|
|
|
|
|
|
|
|
|
def test_signmessage(node_factory):
    """signmessage/checkmessage round-trip, plus LND interoperability vectors."""
    l1, l2 = node_factory.line_graph(2, wait_for_announce=True)

    # Each corpus entry is [contributor, message, zbase signature, signer id].
    corpus = [[None,
               "this is a test!",
               l1.rpc.signmessage("this is a test!")['zbase'],
               l1.info['id']]]

    # Other contributions from LND users!
    corpus += [
        ['@bitconner',
         "is this compatible?",
         'rbgfioj114mh48d8egqx8o9qxqw4fmhe8jbeeabdioxnjk8z3t1ma1hu1fiswpakgucwwzwo6ofycffbsqusqdimugbh41n1g698hr9t',
         '02b80cabdf82638aac86948e4c06e82064f547768dcef977677b9ea931ea75bab5'],
        ['@duck1123',
         'hi',
         'rnrphcjswusbacjnmmmrynh9pqip7sy5cx695h6mfu64iac6qmcmsd8xnsyczwmpqp9shqkth3h4jmkgyqu5z47jfn1q7gpxtaqpx4xg',
         '02de60d194e1ca5947b59fe8e2efd6aadeabfb67f2e89e13ae1a799c1e08e4a43b'],
        ['@jochemin',
         'hi',
         'ry8bbsopmduhxy3dr5d9ekfeabdpimfx95kagdem7914wtca79jwamtbw4rxh69hg7n6x9ty8cqk33knbxaqftgxsfsaeprxkn1k48p3',
         '022b8ece90ee891cbcdac0c1cc6af46b73c47212d8defbce80265ac81a6b794931'],
    ]

    for c in corpus:
        print("Shout out to {}".format(c[0]))
        # The standalone checkmessage tool recovers the signer's key...
        assert subprocess.check_output(['devtools/lightning-checkmessage',
                                        c[1], c[2]]).decode('utf-8') == "Signature claims to be from key {}\n".format(c[3])

        # ...and exits 0 when the expected key is supplied explicitly.
        subprocess.run(['devtools/lightning-checkmessage', c[1], c[2], c[3]], check=True)

        # Tampering with the message must make verification fail (non-zero exit).
        with pytest.raises(subprocess.CalledProcessError):
            subprocess.run(['devtools/lightning-checkmessage',
                            c[1] + "modified", c[2], c[3]], check=True)

        # Same checks via the RPC interface.
        assert l1.rpc.checkmessage(c[1], c[2], c[3])['verified']
        assert not l1.rpc.checkmessage(c[1] + "modified", c[2], c[3])['verified']
        checknokey = l1.rpc.checkmessage(c[1], c[2])
        # Of course, we know our own pubkey
        if c[3] == l1.info['id']:
            assert checknokey['verified']
        else:
            assert not checknokey['verified']
        assert checknokey['pubkey'] == c[3]

    # l2 knows about l1, so it can validate it.
    zm = l1.rpc.signmessage(message="message for you")['zbase']
    checknokey = l2.rpc.checkmessage(message="message for you", zbase=zm)
    assert checknokey['pubkey'] == l1.info['id']
    assert checknokey['verified']
|
2019-11-23 02:44:51 +01:00
|
|
|
|
|
|
|
|
|
|
|
def test_include(node_factory):
    """A config file can pull in another one via the `include` directive."""
    l1 = node_factory.get_node(start=False)

    subdir = os.path.join(l1.daemon.opts.get("lightning-dir"), "subdir")
    os.makedirs(subdir)
    # conf1 only includes conf2; the actual setting lives in conf2.
    with open(os.path.join(subdir, "conf1"), 'w') as f:
        f.write('include conf2')
    with open(os.path.join(subdir, "conf2"), 'w') as f:
        f.write('alias=conf2')
    l1.daemon.opts['conf'] = os.path.join(subdir, "conf1")
    l1.start()

    # The alias set in the included file must take effect.
    assert l1.rpc.listconfigs('alias')['alias'] == 'conf2'
|
2019-11-23 02:45:53 +01:00
|
|
|
|
|
|
|
|
2019-12-01 16:20:53 +01:00
|
|
|
def test_config_in_subdir(node_factory, chainparams):
    """Network-subdirectory config files are read, with option restrictions.

    `conf` is forbidden in any config file; `network` is allowed only in the
    top-level config; `lightning-dir` is allowed only with an explicit --conf.
    """
    l1 = node_factory.get_node(start=False)
    network = chainparams['name']

    # Config placed in <lightning-dir>/<network>/config is picked up.
    subdir = os.path.join(l1.daemon.opts.get("lightning-dir"), network)
    with open(os.path.join(subdir, "config"), 'w') as f:
        f.write('alias=test_config_in_subdir')
    l1.start()

    assert l1.rpc.listconfigs('alias')['alias'] == 'test_config_in_subdir'

    l1.stop()

    # conf is not allowed in any config file.
    with open(os.path.join(l1.daemon.opts.get("lightning-dir"), "config"), 'w') as f:
        f.write('conf={}/conf'.format(network))

    out = subprocess.run(['lightningd/lightningd',
                          '--lightning-dir={}'.format(l1.daemon.opts.get("lightning-dir"))],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    assert out.returncode == 1
    assert "conf: not permitted in configuration files" in out.stderr.decode('utf-8')

    # network is allowed in root config file.
    with open(os.path.join(l1.daemon.opts.get("lightning-dir"), "config"), 'w') as f:
        f.write('network={}'.format(network))

    l1.start()
    l1.stop()

    # but not in network config file.
    with open(os.path.join(subdir, "config"), 'w') as f:
        f.write('network={}'.format(network))

    out = subprocess.run(['lightningd/lightningd',
                          '--lightning-dir={}'.format(l1.daemon.opts.get("lightning-dir"))],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    assert out.returncode == 1
    assert "network: not permitted in network-specific configuration files" in out.stderr.decode('utf-8')

    # lightning-dir only allowed if we explicitly use --conf
    os.unlink(os.path.join(subdir, "config"))
    with open(os.path.join(l1.daemon.opts.get("lightning-dir"), "config"), 'w') as f:
        f.write('lightning-dir={}/test'.format(l1.daemon.opts.get("lightning-dir")))

    out = subprocess.run(['lightningd/lightningd',
                          '--lightning-dir={}'.format(l1.daemon.opts.get("lightning-dir"))],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    assert out.returncode == 1
    assert "lightning-dir: not permitted in implicit configuration files" in out.stderr.decode('utf-8')

    # With an explicit --conf the same file is accepted.
    l1.daemon.opts['conf'] = os.path.join(l1.daemon.opts.get("lightning-dir"), "config")
    l1.start()
|
2019-11-23 02:46:58 +01:00
|
|
|
|
|
|
|
|
|
|
|
def restore_valgrind(node, subdir):
    """Move valgrind files back to where fixtures expect them"""
    destination = node.daemon.opts.get("lightning-dir")
    for entry in os.listdir(subdir):
        # Only valgrind error reports are relocated; everything else stays.
        if not entry.startswith('valgrind-errors.'):
            continue
        shutil.move(os.path.join(subdir, entry), destination)
|
|
|
|
|
|
|
|
|
|
|
|
@unittest.skipIf(env('COMPAT') != 1, "Upgrade code requires COMPAT_V073")
def test_testnet_upgrade(node_factory):
    """Test that we move files correctly on old testnet upgrade (even without specifying the network)"""
    l1 = node_factory.get_node(start=False, may_fail=True)

    basedir = l1.daemon.opts.get("lightning-dir")
    # Make it old-style
    os.rename(os.path.join(basedir, TEST_NETWORK, 'hsm_secret'),
              os.path.join(basedir, 'hsm_secret'))
    shutil.rmtree(os.path.join(basedir, TEST_NETWORK))
    # Add (empty!) config file; it should be left in place.
    with open(os.path.join(basedir, 'config'), 'wb') as f:
        f.write(b"# Test config file")
    with open(os.path.join(basedir, 'another_file'), 'wb') as f:
        pass

    # We need to allow this, otherwise no upgrade!
    del l1.daemon.opts['allow-deprecated-apis']
    # We want to test default network
    del l1.daemon.opts['network']

    # Wrong chain, will fail to start, but that's OK.
    with pytest.raises(ValueError):
        l1.start()

    # Default (non-regtest) network is testnet, so files go to basedir/testnet.
    netdir = os.path.join(basedir, "testnet")
    assert l1.daemon.is_in_log("Moving hsm_secret into {}/".format(netdir))
    assert l1.daemon.is_in_log("Moving another_file into {}/".format(netdir))
    assert not l1.daemon.is_in_log("Moving config into {}/".format(netdir))
    assert not l1.daemon.is_in_log("Moving lightningd-testnet.pid into {}/"
                                   .format(netdir))

    # Should move these
    assert os.path.isfile(os.path.join(netdir, "hsm_secret"))
    assert not os.path.isfile(os.path.join(basedir, "hsm_secret"))
    assert os.path.isfile(os.path.join(netdir, "another_file"))
    assert not os.path.isfile(os.path.join(basedir, "another_file"))

    # Should NOT move these
    assert not os.path.isfile(os.path.join(netdir, "lightningd-testnet.pid"))
    assert os.path.isfile(os.path.join(basedir, "lightningd-testnet.pid"))
    assert not os.path.isfile(os.path.join(netdir, "config"))
    assert os.path.isfile(os.path.join(basedir, "config"))

    # Put valgrind reports back where the test fixtures look for them.
    restore_valgrind(l1, netdir)
|
|
|
|
|
|
|
|
|
|
|
|
@unittest.skipIf(env('COMPAT') != 1, "Upgrade code requires COMPAT_V073")
def test_regtest_upgrade(node_factory):
    """Test that we move files correctly on regtest upgrade"""
    l1 = node_factory.get_node(start=False)

    basedir = l1.daemon.opts.get("lightning-dir")
    netdir = os.path.join(basedir, TEST_NETWORK)

    # Make it old-style
    os.rename(os.path.join(basedir, TEST_NETWORK, 'hsm_secret'),
              os.path.join(basedir, 'hsm_secret'))
    shutil.rmtree(os.path.join(basedir, TEST_NETWORK))
    # Add config file which tells us it's regtest; it should be left in place.
    with open(os.path.join(basedir, 'config'), 'wb') as f:
        f.write(bytes("network={}".format(TEST_NETWORK), "utf-8"))
    with open(os.path.join(basedir, 'another_file'), 'wb') as f:
        pass

    # We need to allow this, otherwise no upgrade!
    del l1.daemon.opts['allow-deprecated-apis']
    # It should get this from the config file.
    del l1.daemon.opts['network']

    l1.start()

    assert l1.daemon.is_in_log("Moving hsm_secret into {}/".format(netdir))
    assert l1.daemon.is_in_log("Moving another_file into {}/".format(netdir))
    assert not l1.daemon.is_in_log("Moving config into {}/".format(netdir))
    assert not l1.daemon.is_in_log("Moving lightningd-testnet.pid into {}/"
                                   .format(netdir))

    # Should move these
    assert os.path.isfile(os.path.join(netdir, "hsm_secret"))
    assert not os.path.isfile(os.path.join(basedir, "hsm_secret"))
    assert os.path.isfile(os.path.join(netdir, "another_file"))
    assert not os.path.isfile(os.path.join(basedir, "another_file"))

    # Should NOT move these
    assert not os.path.isfile(os.path.join(netdir, "lightningd-{}.pid".format(TEST_NETWORK)))
    assert os.path.isfile(os.path.join(basedir, "lightningd-{}.pid".format(TEST_NETWORK)))
    assert not os.path.isfile(os.path.join(netdir, "config"))
    assert os.path.isfile(os.path.join(basedir, "config"))

    # Should restart fine
    l1.restart()

    # Put valgrind reports back where the test fixtures look for them.
    restore_valgrind(l1, netdir)
|
|
|
|
|
|
|
|
|
|
|
|
@unittest.skipIf(VALGRIND, "valgrind files can't be written since we rmdir")
@unittest.skipIf(TEST_NETWORK != "regtest", "needs bitcoin mainnet")
def test_new_node_is_mainnet(node_factory):
    """Test that an empty directory causes us to be on mainnet"""
    l1 = node_factory.get_node(start=False, may_fail=True)

    basedir = l1.daemon.opts.get("lightning-dir")
    # Mainnet's network subdirectory is "bitcoin".
    netdir = os.path.join(basedir, "bitcoin")

    # Start from a completely empty directory.
    shutil.rmtree(basedir)

    # Don't suppress upgrade (though it shouldn't happen!)
    del l1.daemon.opts['allow-deprecated-apis']
    # We want to test default network
    del l1.daemon.opts['network']

    # Wrong chain, will fail to start, but that's OK.
    with pytest.raises(ValueError):
        l1.start()

    # Should create these
    assert os.path.isfile(os.path.join(netdir, "hsm_secret"))
    assert not os.path.isfile(os.path.join(basedir, "hsm_secret"))
    assert not os.path.isfile(os.path.join(netdir, "lightningd-bitcoin.pid"))
    assert os.path.isfile(os.path.join(basedir, "lightningd-bitcoin.pid"))
|
2019-11-02 15:48:42 +01:00
|
|
|
|
|
|
|
|
2019-12-26 11:19:09 +01:00
|
|
|
def test_unicode_rpc(node_factory, executor, bitcoind):
    """Non-ASCII invoice labels and descriptions survive an RPC round-trip."""
    node = node_factory.get_node()
    desc = "Some candy 🍬 and a nice glass of milk 🥛."

    node.rpc.invoice(msatoshi=42, label=desc, description=desc)

    invoices = node.rpc.listinvoices()['invoices']
    assert len(invoices) == 1
    invoice = invoices[0]
    # The exact unicode text must come back unmangled in both fields.
    assert invoice['description'] == desc
    assert invoice['label'] == desc
|
2019-12-16 14:54:22 +01:00
|
|
|
|
|
|
|
|
|
|
|
@unittest.skipIf(VALGRIND, "Testing pyln doesn't exercise anything interesting in the c code.")
def test_unix_socket_path_length(node_factory, bitcoind, directory, executor, db_provider, test_base_dir):
    """A very long lightning-dir path must not break the RPC unix socket.

    The generated path is long enough to stress the OS limit on unix
    socket path length (sun_path) — presumably the point of "far" * 30;
    pyln must work around it.
    """
    lightning_dir = os.path.join(directory, "anode" + "far" * 30 + "away")
    os.makedirs(lightning_dir)
    db = db_provider.get_db(lightning_dir, "test_unix_socket_path_length", 1)

    # Built by hand (not via node_factory) so we control the directory path.
    l1 = LightningNode(1, lightning_dir, bitcoind, executor, VALGRIND, db=db, port=node_factory.get_next_port())

    # `LightningNode.start()` internally calls `LightningRpc.getinfo()` which
    # exercises the socket logic, and raises an issue if it fails.
    l1.start()

    # Let's just call it again to make sure it really works.
    l1.rpc.listconfigs()
    l1.stop()
|
2019-12-26 11:19:09 +01:00
|
|
|
|
|
|
|
|
|
|
|
def test_waitblockheight(node_factory, executor, bitcoind):
    """waitblockheight returns immediately for past heights, blocks for
    future ones, honors its timeout, and wakes as each height arrives."""
    node = node_factory.get_node()

    sync_blockheight(bitcoind, [node])

    blockheight = node.rpc.getinfo()['blockheight']

    # Should succeed without waiting.
    node.rpc.waitblockheight(blockheight - 2)
    node.rpc.waitblockheight(blockheight - 1)
    node.rpc.waitblockheight(blockheight)

    # Should not succeed yet.
    fut2 = executor.submit(node.rpc.waitblockheight, blockheight + 2)
    fut1 = executor.submit(node.rpc.waitblockheight, blockheight + 1)
    assert not fut1.done()
    assert not fut2.done()

    # Should take about ~1second and time out.
    with pytest.raises(RpcError):
        node.rpc.waitblockheight(blockheight + 2, 1)

    # Others should still not be done.
    assert not fut1.done()
    assert not fut2.done()

    # Trigger just one more block.
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [node])
    # +1 waiter completes; +2 waiter must still be pending.
    fut1.result(5)
    assert not fut2.done()

    # Trigger two blocks.
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [node])
    fut2.result(5)
|
2019-12-04 16:53:12 +01:00
|
|
|
|
|
|
|
|
|
|
|
def test_sendcustommsg(node_factory):
    """Check that we can send custommsgs to peers in various states.

    `l2` is the node under test. `l1` has a channel with `l2` and should
    therefore be attached to `channeld`. `l4` is just connected, so it should
    be attached to `openingd`. `l3` has a channel open, but is disconnected
    and we can't send to it.

    """
    # io log level lets us grep raw message bytes; both custommsg plugins
    # should each see every incoming custom message.
    opts = {'log-level': 'io', 'plugin': [
        os.path.join(os.path.dirname(__file__), "plugins", "custommsg_b.py"),
        os.path.join(os.path.dirname(__file__), "plugins", "custommsg_a.py")
    ]}
    l1, l2, l3, l4 = node_factory.get_nodes(4, opts=opts)
    node_factory.join_nodes([l1, l2, l3])
    l2.connect(l4)
    l3.stop()
    # Hex-encoded payload used for every send in this test.
    msg = 'aa' + ('ff' * 30) + 'bb'

    # This address doesn't exist so we should get an error when we try sending
    # a message to it.
    node_id = '02df5ffe895c778e10f7742a6c5b8a0cefbe9465df58b92fadeb883752c8107c8f'
    with pytest.raises(RpcError, match=r'No such peer'):
        l1.rpc.sendcustommsg(node_id, msg)

    # `l3` is disconnected and we can't send messages to it
    assert(not l2.rpc.listpeers(l3.info['id'])['peers'][0]['connected'])
    with pytest.raises(RpcError, match=r'Peer is not connected'):
        l2.rpc.sendcustommsg(l3.info['id'], msg)

    # We should not be able to send a bogus `ping` message, since it collides
    # with a message defined in the spec, and could potentially mess up our
    # internal state.
    with pytest.raises(RpcError, match=r'Cannot send messages of type 18 .WIRE_PING.'):
        l2.rpc.sendcustommsg(l2.info['id'], r'0012')

    # The sendcustommsg RPC call is currently limited to odd-typed messages,
    # since they will not result in disconnections or even worse channel
    # failures.
    with pytest.raises(RpcError, match=r'Cannot send even-typed [0-9]+ custom message'):
        l2.rpc.sendcustommsg(l2.info['id'], r'00FE')

    # This should work since the peer is currently owned by `channeld`
    l2.rpc.sendcustommsg(l1.info['id'], msg)
    l2.daemon.wait_for_log(
        r'{peer_id}-{owner}: \[OUT\] {msg}'.format(
            owner='connectd', msg=msg, peer_id=l1.info['id']
        )
    )
    l1.daemon.wait_for_log(r'\[IN\] {}'.format(msg))
    # Both plugins on the receiver must be notified.
    l1.daemon.wait_for_logs([
        r'Got custommessage_a {msg} from peer {peer_id}'.format(
            msg=msg, peer_id=l2.info['id']),
        r'Got custommessage_b {msg} from peer {peer_id}'.format(
            msg=msg, peer_id=l2.info['id'])
    ])

    # This should work since the peer is currently owned by `openingd`
    l2.rpc.sendcustommsg(l4.info['id'], msg)
    l2.daemon.wait_for_log(
        r'{peer_id}-{owner}: \[OUT\] {msg}'.format(
            owner='connectd', msg=msg, peer_id=l4.info['id']
        )
    )
    l4.daemon.wait_for_log(r'\[IN\] {}'.format(msg))
    l4.daemon.wait_for_logs([
        r'Got custommessage_a {msg} from peer {peer_id}'.format(
            msg=msg, peer_id=l2.info['id']),
        r'Got custommessage_b {msg} from peer {peer_id}'.format(
            msg=msg, peer_id=l2.info['id']),
    ])
|
2020-02-04 06:53:17 +01:00
|
|
|
|
|
|
|
|
2021-04-26 21:58:58 +02:00
|
|
|
@pytest.mark.developer("needs --dev-force-privkey")
def test_getsharedsecret(node_factory):
    """
    Test getsharedsecret command.
    """
    # From BOLT 8 test vectors.
    options = [
        {"dev-force-privkey": "1212121212121212121212121212121212121212121212121212121212121212"},
        {}
    ]
    l1, l2 = node_factory.get_nodes(2, opts=options)

    # Check BOLT 8 test vectors.
    shared_secret = l1.rpc.getsharedsecret("028d7500dd4c12685d1f568b4c2b5048e8534b873319f3a8daa612b469132ec7f7")['shared_secret']
    assert (shared_secret == "1e2fb3c8fe8fb9f262f649f64d26ecf0f2c0a805a767cf02dc2d77a6ef1fdcc3")

    # Clear the forced privkey of l1.
    del l1.daemon.opts["dev-force-privkey"]
    l1.restart()

    # l1 and l2 can generate the same shared secret
    # knowing only the public key of the other.
    assert (l1.rpc.getsharedsecret(l2.info["id"])["shared_secret"]
            == l2.rpc.getsharedsecret(l1.info["id"])["shared_secret"])
|
2020-05-19 11:46:27 +02:00
|
|
|
|
|
|
|
|
|
|
|
def test_commitfee_option(node_factory):
    """Sanity check for the --commit-fee startup option."""
    # l1 scales commitment fees to 200%; l2 keeps the default.
    l1, l2 = node_factory.get_nodes(2, opts=[{"commit-fee": "200"}, {}])

    mocked_feerate_wu = 5000
    for node in (l1, l2):
        node.set_feerates((0, mocked_feerate_wu, 0, 0), True)

    fees_scaled = l1.rpc.call("estimatefees")["unilateral_close"]
    fees_default = l2.rpc.call("estimatefees")["unilateral_close"]

    # l1's fee is double l2's, i.e. 200% of 4 * feerate (WU->VB).
    assert fees_scaled == 2 * fees_default == 2 * 4 * mocked_feerate_wu
|
2020-05-22 11:38:15 +02:00
|
|
|
|
|
|
|
|
|
|
|
def test_listtransactions(node_factory):
    """Sanity check for the listtransactions RPC command"""
    l1, l2 = node_factory.get_nodes(2, opts=[{}, {}])

    funding_txid = l1.openchannel(l2, 10**5)["wallettxid"]

    # Collect the txids spent by every input of every known transaction.
    input_txids = set()
    for tx in l1.rpc.listtransactions()["transactions"]:
        for tx_input in tx["inputs"]:
            input_txids.add(tx_input["txid"])

    # The txid of the transaction funding the channel is present, and
    # represented as little endian (like bitcoind and explorers).
    assert funding_txid in input_txids
|
2020-12-22 10:33:57 +01:00
|
|
|
|
|
|
|
|
|
|
|
def test_listfunds(node_factory):
    """Test listfunds command."""
    l1, l2 = node_factory.get_nodes(2, opts=[{}, {}])

    open_txid = l1.openchannel(l2, 10**5)["wallettxid"]

    # Default view: only 1 unspent output should be available.
    unspent = l1.rpc.listfunds()["outputs"]
    assert len(unspent) == 1

    # With spent=True we also see the output consumed by the channel open:
    # 1 spent output plus the 1 unspent one.
    everything = l1.rpc.listfunds(spent=True)["outputs"]
    assert len(everything) == 2
    assert open_txid in [entry['txid'] for entry in everything]
|
2021-01-26 09:27:25 +01:00
|
|
|
|
|
|
|
|
|
|
|
def test_listforwards(node_factory, bitcoind):
    """Test the listforwards command and its status/channel filters."""
    l1, l2, l3, l4 = node_factory.get_nodes(4, opts=[{}, {}, {}, {}])

    # l2 is the forwarding node: l1 -> l2 -> {l3, l4}.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l2.rpc.connect(l4.info['id'], 'localhost', l4.port)

    c12, _ = l1.fundchannel(l2, 10**5)
    c23, _ = l2.fundchannel(l3, 10**5)
    c24, _ = l2.fundchannel(l4, 10**5)

    # Wait until channels are active
    bitcoind.generate_block(5)
    l1.wait_channel_active(c23)

    # successful payments
    i31 = l3.rpc.invoice(1000, 'i31', 'desc')
    l1.rpc.pay(i31['bolt11'])

    i41 = l4.rpc.invoice(2000, 'i41', 'desc')
    l1.rpc.pay(i41['bolt11'])

    # failed payment
    failed_inv = l3.rpc.invoice(4000, 'failed', 'desc')
    failed_route = l1.rpc.getroute(l3.info['id'], 4000, 1)['route']

    # Close c23 so forwarding towards l3 fails locally at l2.
    l2.rpc.close(c23, 1)

    with pytest.raises(RpcError):
        l1.rpc.sendpay(failed_route, failed_inv['payment_hash'], payment_secret=failed_inv['payment_secret'])
        l1.rpc.waitsendpay(failed_inv['payment_hash'])

    all_forwards = l2.rpc.listforwards()['forwards']
    print(json.dumps(all_forwards, indent=True))

    # Two settled forwards plus the locally-failed one.
    assert len(all_forwards) == 3
    assert i31['payment_hash'] in map(lambda x: x['payment_hash'], all_forwards)
    assert i41['payment_hash'] in map(lambda x: x['payment_hash'], all_forwards)
    assert failed_inv['payment_hash'] in map(lambda x: x['payment_hash'], all_forwards)

    # status=settled
    settled_forwards = l2.rpc.listforwards(status='settled')['forwards']
    assert len(settled_forwards) == 2
    assert sum(x['out_msatoshi'] for x in settled_forwards) == 3000

    # status=local_failed
    failed_forwards = l2.rpc.listforwards(status='local_failed')['forwards']
    assert len(failed_forwards) == 1

    # in_channel=c23
    c23_forwards = l2.rpc.listforwards(in_channel=c23, status='settled')['forwards']
    assert len(c23_forwards) == 0

    # out_channel=c24
    c24_forwards = l2.rpc.listforwards(out_channel=c24)['forwards']
    assert len(c24_forwards) == 1
|
2021-04-16 06:31:24 +02:00
|
|
|
|
|
|
|
|
|
|
|
def test_version_reexec(node_factory, bitcoind):
    """A subdaemon reporting the wrong version must be killed and re-exec'd.

    We wrap openingd in a shell script that (once) reports a bogus version;
    lightningd should notice the mismatch, restart the daemon, and carry on
    once the wrapper hands over to the real openingd.
    """
    wrapper = os.path.join(os.path.dirname(__file__), "plugins", "badopeningd.sh")
    expected_version = subprocess.check_output(
        ['lightningd/lightningd', '--version']
    ).decode('utf-8').splitlines()[0]

    opts = [{'subdaemon': 'openingd:' + wrapper,
             'start': False,
             'allow_broken_log': True},
            {}]
    l1, l2 = node_factory.get_nodes(2, opts=opts)

    # The wrapper reads this file to locate the genuine openingd binary.
    real_path_file = os.path.join(l1.daemon.lightning_dir, TEST_NETWORK,
                                  "openingd-real")
    with open(real_path_file, 'w') as fd:
        fd.write(os.path.abspath('lightningd/lightning_openingd'))

    l1.start()

    # Hand the wrapper a fake wire "version" message to replay.
    version_file = os.path.join(l1.daemon.lightning_dir, TEST_NETWORK,
                                "openingd-version")
    with open(version_file, 'wb') as fd:
        # 4-byte length (0x0000000d) followed by 2-byte message type (0xfff6).
        fd.write(bytes.fromhex('0000000d' 'fff6'))
        fd.write(bytes('badversion\0', encoding='utf8'))

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    l1.daemon.wait_for_log(
        "openingd.*version 'badversion' not '{}': restarting".format(expected_version))

    # Remove the fake version message: the restarted daemon should come up clean.
    os.unlink(version_file)
    l1.daemon.wait_for_log("Server started with public key")
|
2021-05-03 05:19:43 +02:00
|
|
|
|
|
|
|
|
|
|
|
def test_notimestamp_logging(node_factory):
    """With log-timestamps disabled, log lines begin with the level itself."""
    node = node_factory.get_node(options={'log-timestamps': False})

    first_line = node.daemon.logs[0]
    assert first_line.startswith("DEBUG")

    # The option must also be reflected back by listconfigs.
    assert node.rpc.listconfigs()['log-timestamps'] is False
|
# doc/schemas: disableoffer, disconnect, feerates, fetchinvoice, fundchannel,
# fundchannel_cancel, fundchannel_complete, fundchannel_start, fundpsbt,
# getinfo, getlog, getroute.  test_getlog below was added because getlog was
# never previously exercised by the testsuite.
|
|
|
|
|
|
|
|
|
|
|
def test_getlog(node_factory):
    """Test the getlog command: default level skips io entries, level='io' does not."""
    node = node_factory.get_node(options={'log-level': 'io'})

    # At the default level some entries are elided, marked by SKIPPED records.
    entries = node.rpc.getlog()['log']
    assert any(entry['type'] == 'SKIPPED' for entry in entries)

    # Asking for everything ('io') must yield no SKIPPED records.
    entries = node.rpc.getlog(level='io')['log']
    assert not any(entry['type'] == 'SKIPPED' for entry in entries)
|
2021-07-08 04:47:03 +02:00
|
|
|
|
|
|
|
|
|
|
|
def test_force_feerates(node_factory):
    """--force-feerates pins feerates; unspecified slots inherit the last value.

    The option takes up to six '/'-separated perkw rates in the order
    opening / mutual_close / unilateral_close / delayed_to_us /
    htlc_resolution / penalty.  Any rate not given repeats the last one
    supplied.  We check one, two, and all six values.
    """
    def expected_perkw(*rates):
        """Build the feerates['perkw'] dict lightningd should report.

        Mirrors the documented fill rule: slots beyond the supplied rates
        take the last supplied rate.  min/max acceptable are not forced.
        """
        slots = ("opening", "mutual_close", "unilateral_close",
                 "delayed_to_us", "htlc_resolution", "penalty")
        fees = {slot: rates[min(i, len(rates) - 1)]
                for i, slot in enumerate(slots)}
        fees["min_acceptable"] = 1875
        fees["max_acceptable"] = 150000
        return fees

    # Single value: applies to all six slots.
    l1 = node_factory.get_node(options={'force-feerates': 1111})
    assert l1.rpc.listconfigs()['force-feerates'] == '1111'
    assert l1.rpc.feerates('perkw')['perkw'] == expected_perkw(1111)

    # Two values: first slot gets 1111, the rest inherit 2222.
    l1.stop()
    l1.daemon.opts['force-feerates'] = '1111/2222'
    l1.start()
    assert l1.rpc.listconfigs()['force-feerates'] == '1111/2222'
    assert l1.rpc.feerates('perkw')['perkw'] == expected_perkw(1111, 2222)

    # All six values: each slot gets its own rate.
    l1.stop()
    l1.daemon.opts['force-feerates'] = '1111/2222/3333/4444/5555/6666'
    l1.start()
    assert l1.rpc.listconfigs()['force-feerates'] == '1111/2222/3333/4444/5555/6666'
    assert l1.rpc.feerates('perkw')['perkw'] == expected_perkw(1111, 2222, 3333,
                                                               4444, 5555, 6666)
|
2021-08-25 04:50:12 +02:00
|
|
|
|
|
|
|
|
|
|
|
def test_datastore(node_factory):
    """Exercise datastore/listdatastore/deldatastore: modes, generations, persistence."""
    l1 = node_factory.get_node()

    # A fresh node has an empty datastore, for both global and keyed listing.
    assert l1.rpc.listdatastore() == {'datastore': []}
    assert l1.rpc.listdatastore('somekey') == {'datastore': []}

    # First insertion: generation starts at 0 and both hex and the decoded
    # string form are reported back.
    some_hex = b'somedata'.hex()
    some_entry = {'key': ['somekey'],
                  'generation': 0,
                  'hex': some_hex,
                  'string': 'somedata'}
    assert l1.rpc.datastore(key='somekey', hex=some_hex) == some_entry
    assert l1.rpc.listdatastore() == {'datastore': [some_entry]}
    assert l1.rpc.listdatastore('somekey') == {'datastore': [some_entry]}
    assert l1.rpc.listdatastore('otherkey') == {'datastore': []}

    # Re-adding an existing key fails, both implicitly and with must-create.
    with pytest.raises(RpcError, match='already exists'):
        l1.rpc.datastore(key='somekey', hex=some_hex)
    with pytest.raises(RpcError, match='already exists'):
        l1.rpc.datastore(key='somekey', hex=some_hex, mode="must-create")

    # must-replace overwrites; the append modes extend the stored value.
    l1.rpc.datastore(key='somekey', hex=some_hex[:-4], mode="must-replace")
    assert only_one(l1.rpc.listdatastore('somekey')['datastore'])['hex'] == some_hex[:-4]
    l1.rpc.datastore(key='somekey', hex=some_hex[-4:-2], mode="must-append")
    assert only_one(l1.rpc.listdatastore('somekey')['datastore'])['hex'] == some_hex[:-2]
    l1.rpc.datastore(key='somekey', hex=some_hex[-2:], mode="create-or-append")
    assert only_one(l1.rpc.listdatastore('somekey')['datastore'])['hex'] == some_hex

    # Three writes above means the generation counter advanced by three.
    some_entry['generation'] += 3
    assert l1.rpc.listdatastore() == {'datastore': [some_entry]}

    # Replace/append on a missing key is rejected by the strict modes.
    with pytest.raises(RpcError, match='does not exist'):
        l1.rpc.datastore(key='otherkey', hex=some_hex, mode="must-replace")
    with pytest.raises(RpcError, match='does not exist'):
        l1.rpc.datastore(key='otherkey', hex=some_hex, mode="must-append")

    # create-or-append on a missing key creates it.
    other_hex = b'otherdata'.hex()
    other_entry = {'key': ['otherkey'],
                   'generation': 0,
                   'hex': other_hex,
                   'string': 'otherdata'}
    assert l1.rpc.datastore(key='otherkey', string='otherdata',
                            mode="create-or-append") == other_entry

    assert l1.rpc.listdatastore('somekey') == {'datastore': [some_entry]}
    assert l1.rpc.listdatastore('otherkey') == {'datastore': [other_entry]}
    assert l1.rpc.listdatastore('badkey') == {'datastore': []}

    # Global listing is sorted by key ('otherkey' < 'somekey').
    assert l1.rpc.listdatastore() == {'datastore': [other_entry, some_entry]}

    # Deletion returns the removed entry and leaves the rest untouched.
    assert l1.rpc.deldatastore('somekey') == some_entry
    assert l1.rpc.listdatastore() == {'datastore': [other_entry]}
    assert l1.rpc.listdatastore('somekey') == {'datastore': []}
    assert l1.rpc.listdatastore('otherkey') == {'datastore': [other_entry]}
    assert l1.rpc.listdatastore('badkey') == {'datastore': []}
    assert l1.rpc.listdatastore() == {'datastore': [other_entry]}

    # Values that aren't printable strings come back without a 'string' field.
    binary_entry = {'key': ['badstring'],
                    'generation': 0,
                    'hex': '00'}
    assert l1.rpc.datastore(key='badstring', hex='00') == binary_entry
    assert l1.rpc.listdatastore('badstring') == {'datastore': [binary_entry]}
    assert l1.rpc.deldatastore('badstring') == binary_entry

    # The datastore survives a node restart.
    l1.restart()
    assert l1.rpc.listdatastore() == {'datastore': [other_entry]}

    # Updates can demand a matching generation; a mismatch is rejected.
    with pytest.raises(RpcError, match='generation is different'):
        l1.rpc.datastore(key='otherkey', hex='00', mode='must-replace',
                         generation=other_entry['generation'] + 1)

    # A correct generation lets the replace through (and bumps generation).
    other_entry['generation'] += 1
    other_entry['string'] += 'a'
    other_entry['hex'] += '61'
    assert (l1.rpc.datastore(key='otherkey', string='otherdataa',
                             mode='must-replace',
                             generation=other_entry['generation'] - 1)
            == other_entry)
    assert l1.rpc.listdatastore() == {'datastore': [other_entry]}

    # Deletion honours the generation check the same way.
    with pytest.raises(RpcError, match='generation is different'):
        l1.rpc.deldatastore(key='otherkey',
                            generation=other_entry['generation'] + 1)
    assert (l1.rpc.deldatastore(key='otherkey',
                                generation=other_entry['generation'])
            == other_entry)
    assert l1.rpc.listdatastore() == {'datastore': []}
|
2021-08-25 04:51:34 +02:00
|
|
|
|
|
|
|
|
|
|
|
def test_datastore_keylist(node_factory):
|
|
|
|
l1 = node_factory.get_node()
|
|
|
|
|
|
|
|
# Starts empty
|
|
|
|
assert l1.rpc.listdatastore() == {'datastore': []}
|
|
|
|
assert l1.rpc.listdatastore(['a']) == {'datastore': []}
|
|
|
|
assert l1.rpc.listdatastore(['a', 'b']) == {'datastore': []}
|
|
|
|
|
|
|
|
# Cannot add child to existing!
|
|
|
|
l1.rpc.datastore(key='a', string='aval')
|
|
|
|
with pytest.raises(RpcError, match=r'1206.*Parent key \[a\] exists'):
|
|
|
|
l1.rpc.datastore(key=['a', 'b'], string='abval',
|
|
|
|
mode='create-or-replace')
|
|
|
|
# Listing subkey gives DNE.
|
|
|
|
assert l1.rpc.listdatastore(['a', 'b']) == {'datastore': []}
|
|
|
|
l1.rpc.deldatastore(key=['a'])
|
|
|
|
|
|
|
|
# Create child key.
|
|
|
|
l1.rpc.datastore(key=['a', 'b'], string='abval')
|
|
|
|
assert l1.rpc.listdatastore() == {'datastore': [{'key': ['a']}]}
|
|
|
|
assert l1.rpc.listdatastore(key=['a']) == {'datastore': [{'key': ['a', 'b'],
|
|
|
|
'generation': 0,
|
|
|
|
'string': 'abval',
|
|
|
|
'hex': b'abval'.hex()}]}
|
|
|
|
|
|
|
|
# Cannot create key over that
|
|
|
|
with pytest.raises(RpcError, match='has children'):
|
|
|
|
l1.rpc.datastore(key='a', string='aval', mode='create-or-replace')
|
|
|
|
|
|
|
|
# Can create another key.
|
|
|
|
l1.rpc.datastore(key=['a', 'b2'], string='ab2val')
|
|
|
|
assert l1.rpc.listdatastore() == {'datastore': [{'key': ['a']}]}
|
|
|
|
assert l1.rpc.listdatastore(key=['a']) == {'datastore': [{'key': ['a', 'b'],
|
|
|
|
'string': 'abval',
|
|
|
|
'generation': 0,
|
|
|
|
'hex': b'abval'.hex()},
|
|
|
|
{'key': ['a', 'b2'],
|
|
|
|
'string': 'ab2val',
|
|
|
|
'generation': 0,
|
|
|
|
'hex': b'ab2val'.hex()}]}
|
|
|
|
|
|
|
|
# Can create subkey.
|
|
|
|
l1.rpc.datastore(key=['a', 'b3', 'c'], string='ab2val')
|
|
|
|
assert l1.rpc.listdatastore() == {'datastore': [{'key': ['a']}]}
|
|
|
|
assert l1.rpc.listdatastore(key=['a']) == {'datastore': [{'key': ['a', 'b'],
|
|
|
|
'string': 'abval',
|
|
|
|
'generation': 0,
|
|
|
|
'hex': b'abval'.hex()},
|
|
|
|
{'key': ['a', 'b2'],
|
|
|
|
'string': 'ab2val',
|
|
|
|
'generation': 0,
|
|
|
|
'hex': b'ab2val'.hex()},
|
|
|
|
{'key': ['a', 'b3']}]}
|
|
|
|
|
|
|
|
# Can update subkey
|
|
|
|
l1.rpc.datastore(key=['a', 'b3', 'c'], string='2', mode='must-append')
|
|
|
|
assert l1.rpc.listdatastore(key=['a', 'b3', 'c']) == {'datastore': [{'key': ['a', 'b3', 'c'],
|
|
|
|
'string': 'ab2val2',
|
|
|
|
'generation': 1,
|
|
|
|
'hex': b'ab2val2'.hex()}]}
|