2018-12-04 04:04:14 +01:00
|
|
|
from collections import OrderedDict
|
2020-10-28 11:46:21 +01:00
|
|
|
from datetime import datetime
|
2018-11-17 15:18:34 +01:00
|
|
|
from fixtures import * # noqa: F401,F403
|
2020-02-06 17:35:18 +01:00
|
|
|
from hashlib import sha256
|
2020-01-06 12:20:12 +01:00
|
|
|
from pyln.client import RpcError, Millisatoshi
|
2020-01-29 22:33:10 +01:00
|
|
|
from pyln.proto import Invoice
|
2020-01-06 12:20:12 +01:00
|
|
|
from utils import (
|
2020-04-02 15:12:46 +02:00
|
|
|
DEVELOPER, only_one, sync_blockheight, TIMEOUT, wait_for, TEST_NETWORK,
|
2020-05-19 22:41:24 +02:00
|
|
|
DEPRECATED_APIS, expected_peer_features, expected_node_features,
|
|
|
|
expected_channel_features, account_balance,
|
2022-01-30 04:37:23 +01:00
|
|
|
check_coin_moves, first_channel_id, EXPERIMENTAL_DUAL_FUND,
|
2022-03-08 15:31:26 +01:00
|
|
|
mine_funding_to_announce
|
2020-01-06 12:20:12 +01:00
|
|
|
)
|
2018-11-17 15:18:34 +01:00
|
|
|
|
2020-09-08 18:16:04 +02:00
|
|
|
import ast
|
2022-07-25 03:23:30 +02:00
|
|
|
import base64
|
2022-07-16 15:18:27 +02:00
|
|
|
import concurrent.futures
|
2019-06-29 18:25:45 +02:00
|
|
|
import json
|
2019-03-18 03:40:32 +01:00
|
|
|
import os
|
2018-11-26 19:56:44 +01:00
|
|
|
import pytest
|
2021-02-11 16:18:41 +01:00
|
|
|
import random
|
2019-06-29 18:25:45 +02:00
|
|
|
import re
|
2020-07-30 09:07:32 +02:00
|
|
|
import signal
|
2019-03-18 03:40:32 +01:00
|
|
|
import sqlite3
|
2021-06-18 11:03:15 +02:00
|
|
|
import stat
|
2018-11-17 15:18:34 +01:00
|
|
|
import subprocess
|
2022-09-12 23:19:12 +02:00
|
|
|
import sys
|
2019-01-22 23:23:34 +01:00
|
|
|
import time
|
2019-06-15 20:29:09 +02:00
|
|
|
import unittest
|
2018-11-17 15:18:34 +01:00
|
|
|
|
|
|
|
|
2019-08-05 19:07:45 +02:00
|
|
|
def test_option_passthrough(node_factory, directory):
    """ Ensure that registering options works.

    First attempts without the plugin and then with the plugin.
    Then a plugin tries to register the same option "name" again, fails startup.
    """
    plugin_path = os.path.join(os.getcwd(), 'contrib/plugins/helloworld.py')
    plugin_path2 = os.path.join(os.getcwd(), 'tests/plugins/options.py')

    # Without the plugin, the plugin-registered option must not appear.
    help_out = subprocess.check_output([
        'lightningd/lightningd',
        '--lightning-dir={}'.format(directory),
        '--help'
    ]).decode('utf-8')
    assert('--greeting' not in help_out)

    # With the plugin loaded, its 'greeting' option shows up in --help.
    help_out = subprocess.check_output([
        'lightningd/lightningd',
        '--lightning-dir={}'.format(directory),
        '--plugin={}'.format(plugin_path),
        '--help'
    ]).decode('utf-8')
    assert('--greeting' in help_out)

    # Now try to see if it gets accepted, would fail to start if the
    # option didn't exist
    n = node_factory.get_node(options={'plugin': plugin_path, 'greeting': 'Ciao'})
    n.stop()

    # Two plugins registering the same option name must fail startup.
    # NOTE: the previous version used check=True inside pytest.raises(),
    # which raised before `err_out` was assigned, so the stderr assertion
    # below was never executed.  Run without check= and assert explicitly.
    result = subprocess.run([
        'lightningd/lightningd',
        '--lightning-dir={}'.format(directory),
        '--plugin={}'.format(plugin_path),
        '--plugin={}'.format(plugin_path2),
        '--help'
    ], capture_output=True)
    assert result.returncode != 0
    err_out = result.stderr.decode('utf-8')

    # first come first serve
    assert("error starting plugin '{}': option name '--greeting' is already taken".format(plugin_path2) in err_out)
|
|
|
|
|
2018-11-26 19:56:44 +01:00
|
|
|
|
2020-03-08 01:15:37 +01:00
|
|
|
def test_option_types(node_factory):
    """Ensure that desired types of options are respected in output.

    The options.py test plugin registers string, int, bool, flag and
    multi-valued options; we check the types the plugin logs for each,
    and that bad values make the daemon fail at startup with a clear
    stderr message.
    """
    plugin_path = os.path.join(os.getcwd(), 'tests/plugins/options.py')
    n = node_factory.get_node(options={
        'plugin': plugin_path,
        'str_opt': 'ok',
        'int_opt': 22,
        'bool_opt': True,
    })

    # The plugin logs each option as "option <name> <value> <type>".
    assert n.daemon.is_in_log(r"option str_opt ok <class 'str'>")
    assert n.daemon.is_in_log(r"option int_opt 22 <class 'int'>")
    assert n.daemon.is_in_log(r"option bool_opt True <class 'bool'>")
    # flag options aren't passed through if not flagged on
    assert not n.daemon.is_in_log(r"option flag_opt")
    n.stop()

    # A blank bool_opt should default to false
    n = node_factory.get_node(options={
        'plugin': plugin_path, 'str_opt': 'ok',
        'int_opt': 22,
        'bool_opt': 'true',
        'flag_opt': None,
    })

    assert n.daemon.is_in_log(r"option bool_opt True <class 'bool'>")
    assert n.daemon.is_in_log(r"option flag_opt True <class 'bool'>")
    n.stop()

    # What happens if we give it a bad bool-option?
    n = node_factory.get_node(options={
        'plugin': plugin_path,
        'str_opt': 'ok',
        'int_opt': 22,
        'bool_opt': '!',
    }, may_fail=True, start=False)

    # the node should fail after start, and we get a stderr msg
    n.daemon.start(wait_for_initialized=False, stderr_redir=True)
    assert n.daemon.wait() == 1
    wait_for(lambda: n.daemon.is_in_stderr('bool_opt: ! does not parse as type bool'))

    # What happens if we give it a bad int-option?
    n = node_factory.get_node(options={
        'plugin': plugin_path,
        'str_opt': 'ok',
        'int_opt': 'notok',
        'bool_opt': 1,
    }, may_fail=True, start=False)

    # the node should fail after start, and we get a stderr msg
    n.daemon.start(wait_for_initialized=False, stderr_redir=True)
    assert n.daemon.wait() == 1
    assert n.daemon.is_in_stderr('--int_opt: notok does not parse as type int')

    # Flag opts shouldn't allow any input
    n = node_factory.get_node(options={
        'plugin': plugin_path,
        'str_opt': 'ok',
        'int_opt': 11,
        'bool_opt': 1,
        'flag_opt': True,
    }, may_fail=True, start=False)

    # the node should fail after start, and we get a stderr msg
    n.daemon.start(wait_for_initialized=False, stderr_redir=True)
    assert n.daemon.wait() == 1
    assert n.daemon.is_in_stderr("--flag_opt: doesn't allow an argument")

    # Multi-valued options are passed through to the plugin as lists.
    n = node_factory.get_node(options={
        'plugin': plugin_path,
        'str_optm': ['ok', 'ok2'],
        'int_optm': [11, 12, 13],
    })

    assert n.daemon.is_in_log(r"option str_optm \['ok', 'ok2'\] <class 'list'>")
    assert n.daemon.is_in_log(r"option int_optm \[11, 12, 13\] <class 'list'>")
    n.stop()
|
|
|
|
|
2020-03-08 01:15:37 +01:00
|
|
|
|
2019-02-25 05:15:56 +01:00
|
|
|
def test_millisatoshi_passthrough(node_factory):
    """ Ensure that Millisatoshi arguments and return work.

    The millisatoshis.py plugin echoes back the 'msat' argument; the
    returned value must come back typed as Millisatoshi, whether the
    argument was passed by keyword or by position.
    """
    plugin_path = os.path.join(os.getcwd(), 'tests/plugins/millisatoshis.py')
    n = node_factory.get_node(options={'plugin': plugin_path, 'log-level': 'io'})

    # By keyword
    ret = n.rpc.call('echo', {'msat': Millisatoshi(17), 'not_an_msat': '22msat'})['echo_msat']
    # isinstance instead of `type(ret) == Millisatoshi` (comparing types
    # directly is an anti-pattern, flake8 E721).
    assert isinstance(ret, Millisatoshi)
    assert ret == Millisatoshi(17)

    # By position
    ret = n.rpc.call('echo', [Millisatoshi(18), '22msat'])['echo_msat']
    assert isinstance(ret, Millisatoshi)
    assert ret == Millisatoshi(18)
|
|
|
|
|
|
|
|
|
2018-11-26 19:56:44 +01:00
|
|
|
def test_rpc_passthrough(node_factory):
    """Starting with a plugin exposes its RPC methods.

    First check that the RPC method appears in the help output and
    then try to call it.
    """
    plugin_path = os.path.join(os.getcwd(), 'contrib/plugins/helloworld.py')
    n = node_factory.get_node(options={'plugin': plugin_path, 'greeting': 'Ciao'})

    # The helloworld.py plugin registers a 'hello' command; it must show
    # up exactly once in the help listing.
    matching = [entry for entry in n.rpc.help()['help']
                if 'hello' in entry['command']]
    assert len(matching) == 1

    # The usage message must be present as well.
    assert only_one(n.rpc.help('hello')['help'])['command'] == 'hello [name]'
    # While we're here, verify that helloworld.py logged its init message
    # through the plugin->lightningd notification mechanism.
    assert n.daemon.is_in_log('Plugin helloworld.py initialized')

    # Invoke the command and inspect the result.
    response = n.rpc.hello(name='World')
    assert response == "Ciao World"
    with pytest.raises(RpcError):
        n.rpc.fail()

    # Calling a method without its required argument must error out.
    with pytest.raises(RpcError, match="processing bye: missing a required argument"):
        n.rpc.bye()
|
|
|
|
|
2018-12-03 10:26:29 +01:00
|
|
|
|
|
|
|
def test_plugin_dir(node_factory):
    """--plugin-dir works"""
    # Loading a whole directory of plugins must make helloworld.py's
    # 'greeting' option available; the node would fail to start otherwise.
    contrib_dir = os.path.join(os.getcwd(), 'contrib/plugins')
    node_factory.get_node(options={'plugin-dir': contrib_dir, 'greeting': 'Mars'})
|
2018-12-04 04:04:14 +01:00
|
|
|
|
|
|
|
|
2019-07-29 09:30:49 +02:00
|
|
|
def test_plugin_slowinit(node_factory):
    """Tests that the 'plugin' RPC command times out if plugin doesnt respond"""
    # SLOWINIT_TIME is read by the slow_init.py plugin to delay its init
    # reply past lightningd's timeout.
    # NOTE(review): this mutates the process environment without restoring
    # it afterwards — later tests in the same run inherit it; confirm that
    # is intentional.
    os.environ['SLOWINIT_TIME'] = '61'
    n = node_factory.get_node()

    # Starting the plugin must fail with a timeout rather than hang.
    with pytest.raises(RpcError, match=': timed out before replying to init'):
        n.rpc.plugin_start(os.path.join(os.getcwd(), "tests/plugins/slow_init.py"))

    # It's not actually configured yet, see what happens;
    # make sure 'rescan' and 'list' controls dont crash
    n.rpc.plugin_rescan()
    n.rpc.plugin_list()
|
|
|
|
|
|
|
|
|
2019-07-23 01:24:47 +02:00
|
|
|
def test_plugin_command(node_factory):
    """Tests the 'plugin' RPC command.

    Exercises plugin_startdir/plugin_start/plugin_stop/plugin_rescan/
    plugin_list, including the 'dynamic: False' restrictions and two
    failure modes of broken plugins (crash and timeout at getmanifest).
    """
    n = node_factory.get_node()

    # Make sure that the 'hello' command from the helloworld.py plugin
    # is not available.
    cmd = [hlp for hlp in n.rpc.help()["help"] if "hello" in hlp["command"]]
    assert(len(cmd) == 0)

    # Add the 'contrib/plugins' test dir
    n.rpc.plugin_startdir(directory=os.path.join(os.getcwd(), "contrib/plugins"))
    # Make sure that the 'hello' command from the helloworld.py plugin
    # is now available.
    cmd = [hlp for hlp in n.rpc.help()["help"] if "hello" in hlp["command"]]
    assert(len(cmd) == 1)

    # Make sure 'rescan' and 'list' subcommands dont crash
    n.rpc.plugin_rescan()
    n.rpc.plugin_list()

    # Make sure the plugin behaves normally after stop and restart
    assert("Successfully stopped helloworld.py."
           == n.rpc.plugin_stop(plugin="helloworld.py")["result"])
    n.daemon.wait_for_log(r"Killing plugin: stopped by lightningd via RPC")
    n.rpc.plugin_start(plugin=os.path.join(os.getcwd(), "contrib/plugins/helloworld.py"))
    n.daemon.wait_for_log(r"Plugin helloworld.py initialized")
    # Without a 'greeting' option the plugin falls back to "Hello".
    assert("Hello world" == n.rpc.call(method="hello"))

    # Now stop the helloworld plugin
    assert("Successfully stopped helloworld.py."
           == n.rpc.plugin_stop(plugin="helloworld.py")["result"])
    n.daemon.wait_for_log(r"Killing plugin: stopped by lightningd via RPC")
    # Make sure that the 'hello' command from the helloworld.py plugin
    # is not available anymore.
    cmd = [hlp for hlp in n.rpc.help()["help"] if "hello" in hlp["command"]]
    assert(len(cmd) == 0)

    # Test that we cannot start a plugin with 'dynamic' set to False in
    # getmanifest
    with pytest.raises(RpcError, match=r"Not a dynamic plugin"):
        n.rpc.plugin_start(plugin=os.path.join(os.getcwd(), "tests/plugins/static.py"))

    # Test that we cannot stop a started plugin with 'dynamic' flag set to
    # False
    n2 = node_factory.get_node(options={
        "plugin": os.path.join(os.getcwd(), "tests/plugins/static.py")
    })
    with pytest.raises(RpcError, match=r"static.py cannot be managed when lightningd is up"):
        n2.rpc.plugin_stop(plugin="static.py")

    # Test that we don't crash when starting a broken plugin
    with pytest.raises(RpcError, match=r": exited before replying to getmanifest"):
        n2.rpc.plugin_start(plugin=os.path.join(os.getcwd(), "tests/plugins/broken.py"))

    # A plugin that never answers getmanifest must time out cleanly.
    with pytest.raises(RpcError, match=r': timed out before replying to getmanifest'):
        n2.rpc.plugin_start(os.path.join(os.getcwd(), 'contrib/plugins/fail/failtimeout.py'))

    # Test that we can add a directory with more than one new plugin in it.
    try:
        n.rpc.plugin_startdir(os.path.join(os.getcwd(), "contrib/plugins"))
    except RpcError:
        pass

    # Usually, it crashes after the above return.
    n.rpc.stop()
|
|
|
|
|
2019-07-23 01:24:47 +02:00
|
|
|
|
2018-12-04 04:04:14 +01:00
|
|
|
def test_plugin_disable(node_factory):
    """--disable-plugin works.

    Checks full-path and basename forms, both orderings relative to
    plugin-dir/plugin, directory reloads, and the list form of the
    option.  OrderedDict is used to pin the command-line option order.
    """
    plugin_dir = os.path.join(os.getcwd(), 'contrib/plugins')
    # We used to need plugin-dir before disable-plugin!
    n = node_factory.get_node(options=OrderedDict([('plugin-dir', plugin_dir),
                                                   ('disable-plugin',
                                                    '{}/helloworld.py'
                                                    .format(plugin_dir))]))
    # With helloworld.py disabled, its 'hello' RPC must not exist.
    with pytest.raises(RpcError):
        n.rpc.hello(name='Sun')
    assert n.daemon.is_in_log('helloworld.py: disabled via disable-plugin')
    n.stop()

    # Also works by basename.
    n = node_factory.get_node(options=OrderedDict([('plugin-dir', plugin_dir),
                                                   ('disable-plugin',
                                                    'helloworld.py')]))
    with pytest.raises(RpcError):
        n.rpc.hello(name='Sun')
    assert n.daemon.is_in_log('helloworld.py: disabled via disable-plugin')
    n.stop()

    # Other order also works!
    n = node_factory.get_node(options=OrderedDict([('disable-plugin',
                                                    'helloworld.py'),
                                                   ('plugin-dir', plugin_dir)]))
    with pytest.raises(RpcError):
        n.rpc.hello(name='Sun')
    assert n.daemon.is_in_log('helloworld.py: disabled via disable-plugin')
    n.stop()

    # Both orders of explicit specification work.
    n = node_factory.get_node(options=OrderedDict([('disable-plugin',
                                                    'helloworld.py'),
                                                   ('plugin',
                                                    '{}/helloworld.py'
                                                    .format(plugin_dir))]))
    with pytest.raises(RpcError):
        n.rpc.hello(name='Sun')
    assert n.daemon.is_in_log('helloworld.py: disabled via disable-plugin')
    n.stop()

    # Both orders of explicit specification work.
    n = node_factory.get_node(options=OrderedDict([('plugin',
                                                    '{}/helloworld.py'
                                                    .format(plugin_dir)),
                                                   ('disable-plugin',
                                                    'helloworld.py')]))
    with pytest.raises(RpcError):
        n.rpc.hello(name='Sun')
    assert n.daemon.is_in_log('helloworld.py: disabled via disable-plugin')

    # Still disabled if we load directory.
    n.rpc.plugin_startdir(directory=os.path.join(os.getcwd(), "contrib/plugins"))
    n.daemon.wait_for_log('helloworld.py: disabled via disable-plugin')
    n.stop()

    # Check that list works
    n = node_factory.get_node(options={'disable-plugin':
                                       ['something-else.py', 'helloworld.py']})

    assert n.rpc.listconfigs()['disable-plugin'] == ['something-else.py', 'helloworld.py']
|
|
|
|
|
2018-12-03 22:00:27 +01:00
|
|
|
|
2019-01-03 19:49:31 +01:00
|
|
|
def test_plugin_hook(node_factory, executor):
    """The helloworld plugin registers a htlc_accepted hook.

    The hook will sleep for a few seconds and log a
    message. `lightningd` should wait for the response and only then
    complete the payment.
    """
    l1, l2 = node_factory.line_graph(2, opts={'plugin': os.path.join(os.getcwd(), 'contrib/plugins/helloworld.py')})
    t_start = time.time()
    payment = executor.submit(l1.pay, l2, 100000)
    l2.daemon.wait_for_log(r'on_htlc_accepted called')

    # The hook sleeps for 20 seconds before answering, so the payment
    # future must take at least that long to resolve.
    payment.result()
    elapsed = time.time() - t_start
    assert elapsed >= 20
|
|
|
|
|
|
|
|
|
2019-06-04 09:42:43 +02:00
|
|
|
def test_plugin_connect_notifications(node_factory):
    """ test 'connect' and 'disconnect' notifications
    """
    l1, l2 = node_factory.get_nodes(2, opts={'plugin': os.path.join(os.getcwd(), 'contrib/plugins/helloworld.py')})

    # Both sides receive a 'connect' notification once connected.
    l1.connect(l2)
    for node in (l1, l2):
        node.daemon.wait_for_log(r'Received connect event')

    # And both sides see the corresponding 'disconnect' notification.
    l2.rpc.disconnect(l1.info['id'])
    for node in (l1, l2):
        node.daemon.wait_for_log(r'Received disconnect event')
|
|
|
|
|
|
|
|
|
2019-08-05 19:07:45 +02:00
|
|
|
def test_failing_plugins(directory):
    """Plugins that time out or don't exist must abort lightningd startup."""
    broken = ['contrib/plugins/fail/failtimeout.py',
              'contrib/plugins/fail/doesnotexist.py']

    for rel_path in broken:
        plugin = os.path.join(os.getcwd(), rel_path)
        # Startup must fail, which check_output reports as a
        # CalledProcessError.
        with pytest.raises(subprocess.CalledProcessError):
            subprocess.check_output([
                'lightningd/lightningd',
                '--lightning-dir={}'.format(directory),
                '--plugin={}'.format(plugin),
                '--help',
            ])
|
2019-01-15 05:14:27 +01:00
|
|
|
|
|
|
|
|
|
|
|
def test_pay_plugin(node_factory):
    """Pay an invoice and verify the `pay` command's usage string."""
    l1, l2 = node_factory.line_graph(2)
    inv = l2.rpc.invoice(123000, 'label', 'description', 3700)

    res = l1.rpc.pay(bolt11=inv['bolt11'])
    assert res['status'] == 'complete'

    # Calling pay without its required bolt11 must fail cleanly.
    with pytest.raises(RpcError, match=r'missing required parameter'):
        l1.rpc.call('pay')

    # Make sure usage messages are present.
    msg = 'pay bolt11 [amount_msat] [label] [riskfactor] [maxfeepercent] '\
        '[retry_for] [maxdelay] [exemptfee] [localinvreqid] [exclude] '\
        '[maxfee] [description]'
    # DEVELOPER builds expose the extra use_shadow parameter.
    if DEVELOPER:
        msg += ' [use_shadow]'
    assert only_one(l1.rpc.help('pay')['help'])['command'] == msg
|
2019-02-07 16:57:59 +01:00
|
|
|
|
|
|
|
|
2021-01-26 19:35:33 +01:00
|
|
|
def test_plugin_connected_hook_chaining(node_factory):
    """ l1 uses the logger_a, reject and logger_b plugin.

    l1 is configured to accept connections from l2, but not from l3.
    we check that logger_a is always called and logger_b only for l2:
    the reject plugin sits between the two loggers in the hook chain,
    so rejecting short-circuits before logger_b runs.
    """
    opts = [{'plugin':
             [os.path.join(os.getcwd(),
                           'tests/plugins/peer_connected_logger_a.py'),
              os.path.join(os.getcwd(),
                           'tests/plugins/reject.py'),
              os.path.join(os.getcwd(),
                           'tests/plugins/peer_connected_logger_b.py')],
             'allow_warning': True},
            {},
            {'allow_warning': True}]

    l1, l2, l3 = node_factory.get_nodes(3, opts=opts)
    l2id = l2.info['id']
    l3id = l3.info['id']
    # Put l3 on l1's reject list before anyone connects.
    l1.rpc.reject(l3.info['id'])

    l2.connect(l1)
    # Allowed peer: both loggers fire, with the 'allowed' message between.
    l1.daemon.wait_for_logs([
        f"peer_connected_logger_a {l2id}",
        f"{l2id} is allowed",
        f"peer_connected_logger_b {l2id}"
    ])
    assert len(l1.rpc.listpeers(l2id)['peers']) == 1

    l3.connect(l1)
    # Rejected peer: only logger_a fires before the reject.
    l1.daemon.wait_for_logs([
        f"peer_connected_logger_a {l3id}",
        f"{l3id} is in reject list"
    ])

    # FIXME: this error occurs *after* connection, so we connect then drop.
    l3.daemon.wait_for_log(r"-connectd: peer_in WIRE_WARNING")
    l3.daemon.wait_for_log(r"You are in reject list")

    def check_disconnect():
        # True once l3 is gone or at least marked disconnected on l1.
        peers = l1.rpc.listpeers(l3id)['peers']
        return peers == [] or not peers[0]['connected']

    wait_for(check_disconnect)
    # logger_b must never have seen the rejected peer.
    assert not l1.daemon.is_in_log(f"peer_connected_logger_b {l3id}")
|
2019-01-22 23:23:34 +01:00
|
|
|
|
|
|
|
|
2022-02-24 16:48:16 +01:00
|
|
|
@pytest.mark.developer("localhost remote_addr will be filtered without DEVELOPER")
def test_peer_connected_remote_addr(node_factory):
    """This tests the optional tlv `remote_addr` being passed to a plugin.

    The optional tlv `remote_addr` should only be visible to the initiator l1.
    """
    pluginpath = os.path.join(os.getcwd(), 'tests/plugins/peer_connected_logger_a.py')
    l1, l2 = node_factory.get_nodes(2, opts={
        'plugin': pluginpath,
        'dev-allow-localhost': None})
    l1id = l1.info['id']
    l2id = l2.info['id']

    l1.connect(l2)
    l1log = l1.daemon.wait_for_log(f"peer_connected_logger_a {l2id}")
    l2log = l2.daemon.wait_for_log(f"peer_connected_logger_a {l1id}")

    # the log entries are followed by the peer_connected payload as dict {} like:
    # {'id': '022d223...', 'direction': 'out', 'addr': '127.0.0.1:35289',
    # 'remote_addr': '127.0.0.1:59582', 'features': '8808226aa2'}
    # Parse with ast.literal_eval rather than eval(): the payload is a
    # plain Python literal, and literal_eval cannot execute arbitrary
    # code that might end up in a log line.
    l1payload = ast.literal_eval(l1log[l1log.find('{'):])
    l2payload = ast.literal_eval(l2log[l2log.find('{'):])

    # check that l1 sees its remote_addr as l2 sees l1
    assert(l1payload['remote_addr'] == l2payload['addr'])
    assert(not l2payload.get('remote_addr'))  # l2 can't see a remote_addr
|
|
|
|
|
|
|
|
|
2019-01-22 23:23:34 +01:00
|
|
|
def test_async_rpcmethod(node_factory, executor):
    """This tests the async rpcmethods.

    It works in conjunction with the `asynctest` plugin which stashes
    requests and then resolves all of them on the fifth call.
    """
    l1 = node_factory.get_node(options={'plugin': os.path.join(os.getcwd(), 'tests/plugins/asynctest.py')})

    # Queue up ten calls; the plugin stashes them without answering.
    futures = [executor.submit(l1.rpc.asyncqueue) for _ in range(10)]

    time.sleep(3)

    # None of them should have resolved yet.
    assert not any(fut.done() for fut in futures)

    # This call triggers the release: every stashed request resolves to
    # 42, since the last number submitted is returned for all of them.
    l1.rpc.asyncflush(42)

    assert [fut.result() for fut in futures] == [42] * len(futures)
|
2019-03-04 04:00:33 +01:00
|
|
|
|
|
|
|
|
2019-09-12 22:49:42 +02:00
|
|
|
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "Only sqlite3 implements the db_write_hook currently")
def test_db_hook(node_factory, executor):
    """This tests the db hook.

    The dblog.py plugin replays every db_write statement into its own
    sqlite3 file; after shutdown the shadow database must be identical
    to lightningd's own.
    """
    dbfile = os.path.join(node_factory.directory, "dblog.sqlite3")
    l1 = node_factory.get_node(options={'plugin': os.path.join(os.getcwd(), 'tests/plugins/dblog.py'),
                                        'dblog-file': dbfile})

    # It should see the db being created, and sometime later actually get
    # initted.
    # This precedes startup, so needle already past
    assert l1.daemon.is_in_log(r'plugin-dblog.py: deferring \d+ commands')
    l1.daemon.logsearch_start = 0
    l1.daemon.wait_for_log('plugin-dblog.py: replaying pre-init data:')
    l1.daemon.wait_for_log('plugin-dblog.py: CREATE TABLE version \\(version INTEGER\\)')
    l1.daemon.wait_for_log("plugin-dblog.py: initialized.* 'startup': True")

    l1.stop()

    # Databases should be identical.  Close the connections afterwards
    # instead of leaking them for the rest of the test run.
    db1 = sqlite3.connect(os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'lightningd.sqlite3'))
    db2 = sqlite3.connect(dbfile)
    try:
        assert list(db1.iterdump()) == list(db2.iterdump())
    finally:
        db1.close()
        db2.close()
|
|
|
|
|
|
|
|
|
|
|
|
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "Only sqlite3 implements the db_write_hook currently")
def test_db_hook_multiple(node_factory, executor):
    """This tests the db hook for multiple-plugin case.

    Same as test_db_hook, but with a second (important) plugin also
    subscribed to db_write, to check hook chaining doesn't break the
    replay.
    """
    dbfile = os.path.join(node_factory.directory, "dblog.sqlite3")
    l1 = node_factory.get_node(options={'plugin': os.path.join(os.getcwd(), 'tests/plugins/dblog.py'),
                                        'important-plugin': os.path.join(os.getcwd(), 'tests/plugins/dbdummy.py'),
                                        'dblog-file': dbfile})

    # It should see the db being created, and sometime later actually get
    # initted.
    # This precedes startup, so needle already past
    assert l1.daemon.is_in_log(r'plugin-dblog.py: deferring \d+ commands')
    l1.daemon.logsearch_start = 0
    l1.daemon.wait_for_log('plugin-dblog.py: replaying pre-init data:')
    l1.daemon.wait_for_log('plugin-dblog.py: CREATE TABLE version \\(version INTEGER\\)')
    l1.daemon.wait_for_log("plugin-dblog.py: initialized.* 'startup': True")

    l1.stop()

    # Databases should be identical.  Close the connections afterwards
    # instead of leaking them for the rest of the test run.
    db1 = sqlite3.connect(os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'lightningd.sqlite3'))
    db2 = sqlite3.connect(dbfile)
    try:
        assert list(db1.iterdump()) == list(db2.iterdump())
    finally:
        db1.close()
        db2.close()
|
|
|
|
|
|
|
|
|
2019-03-04 04:00:33 +01:00
|
|
|
def test_utf8_passthrough(node_factory, executor):
    """UTF-8 round-trips through plugin RPC and through lightning-cli.

    Responses must carry real UTF-8 characters, not \\uXXXX escapes.
    """
    l1 = node_factory.get_node(options={'plugin': os.path.join(os.getcwd(), 'tests/plugins/utf8.py'),
                                        'log-level': 'io'})

    # This works because Python unmangles.
    res = l1.rpc.call('utf8', ['ナンセンス 1杯'])
    assert '\\u' not in res['utf8']
    assert res['utf8'] == 'ナンセンス 1杯'

    # Now, try native.
    out = subprocess.check_output(['cli/lightning-cli',
                                   '--network={}'.format(TEST_NETWORK),
                                   '--lightning-dir={}'
                                   .format(l1.daemon.lightning_dir),
                                   'utf8', 'ナンセンス 1杯']).decode('utf-8')
    assert '\\u' not in out
    # Exact CLI JSON output, byte-for-byte.
    assert out == '{\n "utf8": "ナンセンス 1杯"\n}\n'
|
2019-04-11 04:15:22 +02:00
|
|
|
|
|
|
|
|
|
|
|
def test_invoice_payment_hook(node_factory):
    """ l1 uses the reject-payment plugin to reject invoices with odd preimages.
    """
    opts = [{}, {'plugin': os.path.join(os.getcwd(), 'tests/plugins/reject_some_invoices.py')}]
    l1, l2 = node_factory.line_graph(2, opts=opts)

    # This one works
    inv1 = l2.rpc.invoice(1230, 'label', 'description', preimage='1' * 64)
    l1.rpc.pay(inv1['bolt11'])

    # The hook logs the invoice details it was given.
    l2.daemon.wait_for_log('label=label')
    l2.daemon.wait_for_log('msat=')
    l2.daemon.wait_for_log('preimage=' + '1' * 64)

    # This one will be rejected.
    inv2 = l2.rpc.invoice(1230, 'label2', 'description', preimage='0' * 64)
    with pytest.raises(RpcError):
        l1.rpc.pay(inv2['bolt11'])

    # The payer sees the rejection as a temporary node failure.
    pstatus = l1.rpc.call('paystatus', [inv2['bolt11']])['pay'][0]
    assert pstatus['attempts'][-1]['failure']['data']['failcodename'] == 'WIRE_TEMPORARY_NODE_FAILURE'

    # The hook still saw (and logged) the rejected invoice.
    l2.daemon.wait_for_log('label=label2')
    l2.daemon.wait_for_log('msat=')
    l2.daemon.wait_for_log('preimage=' + '0' * 64)
|
2019-05-21 00:45:20 +02:00
|
|
|
|
|
|
|
|
2019-05-31 09:30:19 +02:00
|
|
|
def test_invoice_payment_hook_hold(node_factory):
    """ l1 uses the hold_invoice plugin to delay invoice payment.
    """
    # The plugin holds each invoice_payment hook call for half the test timeout.
    opts = [{}, {'plugin': os.path.join(os.getcwd(), 'tests/plugins/hold_invoice.py'), 'holdtime': TIMEOUT / 2}]
    l1, l2 = node_factory.line_graph(2, opts=opts)

    # The payment still completes, just delayed by the hook.
    inv1 = l2.rpc.invoice(1230, 'label', 'description', preimage='1' * 64)
    l1.rpc.pay(inv1['bolt11'])
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_openchannel_hook(node_factory, bitcoind):
    """ l2 uses the reject_odd_funding_amounts plugin to reject some openings.
    """
    opts = [{}, {'plugin': os.path.join(os.getcwd(), 'tests/plugins/reject_odd_funding_amounts.py')}]
    l1, l2 = node_factory.line_graph(2, fundchannel=False, opts=opts)
    l1.fundwallet(10**6)

    # Even amount: works.
    l1.rpc.fundchannel(l2.info['id'], 100000)

    # Make sure plugin got all the vars we expect
    expected = {
        'channel_flags': '1',
        'dust_limit_msat': 546000,
        'htlc_minimum_msat': 0,
        'id': l1.info['id'],
        'max_accepted_htlcs': '483',
        'max_htlc_value_in_flight_msat': 18446744073709551615,
        'to_self_delay': '5',
    }

    if l2.config('experimental-dual-fund'):
        # openchannel2 var checks ('.*' entries are matched as regexes below)
        expected.update({
            'channel_id': '.*',
            'commitment_feerate_per_kw': '7500',
            'funding_feerate_per_kw': '7500',
            'feerate_our_max': '150000',
            'feerate_our_min': '1875',
            'locktime': '.*',
            'their_funding_msat': 100000000,
            'channel_max_msat': 16777215000,
        })
    else:
        expected.update({
            'channel_reserve_msat': 1000000,
            'feerate_per_kw': '7500',
            'funding_msat': 100000000,
            'push_msat': 0,
        })

    l2.daemon.wait_for_log('reject_odd_funding_amounts.py: {} VARS'.format(len(expected)))
    for k, v in expected.items():
        assert l2.daemon.is_in_log('reject_odd_funding_amounts.py: {}={}'.format(k, v))

    # Close it.
    txid = l1.rpc.close(l2.info['id'])['txid']
    bitcoind.generate_block(1, txid)
    wait_for(lambda: [c['state'] for c in only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels']] == ['ONCHAIN'])

    # Odd amount: fails
    l1.connect(l2)
    with pytest.raises(RpcError, match=r"I don't like odd amounts"):
        l1.rpc.fundchannel(l2.info['id'], 100001)
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_openchannel_hook_error_handling(node_factory, bitcoind):
    """ l2 uses a plugin that should fatal() crash the node.

    This is because the plugin rejects a channel while
    also setting a close_to address which isn't allowed.
    """
    # NOTE: the accepter plugin misbehaves (rejects but sets close_to) for
    # the amount used below, which must be fatal for l2.
    opts = {'plugin': os.path.join(os.getcwd(), 'tests/plugins/openchannel_hook_accepter.py')}
    l1 = node_factory.get_node()
    l2 = node_factory.get_node(options=opts,
                               expect_fail=True,
                               may_fail=True,
                               allow_broken_log=True)
    l1.connect(l2)
    l1.fundwallet(10**6)

    # next fundchannel should fail fatal() for l2
    with pytest.raises(RpcError, match=r'Owning subdaemon (openingd|dualopend) died'):
        l1.rpc.fundchannel(l2.info['id'], 100004)
    assert l2.daemon.is_in_log("BROKEN.*Plugin rejected openchannel[2]? but also set close_to")
|
|
|
@pytest.mark.openchannel('v1')
|
|
|
|
@pytest.mark.openchannel('v2')
|
2020-08-21 12:38:51 +02:00
|
|
|
def test_openchannel_hook_chaining(node_factory, bitcoind):
|
|
|
|
""" l2 uses a set of plugin that all use the openchannel_hook.
|
|
|
|
|
|
|
|
We test that chaining works by using multiple plugins in a way
|
|
|
|
that we check for the first plugin that rejects prevents from evaluating
|
|
|
|
further plugin responses down the chain.
|
|
|
|
|
|
|
|
"""
|
|
|
|
opts = [{}, {'plugin': [
|
|
|
|
os.path.join(os.path.dirname(__file__), '..', 'tests/plugins/openchannel_hook_accept.py'),
|
|
|
|
os.path.join(os.path.dirname(__file__), '..', 'tests/plugins/openchannel_hook_accepter.py'),
|
|
|
|
os.path.join(os.path.dirname(__file__), '..', 'tests/plugins/openchannel_hook_reject.py')
|
|
|
|
]}]
|
|
|
|
l1, l2 = node_factory.line_graph(2, fundchannel=False, opts=opts)
|
|
|
|
l1.fundwallet(10**6)
|
|
|
|
|
2021-01-17 13:48:36 +01:00
|
|
|
hook_msg = "openchannel2? hook rejects and says '"
|
2020-08-21 12:38:51 +02:00
|
|
|
# 100005sat fundchannel should fail fatal() for l2
|
|
|
|
# because hook_accepter.py rejects on that amount 'for a reason'
|
2021-05-07 20:39:23 +02:00
|
|
|
with pytest.raises(RpcError, match=r'reject for a reason'):
|
2020-08-21 12:38:51 +02:00
|
|
|
l1.rpc.fundchannel(l2.info['id'], 100005)
|
|
|
|
|
2020-11-03 00:24:00 +01:00
|
|
|
assert l2.daemon.wait_for_log(hook_msg + "reject for a reason")
|
2021-01-27 13:17:20 +01:00
|
|
|
# first plugin in the chain was called
|
|
|
|
assert l2.daemon.is_in_log("accept on principle")
|
2020-11-03 00:24:00 +01:00
|
|
|
# the third plugin must now not be called anymore
|
|
|
|
assert not l2.daemon.is_in_log("reject on principle")
|
2020-08-21 12:38:51 +02:00
|
|
|
|
2022-07-18 14:12:28 +02:00
|
|
|
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
|
|
|
|
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
|
2020-08-21 12:38:51 +02:00
|
|
|
# 100000sat is good for hook_accepter, so it should fail 'on principle'
|
|
|
|
# at third hook openchannel_reject.py
|
2021-05-07 20:39:23 +02:00
|
|
|
with pytest.raises(RpcError, match=r'reject on principle'):
|
2020-08-21 12:38:51 +02:00
|
|
|
l1.rpc.fundchannel(l2.info['id'], 100000)
|
|
|
|
assert l2.daemon.wait_for_log(hook_msg + "reject on principle")
|
|
|
|
|
|
|
|
|
2021-05-07 20:39:23 +02:00
|
|
|
@pytest.mark.openchannel('v1')
|
|
|
|
@pytest.mark.openchannel('v2')
|
2020-10-28 11:46:13 +01:00
|
|
|
def test_channel_state_changed_bilateral(node_factory, bitcoind):
|
|
|
|
""" We open and close a channel and check notifications both sides.
|
|
|
|
|
|
|
|
The misc_notifications.py plugin logs `channel_state_changed` events.
|
|
|
|
"""
|
|
|
|
opts = {"plugin": os.path.join(os.getcwd(), "tests/plugins/misc_notifications.py")}
|
2020-09-08 18:16:04 +02:00
|
|
|
l1, l2 = node_factory.line_graph(2, opts=opts)
|
|
|
|
|
2020-10-28 11:46:13 +01:00
|
|
|
l1_id = l1.rpc.getinfo()["id"]
|
|
|
|
l2_id = l2.rpc.getinfo()["id"]
|
2020-09-08 18:16:04 +02:00
|
|
|
cid = l1.get_channel_id(l2)
|
|
|
|
scid = l1.get_channel_scid(l2)
|
|
|
|
|
2020-10-28 11:46:13 +01:00
|
|
|
# a helper that gives us the next channel_state_changed log entry
|
|
|
|
def wait_for_event(node):
|
|
|
|
msg = node.daemon.wait_for_log("channel_state_changed.*new_state.*")
|
|
|
|
event = ast.literal_eval(re.findall(".*({.*}).*", msg)[0])
|
|
|
|
return event
|
|
|
|
|
2020-11-13 09:36:03 +01:00
|
|
|
# check channel 'opener' and 'closer' within this testcase ...
|
|
|
|
assert(l1.rpc.listpeers()['peers'][0]['channels'][0]['opener'] == 'local')
|
|
|
|
assert(l2.rpc.listpeers()['peers'][0]['channels'][0]['opener'] == 'remote')
|
2021-06-16 03:00:17 +02:00
|
|
|
# the 'closer' should be missing initially
|
|
|
|
assert 'closer' not in l1.rpc.listpeers()['peers'][0]['channels'][0]
|
|
|
|
assert 'closer' not in l2.rpc.listpeers()['peers'][0]['channels'][0]
|
2020-11-13 09:36:03 +01:00
|
|
|
|
2020-10-28 11:46:13 +01:00
|
|
|
event1 = wait_for_event(l1)
|
2020-12-15 01:43:05 +01:00
|
|
|
event2 = wait_for_event(l2)
|
2020-10-28 11:46:13 +01:00
|
|
|
assert(event1['peer_id'] == l2_id) # we only test these IDs the first time
|
|
|
|
assert(event1['channel_id'] == cid)
|
2022-07-06 17:01:58 +02:00
|
|
|
assert(event1['short_channel_id'] is None) # None until locked in
|
|
|
|
assert(event1['cause'] == "user")
|
|
|
|
|
|
|
|
assert(event2['peer_id'] == l1_id) # we only test these IDs the first time
|
|
|
|
assert(event2['channel_id'] == cid)
|
|
|
|
assert(event2['short_channel_id'] is None) # None until locked in
|
|
|
|
assert(event2['cause'] == "remote")
|
|
|
|
|
|
|
|
for ev in [event1, event2]:
|
|
|
|
# Dual funded channels
|
|
|
|
if l1.config('experimental-dual-fund'):
|
|
|
|
assert(ev['old_state'] == "DUALOPEND_OPEN_INIT")
|
|
|
|
assert(ev['new_state'] == "DUALOPEND_AWAITING_LOCKIN")
|
|
|
|
assert(ev['message'] == "Sigs exchanged, waiting for lock-in")
|
|
|
|
else:
|
|
|
|
assert(ev['old_state'] == "unknown")
|
|
|
|
assert(ev['new_state'] == "CHANNELD_AWAITING_LOCKIN")
|
|
|
|
assert(ev['message'] == "new channel opened")
|
|
|
|
|
|
|
|
event1 = wait_for_event(l1)
|
|
|
|
event2 = wait_for_event(l2)
|
2020-10-28 11:46:13 +01:00
|
|
|
assert(event1['short_channel_id'] == scid)
|
2020-12-15 01:43:05 +01:00
|
|
|
if l1.config('experimental-dual-fund'):
|
|
|
|
assert(event1['old_state'] == "DUALOPEND_AWAITING_LOCKIN")
|
|
|
|
else:
|
|
|
|
assert(event1['old_state'] == "CHANNELD_AWAITING_LOCKIN")
|
2020-10-28 11:46:13 +01:00
|
|
|
assert(event1['new_state'] == "CHANNELD_NORMAL")
|
|
|
|
assert(event1['cause'] == "user")
|
|
|
|
assert(event1['message'] == "Lockin complete")
|
2020-12-15 01:43:05 +01:00
|
|
|
|
2020-10-28 11:46:13 +01:00
|
|
|
assert(event2['short_channel_id'] == scid)
|
2020-12-15 01:43:05 +01:00
|
|
|
if l1.config('experimental-dual-fund'):
|
|
|
|
assert(event2['old_state'] == "DUALOPEND_AWAITING_LOCKIN")
|
|
|
|
else:
|
|
|
|
assert(event2['old_state'] == "CHANNELD_AWAITING_LOCKIN")
|
2020-10-28 11:46:13 +01:00
|
|
|
assert(event2['new_state'] == "CHANNELD_NORMAL")
|
|
|
|
assert(event2['cause'] == "remote")
|
|
|
|
assert(event2['message'] == "Lockin complete")
|
2020-09-08 18:16:04 +02:00
|
|
|
|
2020-10-28 11:46:21 +01:00
|
|
|
# also test the correctness of timestamps once
|
2020-11-16 14:28:12 +01:00
|
|
|
assert(datetime.strptime(event1['timestamp'], '%Y-%m-%dT%H:%M:%S.%fZ'))
|
|
|
|
assert(datetime.strptime(event2['timestamp'], '%Y-%m-%dT%H:%M:%S.%fZ'))
|
2020-10-28 11:46:21 +01:00
|
|
|
|
2020-09-08 18:16:04 +02:00
|
|
|
# close channel and look for stateful events
|
|
|
|
l1.rpc.close(scid)
|
|
|
|
|
2020-10-28 11:46:13 +01:00
|
|
|
event1 = wait_for_event(l1)
|
|
|
|
assert(event1['old_state'] == "CHANNELD_NORMAL")
|
|
|
|
assert(event1['new_state'] == "CHANNELD_SHUTTING_DOWN")
|
|
|
|
assert(event1['cause'] == "user")
|
|
|
|
assert(event1['message'] == "User or plugin invoked close command")
|
|
|
|
event2 = wait_for_event(l2)
|
|
|
|
assert(event2['old_state'] == "CHANNELD_NORMAL")
|
|
|
|
assert(event2['new_state'] == "CHANNELD_SHUTTING_DOWN")
|
|
|
|
assert(event2['cause'] == "remote")
|
|
|
|
assert(event2['message'] == "Peer closes channel")
|
|
|
|
|
2020-11-13 09:36:03 +01:00
|
|
|
# 'closer' should now be set accordingly ...
|
|
|
|
assert(l1.rpc.listpeers()['peers'][0]['channels'][0]['closer'] == 'local')
|
|
|
|
assert(l2.rpc.listpeers()['peers'][0]['channels'][0]['closer'] == 'remote')
|
|
|
|
|
2020-10-28 11:46:13 +01:00
|
|
|
event1 = wait_for_event(l1)
|
|
|
|
assert(event1['old_state'] == "CHANNELD_SHUTTING_DOWN")
|
|
|
|
assert(event1['new_state'] == "CLOSINGD_SIGEXCHANGE")
|
|
|
|
assert(event1['cause'] == "user")
|
|
|
|
assert(event1['message'] == "Start closingd")
|
|
|
|
event2 = wait_for_event(l2)
|
|
|
|
assert(event2['old_state'] == "CHANNELD_SHUTTING_DOWN")
|
|
|
|
assert(event2['new_state'] == "CLOSINGD_SIGEXCHANGE")
|
|
|
|
assert(event2['cause'] == "remote")
|
|
|
|
assert(event2['message'] == "Start closingd")
|
|
|
|
|
|
|
|
event1 = wait_for_event(l1)
|
|
|
|
assert(event1['old_state'] == "CLOSINGD_SIGEXCHANGE")
|
|
|
|
assert(event1['new_state'] == "CLOSINGD_COMPLETE")
|
|
|
|
assert(event1['cause'] == "user")
|
|
|
|
assert(event1['message'] == "Closing complete")
|
|
|
|
event2 = wait_for_event(l2)
|
|
|
|
assert(event2['old_state'] == "CLOSINGD_SIGEXCHANGE")
|
|
|
|
assert(event2['new_state'] == "CLOSINGD_COMPLETE")
|
|
|
|
assert(event2['cause'] == "remote")
|
|
|
|
assert(event2['message'] == "Closing complete")
|
2020-09-08 18:16:04 +02:00
|
|
|
|
2021-01-14 12:42:05 +01:00
|
|
|
bitcoind.generate_block(100, wait_for_mempool=1) # so it gets settled
|
2020-09-08 18:16:04 +02:00
|
|
|
|
2020-10-28 11:46:13 +01:00
|
|
|
event1 = wait_for_event(l1)
|
|
|
|
assert(event1['old_state'] == "CLOSINGD_COMPLETE")
|
|
|
|
assert(event1['new_state'] == "FUNDING_SPEND_SEEN")
|
|
|
|
assert(event1['cause'] == "user")
|
|
|
|
assert(event1['message'] == "Onchain funding spend")
|
|
|
|
event2 = wait_for_event(l2)
|
|
|
|
assert(event2['old_state'] == "CLOSINGD_COMPLETE")
|
|
|
|
assert(event2['new_state'] == "FUNDING_SPEND_SEEN")
|
|
|
|
assert(event2['cause'] == "remote")
|
|
|
|
assert(event2['message'] == "Onchain funding spend")
|
|
|
|
|
|
|
|
event1 = wait_for_event(l1)
|
|
|
|
assert(event1['old_state'] == "FUNDING_SPEND_SEEN")
|
|
|
|
assert(event1['new_state'] == "ONCHAIN")
|
|
|
|
assert(event1['cause'] == "user")
|
|
|
|
assert(event1['message'] == "Onchain init reply")
|
|
|
|
event2 = wait_for_event(l2)
|
|
|
|
assert(event2['old_state'] == "FUNDING_SPEND_SEEN")
|
|
|
|
assert(event2['new_state'] == "ONCHAIN")
|
|
|
|
assert(event2['cause'] == "remote")
|
|
|
|
assert(event2['message'] == "Onchain init reply")
|
2020-09-08 18:16:04 +02:00
|
|
|
|
|
|
|
|
2021-05-07 20:39:23 +02:00
|
|
|
@pytest.mark.openchannel('v1')
|
|
|
|
@pytest.mark.openchannel('v2')
|
2020-09-08 18:16:04 +02:00
|
|
|
def test_channel_state_changed_unilateral(node_factory, bitcoind):
|
2020-10-28 11:46:13 +01:00
|
|
|
""" We open, disconnect, force-close a channel and check for notifications.
|
|
|
|
|
|
|
|
The misc_notifications.py plugin logs `channel_state_changed` events.
|
|
|
|
"""
|
2021-02-03 05:11:09 +01:00
|
|
|
opts = {"plugin": os.path.join(os.getcwd(), "tests/plugins/misc_notifications.py"),
|
2022-01-20 05:56:06 +01:00
|
|
|
"allow_warning": True,
|
|
|
|
'may_reconnect': True}
|
2021-05-10 20:46:52 +02:00
|
|
|
|
2020-09-08 18:16:04 +02:00
|
|
|
l1, l2 = node_factory.line_graph(2, opts=opts)
|
|
|
|
|
2020-10-28 11:46:13 +01:00
|
|
|
l1_id = l1.rpc.getinfo()["id"]
|
2020-09-08 18:16:04 +02:00
|
|
|
cid = l1.get_channel_id(l2)
|
|
|
|
scid = l1.get_channel_scid(l2)
|
|
|
|
|
2020-10-28 11:46:13 +01:00
|
|
|
# a helper that gives us the next channel_state_changed log entry
|
|
|
|
def wait_for_event(node):
|
|
|
|
msg = node.daemon.wait_for_log("channel_state_changed.*new_state.*")
|
|
|
|
event = ast.literal_eval(re.findall(".*({.*}).*", msg)[0])
|
|
|
|
return event
|
|
|
|
|
|
|
|
event2 = wait_for_event(l2)
|
2022-07-06 17:01:58 +02:00
|
|
|
assert(event2['peer_id'] == l1_id)
|
|
|
|
assert(event2['channel_id'] == cid)
|
|
|
|
assert(event2['short_channel_id'] is None)
|
|
|
|
assert(event2['cause'] == "remote")
|
|
|
|
|
2020-12-15 01:47:30 +01:00
|
|
|
if l2.config('experimental-dual-fund'):
|
|
|
|
assert(event2['old_state'] == "DUALOPEND_OPEN_INIT")
|
|
|
|
assert(event2['new_state'] == "DUALOPEND_AWAITING_LOCKIN")
|
|
|
|
assert(event2['message'] == "Sigs exchanged, waiting for lock-in")
|
2022-07-06 17:01:58 +02:00
|
|
|
else:
|
|
|
|
assert(event2['old_state'] == "unknown")
|
|
|
|
assert(event2['new_state'] == "CHANNELD_AWAITING_LOCKIN")
|
|
|
|
assert(event2['message'] == "new channel opened")
|
2020-12-15 01:47:30 +01:00
|
|
|
|
2022-07-06 17:01:58 +02:00
|
|
|
event2 = wait_for_event(l2)
|
2020-10-28 11:46:13 +01:00
|
|
|
assert(event2['short_channel_id'] == scid)
|
2020-12-15 01:47:30 +01:00
|
|
|
if l2.config('experimental-dual-fund'):
|
|
|
|
assert(event2['old_state'] == "DUALOPEND_AWAITING_LOCKIN")
|
|
|
|
else:
|
|
|
|
assert(event2['old_state'] == "CHANNELD_AWAITING_LOCKIN")
|
2020-10-28 11:46:13 +01:00
|
|
|
assert(event2['new_state'] == "CHANNELD_NORMAL")
|
|
|
|
assert(event2['cause'] == "remote")
|
|
|
|
assert(event2['message'] == "Lockin complete")
|
2020-09-08 18:16:04 +02:00
|
|
|
|
|
|
|
# close channel unilaterally and look for stateful events
|
|
|
|
l1.rpc.stop()
|
|
|
|
wait_for(lambda: not only_one(l2.rpc.listpeers()['peers'])['connected'])
|
2020-10-28 11:46:13 +01:00
|
|
|
l2.rpc.close(scid, 1) # force close after 1sec timeout
|
|
|
|
|
|
|
|
event2 = wait_for_event(l2)
|
|
|
|
assert(event2['old_state'] == "CHANNELD_NORMAL")
|
|
|
|
assert(event2['new_state'] == "CHANNELD_SHUTTING_DOWN")
|
|
|
|
assert(event2['cause'] == "user")
|
|
|
|
assert(event2['message'] == "User or plugin invoked close command")
|
|
|
|
event2 = wait_for_event(l2)
|
|
|
|
assert(event2['old_state'] == "CHANNELD_SHUTTING_DOWN")
|
|
|
|
assert(event2['new_state'] == "AWAITING_UNILATERAL")
|
|
|
|
assert(event2['cause'] == "user")
|
|
|
|
assert(event2['message'] == "Forcibly closed by `close` command timeout")
|
2020-09-08 18:16:04 +02:00
|
|
|
|
2022-01-08 14:18:29 +01:00
|
|
|
# restart l1 now, it will reconnect and l2 will send it an error.
|
2020-11-12 11:25:19 +01:00
|
|
|
l1.restart()
|
|
|
|
wait_for(lambda: len(l1.rpc.listpeers()['peers']) == 1)
|
|
|
|
# check 'closer' on l2 while the peer is not yet forgotten
|
|
|
|
assert(l2.rpc.listpeers()['peers'][0]['channels'][0]['closer'] == 'local')
|
2021-05-10 20:46:52 +02:00
|
|
|
if EXPERIMENTAL_DUAL_FUND:
|
|
|
|
l1.daemon.wait_for_log(r'Peer has reconnected, state')
|
2022-03-22 21:30:54 +01:00
|
|
|
l2.daemon.wait_for_log(r'Telling connectd to send error')
|
2020-11-12 11:25:19 +01:00
|
|
|
|
2022-01-20 05:56:06 +01:00
|
|
|
# l1 will receive error, and go into AWAITING_UNILATERAL
|
|
|
|
# FIXME: l2 should re-xmit shutdown, but it doesn't until it's mined :(
|
|
|
|
event1 = wait_for_event(l1)
|
|
|
|
# Doesn't have closer, since it blames the "protocol"?
|
|
|
|
assert 'closer' not in l1.rpc.listpeers()['peers'][0]['channels'][0]
|
|
|
|
assert(event1['old_state'] == "CHANNELD_NORMAL")
|
|
|
|
assert(event1['new_state'] == "AWAITING_UNILATERAL")
|
|
|
|
assert(event1['cause'] == "protocol")
|
|
|
|
assert(event1['message'] == "channeld: received ERROR error channel {}: Forcibly closed by `close` command timeout".format(cid))
|
|
|
|
|
2020-11-12 11:25:19 +01:00
|
|
|
# settle the channel closure
|
|
|
|
bitcoind.generate_block(100)
|
2020-09-08 18:16:04 +02:00
|
|
|
|
2020-10-28 11:46:13 +01:00
|
|
|
event2 = wait_for_event(l2)
|
|
|
|
assert(event2['old_state'] == "AWAITING_UNILATERAL")
|
|
|
|
assert(event2['new_state'] == "FUNDING_SPEND_SEEN")
|
|
|
|
assert(event2['cause'] == "user")
|
|
|
|
assert(event2['message'] == "Onchain funding spend")
|
|
|
|
event2 = wait_for_event(l2)
|
|
|
|
assert(event2['old_state'] == "FUNDING_SPEND_SEEN")
|
|
|
|
assert(event2['new_state'] == "ONCHAIN")
|
|
|
|
assert(event2['cause'] == "user")
|
|
|
|
assert(event2['message'] == "Onchain init reply")
|
|
|
|
|
2020-11-12 11:25:19 +01:00
|
|
|
# Check 'closer' on l1 while the peer is not yet forgotten
|
2020-10-28 11:46:13 +01:00
|
|
|
event1 = wait_for_event(l1)
|
2020-11-12 11:25:19 +01:00
|
|
|
assert(l1.rpc.listpeers()['peers'][0]['channels'][0]['closer'] == 'remote')
|
|
|
|
|
2022-01-20 05:56:06 +01:00
|
|
|
assert(event1['old_state'] == "AWAITING_UNILATERAL")
|
|
|
|
assert(event1['new_state'] == "FUNDING_SPEND_SEEN")
|
|
|
|
assert(event1['cause'] == "onchain")
|
|
|
|
assert(event1['message'] == "Onchain funding spend")
|
2022-01-08 14:18:29 +01:00
|
|
|
|
2022-01-20 05:56:06 +01:00
|
|
|
event1 = wait_for_event(l1)
|
|
|
|
assert(event1['old_state'] == "FUNDING_SPEND_SEEN")
|
|
|
|
assert(event1['new_state'] == "ONCHAIN")
|
|
|
|
assert(event1['cause'] == "onchain")
|
|
|
|
assert(event1['message'] == "Onchain init reply")
|
2020-09-08 18:16:04 +02:00
|
|
|
|
|
|
|
|
2021-05-07 20:39:23 +02:00
|
|
|
@pytest.mark.openchannel('v1')
|
|
|
|
@pytest.mark.openchannel('v2')
|
2020-10-28 11:46:24 +01:00
|
|
|
def test_channel_state_change_history(node_factory, bitcoind):
|
|
|
|
""" We open and close a channel and check for state_canges entries.
|
|
|
|
|
|
|
|
"""
|
|
|
|
l1, l2 = node_factory.line_graph(2)
|
|
|
|
scid = l1.get_channel_scid(l2)
|
|
|
|
l1.rpc.close(scid)
|
|
|
|
|
|
|
|
history = l1.rpc.listpeers()['peers'][0]['channels'][0]['state_changes']
|
2020-12-15 00:57:53 +01:00
|
|
|
if l1.config('experimental-dual-fund'):
|
|
|
|
assert(history[0]['cause'] == "user")
|
|
|
|
assert(history[0]['old_state'] == "DUALOPEND_OPEN_INIT")
|
|
|
|
assert(history[0]['new_state'] == "DUALOPEND_AWAITING_LOCKIN")
|
|
|
|
assert(history[1]['cause'] == "user")
|
|
|
|
assert(history[1]['old_state'] == "DUALOPEND_AWAITING_LOCKIN")
|
|
|
|
assert(history[1]['new_state'] == "CHANNELD_NORMAL")
|
|
|
|
assert(history[2]['cause'] == "user")
|
|
|
|
assert(history[2]['new_state'] == "CHANNELD_SHUTTING_DOWN")
|
|
|
|
assert(history[3]['cause'] == "user")
|
|
|
|
assert(history[3]['new_state'] == "CLOSINGD_SIGEXCHANGE")
|
|
|
|
assert(history[4]['cause'] == "user")
|
|
|
|
assert(history[4]['new_state'] == "CLOSINGD_COMPLETE")
|
|
|
|
assert(history[4]['message'] == "Closing complete")
|
|
|
|
else:
|
|
|
|
assert(history[0]['cause'] == "user")
|
|
|
|
assert(history[0]['old_state'] == "CHANNELD_AWAITING_LOCKIN")
|
|
|
|
assert(history[0]['new_state'] == "CHANNELD_NORMAL")
|
|
|
|
assert(history[1]['cause'] == "user")
|
|
|
|
assert(history[1]['new_state'] == "CHANNELD_SHUTTING_DOWN")
|
|
|
|
assert(history[2]['cause'] == "user")
|
|
|
|
assert(history[2]['new_state'] == "CLOSINGD_SIGEXCHANGE")
|
|
|
|
assert(history[3]['cause'] == "user")
|
|
|
|
assert(history[3]['new_state'] == "CLOSINGD_COMPLETE")
|
|
|
|
assert(history[3]['message'] == "Closing complete")
|
2020-10-28 11:46:24 +01:00
|
|
|
|
|
|
|
|
2022-11-08 02:12:17 +01:00
|
|
|
@pytest.mark.developer("Gossip slow, and we test --dev-onion-reply-length")
|
2019-01-11 22:39:18 +01:00
|
|
|
def test_htlc_accepted_hook_fail(node_factory):
|
|
|
|
"""Send payments from l1 to l2, but l2 just declines everything.
|
|
|
|
|
|
|
|
l2 is configured with a plugin that'll hook into htlc_accepted and
|
|
|
|
always return failures. The same should also work for forwarded
|
|
|
|
htlcs in the second half.
|
|
|
|
|
|
|
|
"""
|
|
|
|
l1, l2, l3 = node_factory.line_graph(3, opts=[
|
|
|
|
{},
|
2022-11-08 02:12:17 +01:00
|
|
|
{'dev-onion-reply-length': 1111,
|
|
|
|
'plugin': os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')},
|
2019-01-11 22:39:18 +01:00
|
|
|
{}
|
|
|
|
], wait_for_announce=True)
|
|
|
|
|
|
|
|
# This must fail
|
2021-07-12 08:49:19 +02:00
|
|
|
inv = l2.rpc.invoice(1000, "lbl", "desc")
|
|
|
|
phash = inv['payment_hash']
|
2019-09-05 16:43:07 +02:00
|
|
|
route = l1.rpc.getroute(l2.info['id'], 1000, 1)['route']
|
|
|
|
|
|
|
|
# Here shouldn't use `pay` command because l2 rejects with WIRE_TEMPORARY_NODE_FAILURE,
|
|
|
|
# then it will be excluded when l1 try another pay attempt.
|
|
|
|
# Note if the destination is excluded, the route result is undefined.
|
2021-07-12 08:49:19 +02:00
|
|
|
l1.rpc.sendpay(route, phash, payment_secret=inv['payment_secret'])
|
2019-01-11 22:39:18 +01:00
|
|
|
with pytest.raises(RpcError) as excinfo:
|
2019-09-05 16:43:07 +02:00
|
|
|
l1.rpc.waitsendpay(phash)
|
|
|
|
assert excinfo.value.error['data']['failcode'] == 0x2002
|
2019-01-11 22:39:18 +01:00
|
|
|
assert excinfo.value.error['data']['erring_index'] == 1
|
|
|
|
|
|
|
|
# And the invoice must still be unpaid
|
|
|
|
inv = l2.rpc.listinvoices("lbl")['invoices']
|
|
|
|
assert len(inv) == 1 and inv[0]['status'] == 'unpaid'
|
|
|
|
|
|
|
|
# Now try with forwarded HTLCs: l2 should still fail them
|
|
|
|
# This must fail
|
|
|
|
inv = l3.rpc.invoice(1000, "lbl", "desc")['bolt11']
|
2019-09-05 16:43:07 +02:00
|
|
|
with pytest.raises(RpcError):
|
2019-01-11 22:39:18 +01:00
|
|
|
l1.rpc.pay(inv)
|
|
|
|
|
|
|
|
# And the invoice must still be unpaid
|
|
|
|
inv = l3.rpc.listinvoices("lbl")['invoices']
|
|
|
|
assert len(inv) == 1 and inv[0]['status'] == 'unpaid'
|
|
|
|
|
|
|
|
|
@pytest.mark.developer("without DEVELOPER=1, gossip v slow")
def test_htlc_accepted_hook_resolve(node_factory):
    """l3 creates an invoice, l2 knows the preimage and will shortcircuit.
    """
    l1, l2, l3 = node_factory.line_graph(3, opts=[
        {},
        {'plugin': os.path.join(os.getcwd(), 'tests/plugins/shortcircuit.py')},
        {}
    ], wait_for_announce=True)

    # l2's plugin resolves the HTLC itself with the known preimage, so the
    # payment succeeds without the HTLC ever reaching l3.
    inv = l3.rpc.invoice(amount_msat=1000, label="lbl", description="desc", preimage="00" * 32)['bolt11']
    l1.rpc.pay(inv)

    # And the invoice must still be unpaid
    inv = l3.rpc.listinvoices("lbl")['invoices']
    assert len(inv) == 1 and inv[0]['status'] == 'unpaid'
|
def test_htlc_accepted_hook_direct_restart(node_factory, executor):
|
|
|
|
"""l2 restarts while it is pondering what to do with an HTLC.
|
|
|
|
"""
|
|
|
|
l1, l2 = node_factory.line_graph(2, opts=[
|
|
|
|
{'may_reconnect': True},
|
2019-08-03 07:10:40 +02:00
|
|
|
{'may_reconnect': True,
|
|
|
|
'plugin': os.path.join(os.getcwd(), 'tests/plugins/hold_htlcs.py')}
|
2019-05-21 14:16:00 +02:00
|
|
|
])
|
|
|
|
|
lightningd: change `msatoshi` args to `amount_msat`.
This is consistent with our output changes, and increases consistency.
It also keeps future sanity checks happy, that we only use JSON msat
helpers with '_msat' fields.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Changelog-Changed: JSON-RPC: `invoice`, `sendonion`, `sendpay`, `pay`, `keysend`, `fetchinvoice`, `sendinvoice`: `msatoshi` argument is now called `amount_msat` to match other fields.
Changelog-Deprecated: JSON-RPC: `invoice`, `sendonion`, `sendpay`, `pay`, `keysend`, `fetchinvoice`, `sendinvoice` `msatoshi` (use `amount_msat`)
2022-06-19 09:20:11 +02:00
|
|
|
i1 = l2.rpc.invoice(amount_msat=1000, label="direct", description="desc")['bolt11']
|
2019-05-21 14:16:00 +02:00
|
|
|
f1 = executor.submit(l1.rpc.pay, i1)
|
|
|
|
|
|
|
|
l2.daemon.wait_for_log(r'Holding onto an incoming htlc for 10 seconds')
|
2021-06-02 18:04:01 +02:00
|
|
|
|
|
|
|
# Check that the status mentions the HTLC being held
|
|
|
|
l2.rpc.listpeers()
|
|
|
|
peers = l2.rpc.listpeers()['peers']
|
|
|
|
htlc_status = peers[0]['channels'][0]['htlcs'][0].get('status', None)
|
|
|
|
assert htlc_status == "Waiting for the htlc_accepted hook of plugin hold_htlcs.py"
|
|
|
|
|
2019-08-10 07:24:55 +02:00
|
|
|
needle = l2.daemon.logsearch_start
|
2019-05-21 14:16:00 +02:00
|
|
|
l2.restart()
|
|
|
|
|
2019-08-10 07:24:55 +02:00
|
|
|
# Now it should try again, *after* initializing.
|
|
|
|
# This may be before "Server started with public key" swallowed by restart()
|
|
|
|
l2.daemon.logsearch_start = needle + 1
|
|
|
|
l2.daemon.wait_for_log(r'hold_htlcs.py initializing')
|
|
|
|
l2.daemon.wait_for_log(r'Holding onto an incoming htlc for 10 seconds')
|
2019-05-21 14:16:00 +02:00
|
|
|
f1.result()
|
|
|
|
|
|
|
|
|
@pytest.mark.developer("without DEVELOPER=1, gossip v slow")
def test_htlc_accepted_hook_forward_restart(node_factory, executor):
    """l2 restarts while it is pondering what to do with an HTLC.

    The hold_htlcs.py plugin on l2 delays the forwarded HTLC and dumps the
    onion it was handed to a JSON file, which we inspect after the restart.
    """
    l1, l2, l3 = node_factory.line_graph(3, opts=[
        {'may_reconnect': True},
        {'may_reconnect': True,
         'plugin': os.path.join(os.getcwd(), 'tests/plugins/hold_htlcs.py')},
        {'may_reconnect': True},
    ], wait_for_announce=True)

    i1 = l3.rpc.invoice(amount_msat=1000, label="direct", description="desc")['bolt11']
    f1 = executor.submit(l1.dev_pay, i1, use_shadow=False)

    l2.daemon.wait_for_log(r'Holding onto an incoming htlc for 10 seconds')

    needle = l2.daemon.logsearch_start
    l2.restart()

    # Now it should try again, *after* initializing.
    # This may be before "Server started with public key" swallowed by restart()
    l2.daemon.logsearch_start = needle + 1
    l2.daemon.wait_for_log(r'hold_htlcs.py initializing')
    l2.daemon.wait_for_log(r'Holding onto an incoming htlc for 10 seconds')

    # Grab the file where the plugin wrote the onion and read it in for some
    # additional checks
    logline = l2.daemon.wait_for_log(r'Onion written to')
    fname = re.search(r'Onion written to (.*\.json)', logline).group(1)
    # Fix: use a context manager so the file handle is closed deterministically
    # (the original `json.load(open(fname))` leaked the handle).
    with open(fname) as f:
        onion = json.load(f)
    assert onion['type'] == 'tlv'
    assert re.match(r'^11020203e80401..0608................$', onion['payload'])
    assert len(onion['shared_secret']) == 64
    assert onion['forward_msat'] == Millisatoshi(1000)
    assert len(onion['next_onion']) == 2 * (1300 + 32 + 33 + 1)

    f1.result()
|
def test_warning_notification(node_factory):
    """ test 'warning' notifications

    Uses the pretend_badlog.py plugin both to emit log entries at a chosen
    level (via its 'pretendbad' RPC method) and to subscribe to the
    resulting 'warning' notifications, which it echoes back into the log.
    """
    # allow_broken_log: the 'error' level deliberately produces **BROKEN** entries.
    l1 = node_factory.get_node(options={'plugin': os.path.join(os.getcwd(), 'tests/plugins/pretend_badlog.py')}, allow_broken_log=True)

    # 1. test 'warn' level
    event = "Test warning notification(for unusual event)"
    l1.rpc.call('pretendbad', {'event': event, 'level': 'warn'})

    # ensure an unusual log_entry was produced by 'pretendunusual' method
    l1.daemon.wait_for_log('plugin-pretend_badlog.py: Test warning notification\\(for unusual event\\)')

    # now wait for notification: the plugin echoes each field of the
    # warning payload (level, time, source, log) on its own log line.
    l1.daemon.wait_for_log('plugin-pretend_badlog.py: Received warning')
    l1.daemon.wait_for_log('plugin-pretend_badlog.py: level: warn')
    l1.daemon.wait_for_log('plugin-pretend_badlog.py: time: *')
    l1.daemon.wait_for_log('plugin-pretend_badlog.py: source: plugin-pretend_badlog.py')
    l1.daemon.wait_for_log('plugin-pretend_badlog.py: log: Test warning notification\\(for unusual event\\)')

    # 2. test 'error' level, steps like above
    event = "Test warning notification(for broken event)"
    l1.rpc.call('pretendbad', {'event': event, 'level': 'error'})
    l1.daemon.wait_for_log(r'\*\*BROKEN\*\* plugin-pretend_badlog.py: Test warning notification\(for broken event\)')

    l1.daemon.wait_for_log('plugin-pretend_badlog.py: Received warning')
    l1.daemon.wait_for_log('plugin-pretend_badlog.py: level: error')
    l1.daemon.wait_for_log('plugin-pretend_badlog.py: time: *')
    l1.daemon.wait_for_log('plugin-pretend_badlog.py: source: plugin-pretend_badlog.py')
    l1.daemon.wait_for_log('plugin-pretend_badlog.py: log: Test warning notification\\(for broken event\\)')
|
|
|
2021-04-26 21:58:58 +02:00
|
|
|
@pytest.mark.developer("needs to deactivate shadow routing")
def test_invoice_payment_notification(node_factory):
    """
    Test the 'invoice_payment' notification

    l2 runs the helloworld.py example plugin, which subscribes to the
    notification and logs the label, preimage and amount it receives.
    """
    opts = [{}, {"plugin": os.path.join(os.getcwd(), "contrib/plugins/helloworld.py")}]
    l1, l2 = node_factory.line_graph(2, opts=opts)

    msats = 12345
    preimage = '1' * 64
    label = "a_descriptive_label"
    inv1 = l2.rpc.invoice(msats, label, 'description', preimage=preimage)
    # Shadow routing disabled so the amount received is exactly `msats`.
    l1.dev_pay(inv1['bolt11'], use_shadow=False)

    l2.daemon.wait_for_log(r"Received invoice_payment event for label {},"
                           " preimage {}, and amount of {}msat"
                           .format(label, preimage, msats))
|
|
|
|
|
|
|
2021-04-26 21:58:58 +02:00
|
|
|
@pytest.mark.developer("needs to deactivate shadow routing")
def test_invoice_creation_notification(node_factory):
    """
    Test the 'invoice_creation' notification

    Mirrors test_invoice_payment_notification, but the helloworld.py plugin
    should log the event as soon as the invoice is created — no payment
    is made.
    """
    opts = [{}, {"plugin": os.path.join(os.getcwd(), "contrib/plugins/helloworld.py")}]
    l1, l2 = node_factory.line_graph(2, opts=opts)

    msats = 12345
    preimage = '1' * 64
    label = "a_descriptive_label"
    l2.rpc.invoice(msats, label, 'description', preimage=preimage)

    l2.daemon.wait_for_log(r"Received invoice_creation event for label {},"
                           " preimage {}, and amount of {}msat"
                           .format(label, preimage, msats))
|
|
|
|
|
2019-07-25 15:57:53 +02:00
|
|
|
def test_channel_opened_notification(node_factory):
    """
    Test the 'channel_opened' notification sent at channel funding success.

    l2 (the fundee) runs misc_notifications.py, which logs the funder id
    and amount when the notification fires.
    """
    opts = [{}, {"plugin": os.path.join(os.getcwd(), "tests/plugins/misc_notifications.py")}]
    amount = 10**6
    l1, l2 = node_factory.line_graph(2, fundchannel=True, fundamount=amount,
                                     opts=opts)

    # Might have already passed, so reset start.
    l2.daemon.logsearch_start = 0
    l2.daemon.wait_for_log(r"A channel was opened to us by {}, "
                           "with an amount of {}*"
                           .format(l1.info["id"], amount))
2019-06-15 20:29:09 +02:00
|
|
|
|
|
|
|
|
2021-04-26 21:58:58 +02:00
|
|
|
@pytest.mark.developer("needs DEVELOPER=1")
def test_forward_event_notification(node_factory, bitcoind, executor):
    """ test 'forward_event' notifications

    Star topology around l2 (which runs forward_payment_status.py, a plugin
    that records every forward_event). Three forwards are driven through l2,
    one per terminal status:
      - offered -> settled       (paid invoice to l3)
      - offered -> failed        (unknown payment hash towards l4)
      - offered -> local_failed  (l5 permfails on-chain, HTLC times out)
    Each forward generates two events (the 'offered' one plus its
    conclusion), so the plugin sees 6 events for 3 listforwards entries.
    """
    amount = 10**8
    # l5 drops to chain on receiving the fail, forcing a unilateral close.
    disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
    plugin = os.path.join(
        os.path.dirname(__file__),
        'plugins',
        'forward_payment_status.py'
    )
    l1, l2, l3, l4, l5 = node_factory.get_nodes(5, opts=[
        {},
        {'plugin': plugin},
        {},
        {},
        {'disconnect': disconnects}])

    l1.openchannel(l2, confirm=False, wait_for_announce=False)
    l2.openchannel(l3, confirm=False, wait_for_announce=False)
    l2.openchannel(l4, confirm=False, wait_for_announce=False)
    l2.openchannel(l5, confirm=False, wait_for_announce=False)

    # Generate 5, then make sure everyone is up to date before
    # last one, otherwise they might think it's in the future!
    bitcoind.generate_block(5)
    sync_blockheight(bitcoind, [l1, l2, l3, l4, l5])
    bitcoind.generate_block(1)

    # 4 channels, announced in both directions.
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 8)

    inv = l3.rpc.invoice(amount, "first", "desc")
    payment_hash13 = inv['payment_hash']
    route = l1.rpc.getroute(l3.info['id'], amount, 1)['route']

    # status: offered -> settled
    l1.rpc.sendpay(route, payment_hash13, payment_secret=inv['payment_secret'])
    l1.rpc.waitsendpay(payment_hash13)

    # status: offered -> failed
    route = l1.rpc.getroute(l4.info['id'], amount, 1)['route']
    # l4 has no matching invoice, so the HTLC must fail.
    payment_hash14 = "f" * 64
    with pytest.raises(RpcError):
        l1.rpc.sendpay(route, payment_hash14, payment_secret="f" * 64)
        l1.rpc.waitsendpay(payment_hash14)

    # status: offered -> local_failed
    inv = l5.rpc.invoice(amount, 'onchain_timeout', 'desc')
    payment_hash15 = inv['payment_hash']
    fee = amount * 10 // 1000000 + 1
    c12 = l1.get_channel_scid(l2)
    c25 = l2.get_channel_scid(l5)
    # Underpay by 1msat at each hop so l5 rejects, triggering disconnects.
    route = [{'amount_msat': amount + fee - 1,
              'id': l2.info['id'],
              'delay': 12,
              'channel': c12},
             {'amount_msat': amount - 1,
              'id': l5.info['id'],
              'delay': 5,
              'channel': c25}]

    executor.submit(l1.rpc.sendpay, route, payment_hash15, payment_secret=inv['payment_secret'])

    l5.daemon.wait_for_log('permfail')
    l5.wait_for_channel_onchain(l2.info['id'])
    l2.bitcoin.generate_block(1)
    l2.daemon.wait_for_log(' to ONCHAIN')
    l5.daemon.wait_for_log(' to ONCHAIN')

    l2.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US .* after 6 blocks')
    bitcoind.generate_block(6)

    l2.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                   'THEIR_UNILATERAL/OUR_HTLC')

    bitcoind.generate_block(1)
    l2.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')
    l5.daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')

    # 100 blocks to bury the close fully.
    bitcoind.generate_block(100)
    sync_blockheight(bitcoind, [l2])

    stats = l2.rpc.listforwards()['forwards']
    assert len(stats) == 3
    plugin_stats = l2.rpc.call('listforwards_plugin')['forwards']
    assert len(plugin_stats) == 6

    # We don't have payment_hash in listforwards any more.
    for p in plugin_stats:
        del p['payment_hash']

    # use stats to build what we expect went to plugin.
    expect = stats[0].copy()
    # First event won't have conclusion.
    del expect['resolved_time']
    del expect['out_htlc_id']
    expect['status'] = 'offered'
    assert plugin_stats[0] == expect
    expect = stats[0].copy()
    del expect['out_htlc_id']
    assert plugin_stats[1] == expect

    expect = stats[1].copy()
    del expect['resolved_time']
    del expect['out_htlc_id']
    expect['status'] = 'offered'
    assert plugin_stats[2] == expect
    expect = stats[1].copy()
    del expect['out_htlc_id']
    assert plugin_stats[3] == expect

    expect = stats[2].copy()
    # The local_failed conclusion carries no failcode/failreason in the
    # initial 'offered' event.
    del expect['failcode']
    del expect['failreason']
    del expect['out_htlc_id']
    expect['status'] = 'offered'
    assert plugin_stats[4] == expect
    expect = stats[2].copy()
    del expect['out_htlc_id']
    assert plugin_stats[5] == expect
2019-08-03 08:25:12 +02:00
|
|
|
|
|
|
|
|
2019-06-25 10:53:25 +02:00
|
|
|
def test_sendpay_notifications(node_factory, bitcoind):
    """ test 'sendpay_success' and 'sendpay_failure' notifications

    l1 runs sendpay_notifications.py which records both notification kinds
    and exposes them via a 'listsendpays_plugin' RPC method. One payment
    succeeds; a second one fails after the l2-l3 channel is closed.
    """
    amount = 10**8
    opts = [{'plugin': os.path.join(os.getcwd(), 'tests/plugins/sendpay_notifications.py')},
            {},
            {'may_reconnect': False}]
    l1, l2, l3 = node_factory.line_graph(3, opts=opts, wait_for_announce=True)
    chanid23 = l2.get_channel_scid(l3)

    inv1 = l3.rpc.invoice(amount, "first", "desc")
    payment_hash1 = inv1['payment_hash']
    inv2 = l3.rpc.invoice(amount, "second", "desc")
    payment_hash2 = inv2['payment_hash']
    route = l1.rpc.getroute(l3.info['id'], amount, 1)['route']

    l1.rpc.sendpay(route, payment_hash1, payment_secret=inv1['payment_secret'])
    response1 = l1.rpc.waitsendpay(payment_hash1)

    # Close the second hop so the next payment along the same route fails.
    l2.rpc.close(chanid23, 1)

    l1.rpc.sendpay(route, payment_hash2, payment_secret=inv2['payment_secret'])
    with pytest.raises(RpcError) as err:
        l1.rpc.waitsendpay(payment_hash2)

    results = l1.rpc.call('listsendpays_plugin')
    assert len(results['sendpay_success']) == 1
    assert len(results['sendpay_failure']) == 1

    # Notification payloads must match what waitsendpay reported.
    assert results['sendpay_success'][0] == response1
    assert results['sendpay_failure'][0] == err.value.error
2019-09-09 02:16:53 +02:00
|
|
|
|
|
|
|
|
2020-01-07 17:15:20 +01:00
|
|
|
def test_sendpay_notifications_nowaiter(node_factory):
    """Same as test_sendpay_notifications, but nobody calls waitsendpay.

    The notifications must still be delivered to the plugin even when no
    RPC caller is waiting on the payment outcome.
    """
    opts = [{'plugin': os.path.join(os.getcwd(), 'tests/plugins/sendpay_notifications.py')},
            {},
            {'may_reconnect': False}]
    l1, l2, l3 = node_factory.line_graph(3, opts=opts, wait_for_announce=True)
    chanid23 = l2.get_channel_scid(l3)
    amount = 10**8

    inv1 = l3.rpc.invoice(amount, "first", "desc")
    payment_hash1 = inv1['payment_hash']
    inv2 = l3.rpc.invoice(amount, "second", "desc")
    payment_hash2 = inv2['payment_hash']
    route = l1.rpc.getroute(l3.info['id'], amount, 1)['route']

    l1.rpc.sendpay(route, payment_hash1, payment_secret=inv1['payment_secret'])
    l1.daemon.wait_for_log(r'Received a sendpay_success')

    # Close the second hop so the next payment along the same route fails.
    l2.rpc.close(chanid23, 1)

    l1.rpc.sendpay(route, payment_hash2, payment_secret=inv2['payment_secret'])
    l1.daemon.wait_for_log(r'Received a sendpay_failure')

    results = l1.rpc.call('listsendpays_plugin')
    assert len(results['sendpay_success']) == 1
    assert len(results['sendpay_failure']) == 1
|
|
|
|
|
|
|
|
2019-09-09 02:16:53 +02:00
|
|
|
def test_rpc_command_hook(node_factory):
    """Test the `rpc_command` hook chain.

    Two plugins register the same hook; they must be called in registration
    order, and the first non-'continue' result wins.
    """
    plugin = [
        os.path.join(os.getcwd(), "tests/plugins/rpc_command_1.py"),
        os.path.join(os.getcwd(), "tests/plugins/rpc_command_2.py")
    ]
    l1 = node_factory.get_node(options={"plugin": plugin})

    # rpc_command_2 plugin restricts using "sendpay"
    with pytest.raises(RpcError, match=r"rpc_command_2 cannot do this"):
        l1.rpc.call("sendpay")

    # Both plugins will replace calls made for the "invoice" command
    # The first will win, for the second a warning should be logged
    invoice = l1.rpc.invoice(10**6, "test_side", "test_input")
    decoded = l1.rpc.decodepay(invoice["bolt11"])
    assert decoded["description"] == "rpc_command_1 modified this description"
    l1.daemon.wait_for_log("rpc_command hook 'invoice' already modified, ignoring.")

    # Disable schema checking here!
    # (The custom listfunds reply below would not validate.)
    schemas = l1.rpc.jsonschemas
    l1.rpc.jsonschemas = {}
    # rpc_command_1 plugin sends a custom response to "listfunds"
    funds = l1.rpc.listfunds()
    assert funds[0] == "Custom rpc_command_1 result"

    # Test command redirection to a plugin
    l1.rpc.call('help', [0])

    # Check the 'already modified' warning is not logged on just 'continue'
    assert not l1.daemon.is_in_log("rpc_command hook 'listfunds' already modified, ignoring.")

    # Tests removing a chained hook in random order.
    # Note: This will get flaky by design if there's a problem.
    if bool(random.getrandbits(1)):
        l1.rpc.plugin_stop('rpc_command_2.py')
        l1.rpc.plugin_stop('rpc_command_1.py')
    else:
        l1.rpc.plugin_stop('rpc_command_1.py')
        l1.rpc.plugin_stop('rpc_command_2.py')

    # Restore schema checking for the rest of the suite.
    l1.rpc.jsonschemas = schemas
|
|
|
|
2020-01-31 18:36:26 +01:00
|
|
|
|
|
|
|
def test_libplugin(node_factory):
    """Sanity checks for plugins made with libplugin.

    Exercises startup, dynamic start/stop, datastore access (string and
    binary), option handling, hooks/notifications and RPC passthrough of
    the C test_libplugin plugin, plus its deprecated-option failure mode.
    """
    plugin = os.path.join(os.getcwd(), "tests/plugins/test_libplugin")
    # allow_broken_log: the nonstring-datastore path logs a **BROKEN** entry.
    l1 = node_factory.get_node(options={"plugin": plugin,
                                        'allow-deprecated-apis': False,
                                        'log-level': 'io'},
                               allow_broken_log=True)

    # Test startup
    assert l1.daemon.is_in_log("test_libplugin initialised!")
    assert l1.daemon.is_in_log("String name from datastore: NOT FOUND")
    assert l1.daemon.is_in_log("Hex name from datastore: NOT FOUND")

    # This will look on datastore for default, won't find it.
    assert l1.rpc.call("helloworld") == {"hello": "NOT FOUND"}
    l1.daemon.wait_for_log("get_ds_bin_done: NOT FOUND")

    # Test dynamic startup
    l1.rpc.plugin_stop(plugin)
    # Non-string datastore value:
    l1.rpc.datastore(["test_libplugin", "name"], hex="00010203")
    l1.rpc.plugin_start(plugin)
    l1.rpc.check("helloworld")

    # Name of this test binary; used to match the hook request id format.
    myname = os.path.splitext(os.path.basename(sys.argv[0]))[0]

    # Note: getmanifest always uses numeric ids, since it doesn't know
    # yet whether strings are allowed:
    l1.daemon.wait_for_log(r"test_libplugin: [0-9]*\[OUT\]")

    l1.daemon.wait_for_log("String name from datastore: NOT FOUND")
    l1.daemon.wait_for_log("Hex name from datastore: 00010203")

    # Test commands
    assert l1.rpc.call("helloworld") == {"hello": "NOT FOUND"}
    l1.daemon.wait_for_log("get_ds_bin_done: 00010203")
    l1.daemon.wait_for_log("BROKEN.* Datastore gave nonstring result.*00010203")
    assert l1.rpc.call("helloworld", {"name": "test"}) == {"hello": "test"}
    l1.stop()
    l1.daemon.opts["plugin"] = plugin
    l1.daemon.opts["somearg"] = "test_opt"
    l1.start()
    assert l1.daemon.is_in_log("somearg = test_opt")
    l1.rpc.datastore(["test_libplugin", "name"], "foobar", mode="must-replace")

    # Now the datastore holds a proper string, helloworld reports it.
    assert l1.rpc.call("helloworld") == {"hello": "foobar"}
    l1.daemon.wait_for_log("get_ds_bin_done: 666f6f626172")

    # But param takes over!
    assert l1.rpc.call("helloworld", {"name": "test"}) == {"hello": "test"}

    # Test hooks and notifications (add plugin, so we can test hook id)
    l2 = node_factory.get_node(options={"plugin": plugin, 'log-level': 'io'})
    l2.connect(l1)
    l2.daemon.wait_for_log(r': "{}:connect#[0-9]*/cln:peer_connected#[0-9]*"\[OUT\]'.format(myname))

    l1.daemon.wait_for_log("{} peer_connected".format(l2.info["id"]))
    l1.daemon.wait_for_log("{} connected".format(l2.info["id"]))

    # Test RPC calls FIXME: test concurrent ones ?
    assert l1.rpc.call("testrpc") == l1.rpc.getinfo()

    # Make sure deprecated options nor commands are mentioned.
    with pytest.raises(RpcError, match=r'Command "testrpc-deprecated" is deprecated'):
        l1.rpc.call('testrpc-deprecated')

    assert not any([h['command'] == 'testrpc-deprecated'
                    for h in l1.rpc.help()['help']])
    with pytest.raises(RpcError, match=r"Deprecated command.*testrpc-deprecated"):
        l1.rpc.help('testrpc-deprecated')

    assert 'somearg-deprecated' not in str(l1.rpc.listconfigs())

    l1.stop()
    # Deprecated option must make startup fail (deprecated APIs disallowed).
    l1.daemon.opts["somearg-deprecated"] = "test_opt"

    l1.daemon.start(wait_for_initialized=False, stderr_redir=True)
    # Will exit with failure code.
    assert l1.daemon.wait() == 1
    assert l1.daemon.is_in_stderr(r"somearg-deprecated: deprecated option")

    del l1.daemon.opts["somearg-deprecated"]
    l1.start()
|
|
|
|
|
|
|
|
|
|
|
def test_libplugin_deprecated(node_factory):
    """Sanity checks for plugins made with libplugin using deprecated args.

    With allow-deprecated-apis=True, the deprecated option and command of
    the test_libplugin plugin must still work.
    """
    plugin = os.path.join(os.getcwd(), "tests/plugins/test_libplugin")
    l1 = node_factory.get_node(options={"plugin": plugin,
                                        'somearg-deprecated': 'test_opt depr',
                                        'allow-deprecated-apis': True})

    assert l1.daemon.is_in_log("somearg = test_opt depr")
    l1.rpc.help('testrpc-deprecated')
    assert l1.rpc.call("testrpc-deprecated") == l1.rpc.getinfo()
|
|
|
|
2020-01-29 22:33:10 +01:00
|
|
|
|
2020-04-02 15:12:46 +02:00
|
|
|
@unittest.skipIf(
    not DEVELOPER or DEPRECATED_APIS, "needs LIGHTNINGD_DEV_LOG_IO and new API"
)
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_plugin_feature_announce(node_factory):
    """Check that features registered by plugins show up in messages.

    l1 is the node under test, l2 only serves as the counterparty for a
    channel to check the featurebits in the `channel_announcement`. The plugin
    registers an individual featurebit for each of the locations we can stash
    feature bits in:

    - 1 << 201 for `init` messages
    - 1 << 203 for `node_announcement`
    - 1 << 205 for bolt11 invoices

    """
    plugin = os.path.join(os.path.dirname(__file__), 'plugins/feature-test.py')
    l1, l2 = node_factory.line_graph(
        2, opts=[{'plugin': plugin, 'log-level': 'io'}, {}],
        wait_for_announce=True
    )

    # Dual-fund configs advertise extra featurebits we must account for.
    extra = []
    if l1.config('experimental-dual-fund'):
        extra.append(21)  # option-anchor-outputs
        extra.append(29)  # option-dual-fund

    # Check the featurebits we've set in the `init` message from
    # feature-test.py.
    assert l1.daemon.is_in_log(r'\[OUT\] 001000022100....{}'
                               .format(expected_peer_features(extra=[201] + extra)))

    # Check the invoice featurebit we set in feature-test.py
    inv = l1.rpc.invoice(123, 'lbl', 'desc')['bolt11']
    details = Invoice.decode(inv)
    assert(details.featurebits.int & (1 << 205) != 0)

    # Check the featurebit set in the `node_announcement`
    node = l1.rpc.listnodes(l1.info['id'])['nodes'][0]
    assert node['features'] == expected_node_features(extra=[203] + extra)
2020-02-06 17:35:18 +01:00
|
|
|
|
|
|
|
|
|
|
|
def test_hook_chaining(node_factory):
    """Check that hooks are called in order and the chain exits correctly

    We start two nodes, l2 will have two plugins registering the same hook
    (`htlc_accepted`) but handle different cases:

    - the `odd` plugin only handles the "AA"*32 preimage
    - the `even` plugin only handles the "BB"*32 preimage

    We check that plugins are called in the order they are registering the
    hook, and that they exit the call chain as soon as one plugin returns a
    result that isn't `continue`. On exiting the chain the remaining plugins
    are not called. If no plugin exits the chain we continue to handle
    internally as usual.

    """
    l1, l2 = node_factory.line_graph(2)

    # Start the plugins manually instead of specifying them on the command
    # line, otherwise we cannot guarantee the order in which the hooks are
    # registered.
    p1 = os.path.join(os.path.dirname(__file__), "plugins/hook-chain-odd.py")
    p2 = os.path.join(os.path.dirname(__file__), "plugins/hook-chain-even.py")
    l2.rpc.plugin_start(p1)
    l2.rpc.plugin_start(p2)

    preimage1 = b'\xAA' * 32
    preimage2 = b'\xBB' * 32
    preimage3 = b'\xCC' * 32
    hash1 = sha256(preimage1).hexdigest()
    hash2 = sha256(preimage2).hexdigest()
    hash3 = sha256(preimage3).hexdigest()

    inv = l2.rpc.invoice(123, 'odd', "Odd payment handled by the first plugin",
                         preimage="AA" * 32)['bolt11']
    l1.rpc.pay(inv)

    # The first plugin will handle this, the second one should not be called.
    assert(l2.daemon.is_in_log(
        r'plugin-hook-chain-odd.py: htlc_accepted called for payment_hash {}'.format(hash1)
    ))
    assert(not l2.daemon.is_in_log(
        r'plugin-hook-chain-even.py: htlc_accepted called for payment_hash {}'.format(hash1)
    ))

    # The second run is with a payment_hash that `hook-chain-even.py` knows
    # about. `hook-chain-odd.py` is called, it returns a `continue`, and then
    # `hook-chain-even.py` resolves it.
    inv = l2.rpc.invoice(
        123, 'even', "Even payment handled by the second plugin", preimage="BB" * 32
    )['bolt11']
    l1.rpc.pay(inv)
    assert(l2.daemon.is_in_log(
        r'plugin-hook-chain-odd.py: htlc_accepted called for payment_hash {}'.format(hash2)
    ))
    assert(l2.daemon.is_in_log(
        r'plugin-hook-chain-even.py: htlc_accepted called for payment_hash {}'.format(hash2)
    ))

    # And finally an invoice that neither know about, so it should get settled
    # by the internal invoice handling.
    inv = l2.rpc.invoice(123, 'neither', "Neither plugin handles this",
                         preimage="CC" * 32)['bolt11']
    l1.rpc.pay(inv)
    assert(l2.daemon.is_in_log(
        r'plugin-hook-chain-odd.py: htlc_accepted called for payment_hash {}'.format(hash3)
    ))
    assert(l2.daemon.is_in_log(
        r'plugin-hook-chain-even.py: htlc_accepted called for payment_hash {}'.format(hash3)
    ))
2020-02-06 19:00:14 +01:00
|
|
|
|
|
|
|
|
|
|
|
def test_bitcoin_backend(node_factory, bitcoind):
    """
    This tests interaction with the Bitcoin backend, but not specifically bcli

    lightningd must refuse to start unless all required Bitcoin-backend
    methods are registered, accept them even when split across plugins,
    and start normally with the stock bcli plugin re-enabled.
    """
    l1 = node_factory.get_node(start=False, options={"disable-plugin": "bcli"},
                               may_fail=True, allow_broken_log=True)

    # We don't start if we haven't all the required methods registered.
    plugin = os.path.join(os.getcwd(), "tests/plugins/bitcoin/part1.py")
    l1.daemon.opts["plugin"] = plugin
    l1.daemon.start(wait_for_initialized=False, stderr_redir=True)
    l1.daemon.wait_for_log("Missing a Bitcoin plugin command")
    # Will exit with failure code.
    assert l1.daemon.wait() == 1
    assert l1.daemon.is_in_stderr(r"Could not access the plugin for sendrawtransaction")
    # Now we should start if all the commands are registered, even if they
    # are registered by two distincts plugins.
    del l1.daemon.opts["plugin"]
    l1.daemon.opts["plugin-dir"] = os.path.join(os.getcwd(),
                                                "tests/plugins/bitcoin/")
    # (it fails when it tries to use them, so startup fails)
    l1.daemon.start(wait_for_initialized=False)
    l1.daemon.wait_for_log("All Bitcoin plugin commands registered")
    assert l1.daemon.wait() == 1

    # But restarting with just bcli is ok
    del l1.daemon.opts["plugin-dir"]
    del l1.daemon.opts["disable-plugin"]
    l1.start()
    assert l1.daemon.is_in_log("bitcoin-cli initialized and connected to"
                               " bitcoind")
|
|
|
|
|
|
|
|
|
|
|
def test_bcli(node_factory, bitcoind, chainparams):
    """
    This tests the bcli plugin, used to gather Bitcoin data from a local
    bitcoind.
    Mostly sanity checks of the interface..
    """
    l1, l2 = node_factory.get_nodes(2)

    # We cant stop it dynamically
    with pytest.raises(RpcError):
        l1.rpc.plugin_stop("bcli")

    # Failure case of feerate is tested in test_misc.py
    estimates = l1.rpc.call("estimatefees")
    for est in ["opening", "mutual_close", "unilateral_close", "delayed_to_us",
                "htlc_resolution", "penalty", "min_acceptable",
                "max_acceptable"]:
        assert est in estimates

    resp = l1.rpc.call("getchaininfo")
    assert resp["chain"] == chainparams['name']
    for field in ["headercount", "blockcount", "ibd"]:
        assert field in resp

    # We shouldn't get upset if we ask for an unknown-yet block
    resp = l1.rpc.call("getrawblockbyheight", {"height": 500})
    assert resp["blockhash"] is resp["block"] is None
    resp = l1.rpc.call("getrawblockbyheight", {"height": 50})
    # For a known block, both fields must be populated.  (Previously this
    # asserted "blockhash" twice, leaving "block" unchecked.)
    assert resp["blockhash"] is not None and resp["block"] is not None
    # Some other bitcoind-failure cases for this call are covered in
    # tests/test_misc.py

    l1.fundwallet(10**5)
    l1.connect(l2)
    fc = l1.rpc.fundchannel(l2.info["id"], 10**4 * 3)
    # The funding output must be a P2WSH ("0020" script prefix) of the
    # amount we funded (converted to msat).
    txo = l1.rpc.call("getutxout", {"txid": fc['txid'], "vout": fc['outnum']})
    assert (Millisatoshi(txo["amount"]) == Millisatoshi(10**4 * 3 * 10**3)
            and txo["script"].startswith("0020"))
    l1.rpc.close(l2.info["id"])
    # When output is spent, it should give us null !
    wait_for(lambda: l1.rpc.call("getutxout", {
        "txid": fc['txid'],
        "vout": fc['outnum']
    })['amount'] is None)

    # Garbage transactions must be rejected with a decode error.
    resp = l1.rpc.call("sendrawtransaction", {"tx": "dummy", "allowhighfees": False})
    assert not resp["success"] and "decode failed" in resp["errmsg"]
2020-02-05 23:01:28 +01:00
|
|
|
|
|
|
|
|
|
|
|
def test_hook_crash(node_factory, executor, bitcoind):
    """Verify that we fail over if a plugin crashes while handling a hook.

    We create a star topology, with l1 opening channels to the other nodes,
    and then triggering the plugins on those nodes in order to exercise the
    hook chain. p0 is the interesting plugin because as soon as it get called
    for the htlc_accepted hook it'll crash on purpose. We should still make it
    through the chain, the plugins should all be called and the payment should
    still go through.

    """
    p0 = os.path.join(os.path.dirname(__file__), "plugins/hook-crash.py")
    p1 = os.path.join(os.path.dirname(__file__), "plugins/hook-chain-odd.py")
    p2 = os.path.join(os.path.dirname(__file__), "plugins/hook-chain-even.py")
    # Each permutation places the crashing plugin (p0) at a different
    # position in the hook chain, so we exercise failover at every spot.
    perm = [
        (p0, p1, p2),  # Crashing plugin is first in chain
        (p1, p0, p2),  # Crashing plugin is in the middle of the chain
        (p1, p2, p0),  # Crashing plugin is last in chain
    ]

    l1 = node_factory.get_node()
    # One peer node per permutation; l1 is the hub of the star.
    nodes = [node_factory.get_node() for _ in perm]

    # Start them in any order and we should still always end up with each
    # plugin being called and ultimately the `pay` call should succeed:
    for plugins, n in zip(perm, nodes):
        for p in plugins:
            n.rpc.plugin_start(p)
        l1.openchannel(n, 10**6, confirm=False, wait_for_announce=False)

    # Mine final openchannel tx first.
    sync_blockheight(bitcoind, [l1] + nodes)
    mine_funding_to_announce(bitcoind, [l1] + nodes, wait_for_mempool=1)

    # Each channel shows up twice (one entry per direction).
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 2 * len(perm))

    # Start an RPC call that should error once the plugin crashes.
    f1 = executor.submit(nodes[0].rpc.hold_rpc_call)

    futures = []
    for n in nodes:
        inv = n.rpc.invoice(123, "lbl", "desc")['bolt11']
        futures.append(executor.submit(l1.rpc.pay, inv))

    # Every node must see the crash AND both chained plugins being called,
    # proving the hook chain continued past the crashed plugin.
    for n in nodes:
        n.daemon.wait_for_logs([
            r'Plugin is about to crash.',
            r'plugin-hook-chain-odd.py: htlc_accepted called for payment_hash',
            r'plugin-hook-chain-even.py: htlc_accepted called for payment_hash',
        ])

    # Collect the results:
    [f.result(TIMEOUT) for f in futures]

    # Make sure the RPC call was terminated with the correct error
    with pytest.raises(RpcError, match=r'Plugin terminated before replying'):
        f1.result(10)
|
|
|
|
|
2020-04-02 06:05:47 +02:00
|
|
|
|
2021-05-07 20:39:23 +02:00
|
|
|
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_feature_set(node_factory):
    """Check that plugins receive the node's feature sets on init.

    The show_feature_set plugin simply echoes back the `featureset` it was
    handed; each context (init/node/channel) must match what the test
    helpers compute as the expected features, and an invoice feature set
    must be present as well.
    """
    plugin = os.path.join(os.path.dirname(__file__), 'plugins/show_feature_set.py')
    l1 = node_factory.get_node(options={"plugin": plugin})

    fs = l1.rpc.call('getfeatureset')

    assert fs['init'] == expected_peer_features()
    assert fs['node'] == expected_node_features()
    assert fs['channel'] == expected_channel_features()
    assert 'invoice' in fs
|
2020-04-07 09:10:30 +02:00
|
|
|
|
|
|
|
|
|
|
|
def test_replacement_payload(node_factory):
    """Test that htlc_accepted plugin hook can replace payload"""
    plugin = os.path.join(os.path.dirname(__file__), 'plugins/replace_payload.py')
    l1, l2 = node_factory.line_graph(
        2,
        opts=[{}, {"plugin": plugin}],
        wait_for_announce=True
    )

    # Replace with an invalid payload.
    l2.rpc.call('setpayload', ['0000'])
    inv = l2.rpc.invoice(123, 'test_replacement_payload', 'test_replacement_payload')['bolt11']
    with pytest.raises(RpcError, match=r"WIRE_INVALID_ONION_PAYLOAD \(reply from remote\)"):
        l1.rpc.pay(inv)

    # Replace with valid payload, but corrupt payment_secret
    l2.rpc.call('setpayload', ['corrupt_secret'])

    with pytest.raises(RpcError, match=r"WIRE_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS \(reply from remote\)"):
        l1.rpc.pay(inv)

    # NOTE(review): "Attept" looks like a typo, but this string must match
    # what lightningd actually logs — confirm against the daemon's message
    # before "fixing" the spelling here.
    assert l2.daemon.wait_for_log("Attept to pay.*with wrong secret")
|
2020-05-05 03:13:28 +02:00
|
|
|
|
|
|
|
|
2021-04-26 21:58:58 +02:00
|
|
|
@pytest.mark.developer("Requires dev_sign_last_tx")
def test_watchtower(node_factory, bitcoind, directory, chainparams):
    """Test watchtower hook.

    l1 and l2 open a channel, make a couple of updates and then l1 cheats on
    l2 while that one is offline. The watchtower plugin meanwhile stashes all
    the penalty transactions and we release the one matching the offending
    commitment transaction.

    """
    p = os.path.join(os.path.dirname(__file__), "plugins/watchtower.py")
    # l1 is the cheater: it may crash / log broken-channel errors.
    l1, l2 = node_factory.line_graph(
        2,
        opts=[{'may_fail': True, 'allow_broken_log': True}, {'plugin': p}]
    )
    channel_id = l1.rpc.listpeers()['peers'][0]['channels'][0]['channel_id']

    # Force a new commitment
    l1.rpc.pay(l2.rpc.invoice(25000000, 'lbl1', 'desc1')['bolt11'])

    # Snapshot of l1's current (soon-to-be-revoked) commitment tx.
    tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']

    # Now make sure it is out of date
    l1.rpc.pay(l2.rpc.invoice(25000000, 'lbl2', 'desc2')['bolt11'])

    # l2 stops watching the chain, allowing the watchtower to react
    l2.stop()

    # Now l1 cheats
    bitcoind.rpc.sendrawtransaction(tx)
    time.sleep(1)
    bitcoind.generate_block(1)

    # The plugin appends one CSV line per commitment it stashed.
    wt_file = os.path.join(
        l2.daemon.lightning_dir,
        chainparams['name'],
        'watchtower.csv'
    )

    cheat_tx = bitcoind.rpc.decoderawtransaction(tx)
    lastcommitnum = 0
    for l in open(wt_file, 'r'):
        txid, penalty, channel_id_hook, commitnum = l.strip().split(', ')
        # Commit numbers must be contiguous and belong to our channel.
        assert lastcommitnum == int(commitnum)
        assert channel_id_hook == channel_id
        lastcommitnum += 1
        if txid == cheat_tx['txid']:
            # This one should succeed, since it is a response to the cheat_tx
            bitcoind.rpc.sendrawtransaction(penalty)
            break

    # Need this to check that l2 gets the funds
    penalty_meta = bitcoind.rpc.decoderawtransaction(penalty)

    time.sleep(1)
    bitcoind.generate_block(1)

    # Make sure l2's normal penalty_tx doesn't reach the network
    def mock_sendrawtransaction(tx):
        print("NOT broadcasting", tx)

    l2.daemon.rpcproxy.mock_rpc('sendrawtransaction', mock_sendrawtransaction)

    # Restart l2, and it should continue where the watchtower left off:
    l2.start()

    # l2 will still try to broadcast its latest commitment tx, but it'll fail
    # since l1 has cheated. All commitments share the same prefix, so look for
    # that.
    penalty_prefix = tx[:(4 + 1 + 36) * 2]  # version, txin_count, first txin in hex
    l2.daemon.wait_for_log(r'Expected error broadcasting tx {}'.format(penalty_prefix))

    # Now make sure the penalty output ends up in our wallet
    fund_txids = [o['txid'] for o in l2.rpc.listfunds()['outputs']]
    assert(penalty_meta['txid'] in fund_txids)
|
|
|
|
|
|
|
|
|
2020-05-05 03:13:28 +02:00
|
|
|
def test_plugin_fail(node_factory):
    """Test that a plugin which fails on its own (not during a command)
    is detected and cleaned up, both at startup and via plugin_start."""
    plugin = os.path.join(os.path.dirname(__file__), 'plugins/fail_by_itself.py')
    l1 = node_factory.get_node(options={"plugin": plugin})

    # Give the plugin time to die on its own.
    time.sleep(2)
    # It should clean up!
    assert 'failcmd' not in [h['command'] for h in l1.rpc.help()['help']]
    # Can happen *before* the 'Server started with public key'
    l1.daemon.logsearch_start = 0
    l1.daemon.wait_for_log(r': exited during normal operation')

    # Same again, but starting the plugin dynamically this time.
    l1.rpc.plugin_start(plugin)
    time.sleep(2)
    # It should clean up!
    assert 'failcmd' not in [h['command'] for h in l1.rpc.help()['help']]
    l1.daemon.wait_for_log(r': exited during normal operation')
|
2020-03-19 00:46:17 +01:00
|
|
|
|
|
|
|
|
2021-04-26 21:58:58 +02:00
|
|
|
@pytest.mark.developer("without DEVELOPER=1, gossip v slow")
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_coin_movement_notices(node_factory, bitcoind, chainparams):
    """Verify that channel coin movements are triggered correctly. """

    # Expected movements on l2 for the l1<->l2 channel (l2 is not the
    # opener, so the channel_open movement carries no credit).
    l1_l2_mvts = [
        {'type': 'chain_mvt', 'credit_msat': 0, 'debit_msat': 0, 'tags': ['channel_open']},
        {'type': 'channel_mvt', 'credit_msat': 100001001, 'debit_msat': 0, 'tags': ['routed'], 'fees_msat': '1001msat'},
        {'type': 'channel_mvt', 'credit_msat': 0, 'debit_msat': 50000000, 'tags': ['routed'], 'fees_msat': '501msat'},
        {'type': 'channel_mvt', 'credit_msat': 100000000, 'debit_msat': 0, 'tags': ['invoice'], 'fees_msat': '0msat'},
        {'type': 'channel_mvt', 'credit_msat': 0, 'debit_msat': 50000000, 'tags': ['invoice'], 'fees_msat': '0msat'},
        {'type': 'chain_mvt', 'credit_msat': 0, 'debit_msat': 100001001, 'tags': ['channel_close']},
    ]

    # Expected movements on l2 for the l2<->l3 channel (l2 opened it, so
    # the full funding amount is credited on channel_open).
    l2_l3_mvts = [
        {'type': 'chain_mvt', 'credit_msat': 1000000000, 'debit_msat': 0, 'tags': ['channel_open', 'opener']},
        {'type': 'channel_mvt', 'credit_msat': 0, 'debit_msat': 100000000, 'tags': ['routed'], 'fees_msat': '1001msat'},
        {'type': 'channel_mvt', 'credit_msat': 50000501, 'debit_msat': 0, 'tags': ['routed'], 'fees_msat': '501msat'},
        {'type': 'chain_mvt', 'credit_msat': 0, 'debit_msat': 950000501, 'tags': ['channel_close']},
    ]

    # Expected movements seen from l3's side of the l2<->l3 channel.
    l3_l2_mvts = [
        {'type': 'chain_mvt', 'credit_msat': 0, 'debit_msat': 0, 'tags': ['channel_open']},
        {'type': 'channel_mvt', 'credit_msat': 100000000, 'debit_msat': 0, 'tags': ['invoice'], 'fees_msat': '0msat'},
        {'type': 'channel_mvt', 'credit_msat': 0, 'debit_msat': 50000501, 'tags': ['invoice'], 'fees_msat': '501msat'},
        {'type': 'chain_mvt', 'credit_msat': 0, 'debit_msat': 49999499, 'tags': ['channel_close']},
    ]

    coin_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
    l1, l2, l3 = node_factory.line_graph(3, opts=[
        {'may_reconnect': True},
        {'may_reconnect': True, 'plugin': coin_plugin},
        {'may_reconnect': True, 'plugin': coin_plugin},
    ], wait_for_announce=True)

    mine_funding_to_announce(bitcoind, [l1, l2, l3])
    # 2 channels, one listchannels entry per direction.
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 4)
    amount = 10**8

    inv = l3.rpc.invoice(amount, "first", "desc")
    payment_hash13 = inv['payment_hash']
    route = l1.rpc.getroute(l3.info['id'], amount, 1)['route']

    # status: offered -> settled
    l1.rpc.sendpay(route, payment_hash13, payment_secret=inv['payment_secret'])
    l1.rpc.waitsendpay(payment_hash13)

    # status: offered -> failed
    route = l1.rpc.getroute(l3.info['id'], amount, 1)['route']
    payment_hash13 = "f" * 64
    # NOTE(review): two statements under one pytest.raises — if sendpay
    # itself raised, waitsendpay would never run; presumably the raise is
    # expected from waitsendpay.  Confirm before restructuring.
    with pytest.raises(RpcError):
        l1.rpc.sendpay(route, payment_hash13, payment_secret=inv['payment_secret'])
        l1.rpc.waitsendpay(payment_hash13)

    # go the other direction
    inv = l1.rpc.invoice(amount // 2, "first", "desc")
    payment_hash31 = inv['payment_hash']
    route = l3.rpc.getroute(l1.info['id'], amount // 2, 1)['route']
    l3.rpc.sendpay(route, payment_hash31, payment_secret=inv['payment_secret'])
    l3.rpc.waitsendpay(payment_hash31)

    # receive a payment (endpoint)
    inv = l2.rpc.invoice(amount, "first", "desc")
    payment_hash12 = inv['payment_hash']
    route = l1.rpc.getroute(l2.info['id'], amount, 1)['route']
    l1.rpc.sendpay(route, payment_hash12, payment_secret=inv['payment_secret'])
    l1.rpc.waitsendpay(payment_hash12)

    # send a payment (originator)
    inv = l1.rpc.invoice(amount // 2, "second", "desc")
    payment_hash21 = inv['payment_hash']
    route = l2.rpc.getroute(l1.info['id'], amount // 2, 1)['route']
    l2.rpc.sendpay(route, payment_hash21, payment_secret=inv['payment_secret'])
    l2.rpc.waitsendpay(payment_hash21)

    # restart to test index
    l2.restart()
    wait_for(lambda: all(p['channels'][0]['state'] == 'CHANNELD_NORMAL' for p in l2.rpc.listpeers()['peers']))

    # close the channels down
    chan1 = l2.get_channel_scid(l1)
    chan3 = l2.get_channel_scid(l3)
    chanid_1 = first_channel_id(l2, l1)
    chanid_3 = first_channel_id(l2, l3)

    l2.rpc.close(chan1)
    l2.daemon.wait_for_logs([
        ' to CLOSINGD_COMPLETE',
        'sendrawtx exit 0',
    ])
    assert account_balance(l2, chanid_1) == 100001001
    bitcoind.generate_block(6)
    sync_blockheight(bitcoind, [l2])
    l2.daemon.wait_for_log('{}.*FUNDING_TRANSACTION/FUNDING_OUTPUT->MUTUAL_CLOSE depth'.format(l1.info['id']))

    l2.rpc.close(chan3)
    l2.daemon.wait_for_logs([
        ' to CLOSINGD_COMPLETE',
        'sendrawtx exit 0',
    ])
    assert account_balance(l2, chanid_3) == 950000501
    bitcoind.generate_block(6)
    sync_blockheight(bitcoind, [l2])
    l2.daemon.wait_for_log('{}.*FUNDING_TRANSACTION/FUNDING_OUTPUT->MUTUAL_CLOSE depth'.format(l3.info['id']))
    l3.daemon.wait_for_log('Resolved FUNDING_TRANSACTION/FUNDING_OUTPUT by MUTUAL_CLOSE')

    # Ending channel balance should be zero
    assert account_balance(l2, chanid_1) == 0
    assert account_balance(l2, chanid_3) == 0

    # Verify we recorded all the movements we expect
    check_coin_moves(l3, chanid_3, l3_l2_mvts, chainparams)
    check_coin_moves(l2, chanid_1, l1_l2_mvts, chainparams)
    check_coin_moves(l2, chanid_3, l2_l3_mvts, chainparams)
|
2020-07-18 08:32:50 +02:00
|
|
|
|
|
|
|
|
|
|
|
def test_3847_repro(node_factory, bitcoind):
    """Reproduces the issue in #3847: duplicate response from plugin

    l2 holds on to HTLCs until the deadline expires. Then we allow them
    through and either should terminate the payment attempt, and the second
    would return a redundant result.

    """
    # l3 runs the hold_htlcs plugin: hold every HTLC for 11s, then fail it.
    l1, l2, l3 = node_factory.line_graph(3, opts=[
        {},
        {},
        {
            'plugin': os.path.join(os.getcwd(), 'tests/plugins/hold_htlcs.py'),
            'hold-time': 11,
            'hold-result': 'fail',
        },
    ], wait_for_announce=True)
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 4)

    # Amount sufficient to trigger the presplit modifier
    amt = 20 * 1000 * 1000

    i1 = l3.rpc.invoice(
        amount_msat=amt, label="direct", description="desc"
    )['bolt11']
    # retry_for=10 < hold-time=11, so the deadline expires while the HTLCs
    # are still being held and the payment must fail.
    with pytest.raises(RpcError):
        l1.rpc.pay(i1, retry_for=10)

    # We wait for at least two parts, and the bug would cause the `pay` plugin
    # to crash
    l1.daemon.wait_for_logs([r'Payment deadline expired, not retrying'] * 2)

    # This call to paystatus would fail if the pay plugin crashed (it's
    # provided by the plugin)
    l1.rpc.paystatus(i1)
|
2020-07-30 07:40:29 +02:00
|
|
|
|
|
|
|
|
|
|
|
def test_important_plugin(node_factory):
    """An important plugin failing must take lightningd down with it.

    Covers four phases: registration failure at startup, an important
    plugin dying immediately, running normally with the plugin disabled,
    and an important (or builtin) plugin dying later.
    """
    # Cache it here.
    pluginsdir = os.path.join(os.path.dirname(__file__), "plugins")

    n = node_factory.get_node(options={"important-plugin": os.path.join(pluginsdir, "nonexistent")},
                              may_fail=True, expect_fail=True,
                              allow_broken_log=True, start=False)

    n.daemon.start(wait_for_initialized=False, stderr_redir=True)
    # Will exit with failure code.
    assert n.daemon.wait() == 1
    assert n.daemon.is_in_stderr(r"Failed to register .*nonexistent: No such file or directory")

    # Check we exit if the important plugin dies.
    n.daemon.opts['important-plugin'] = os.path.join(pluginsdir, "fail_by_itself.py")

    n.daemon.start(wait_for_initialized=False)
    # Will exit with failure code.
    assert n.daemon.wait() == 1
    n.daemon.wait_for_log(r'fail_by_itself.py: Plugin marked as important, shutting down lightningd')

    # Check if the important plugin is disabled, we run as normal.
    n.daemon.opts['disable-plugin'] = "fail_by_itself.py"
    n.daemon.start()
    # Make sure we can call into a plugin RPC (this is from `bcli`) even
    # if fail_by_itself.py is disabled.
    n.rpc.call("estimatefees", {})
    n.stop()

    # Check if an important plugin dies later, we fail.
    del n.daemon.opts['disable-plugin']
    n.daemon.opts['important-plugin'] = os.path.join(pluginsdir, "suicidal_plugin.py")

    n.start()

    # "die" kills the plugin mid-command, so the RPC call itself errors.
    with pytest.raises(RpcError):
        n.rpc.call("die", {})

    # Should exit with exitcode 1
    n.daemon.wait_for_log('suicidal_plugin.py: Plugin marked as important, shutting down lightningd')
    assert n.daemon.wait() == 1
    n.stop()

    # Check that if a builtin plugin dies, we fail.
    start = n.daemon.logsearch_start
    n.start()
    # Reset logsearch_start, since this will predate message that start() looks for.
    n.daemon.logsearch_start = start
    line = n.daemon.wait_for_log(r'.*started\([0-9]*\).*plugins/pay')
    pidstr = re.search(r'.*started\(([0-9]*)\).*plugins/pay', line).group(1)

    # Kill pay.
    os.kill(int(pidstr), signal.SIGKILL)
    n.daemon.wait_for_log('pay: Plugin marked as important, shutting down lightningd')
    # Should exit with exitcode 1
    assert n.daemon.wait() == 1
    n.stop()
|
2020-09-09 03:33:10 +02:00
|
|
|
|
2020-08-04 09:24:42 +02:00
|
|
|
|
2021-04-26 21:58:58 +02:00
|
|
|
@pytest.mark.developer("tests developer-only option.")
def test_dev_builtin_plugins_unimportant(node_factory):
    """With dev-builtin-plugins-unimportant set, stopping a builtin
    plugin (here `pay`) must not bring the node down."""
    n = node_factory.get_node(options={"dev-builtin-plugins-unimportant": None})
    n.rpc.plugin_stop(plugin="pay")
|
2020-09-09 14:15:01 +02:00
|
|
|
|
|
|
|
|
|
|
|
def test_htlc_accepted_hook_crash(node_factory, executor):
    """Test that we do not hang incoming HTLCs if the hook plugin crashes.

    Reproduces #3748.
    """
    plugin = os.path.join(os.getcwd(), 'tests/plugins/htlc_accepted-crash.py')
    l1 = node_factory.get_node()
    l2 = node_factory.get_node(
        options={'plugin': plugin},
        allow_broken_log=True
    )
    l1.connect(l2)
    l1.fundchannel(l2)

    i = l2.rpc.invoice(500, "crashpls", "crashpls")['bolt11']

    # Pay in the background; the point is that the HTLC must be resolved
    # (failed) rather than left hanging when the hook plugin crashes.
    f = executor.submit(l1.rpc.pay, i)

    l2.daemon.wait_for_log(r'Crashing on purpose...')
    l2.daemon.wait_for_log(
        r'Hook handler for htlc_accepted failed with an exception.'
    )

    # The crash maps to a temporary node failure back at the sender.
    with pytest.raises(RpcError, match=r'failed: WIRE_TEMPORARY_NODE_FAILURE'):
        f.result(10)
|
2020-09-23 14:22:02 +02:00
|
|
|
|
|
|
|
|
2020-10-12 07:33:50 +02:00
|
|
|
def test_notify(node_factory):
    """Test that notifications from plugins get ignored"""
    plugins = [os.path.join(os.getcwd(), 'tests/plugins/notify.py'),
               os.path.join(os.getcwd(), 'tests/plugins/notify2.py')]
    l1 = node_factory.get_node(options={'plugin': plugins})

    # Via the Python RPC client only the final result comes back.
    assert l1.rpc.call('make_notify') == 'This worked'
    assert l1.rpc.call('call_make_notify') == 'This worked'

    # Via lightning-cli the progress notifications are rendered; check the
    # exact output, keeping line endings (\r for progress-bar updates).
    out = subprocess.check_output(['cli/lightning-cli',
                                   '--network={}'.format(TEST_NETWORK),
                                   '--lightning-dir={}'
                                   .format(l1.daemon.lightning_dir),
                                   'make_notify']).decode('utf-8').splitlines(keepends=True)
    assert out[0] == '# Beginning stage 1\n'
    assert out[1] == '\r'
    # Stage 1: 100 progress updates; all but the last end with \r.
    for i in range(100):
        assert out[2 + i].startswith("# Stage 1/2 {:>3}/100 |".format(1 + i))
        if i == 99:
            assert out[2 + i].endswith("|\n")
        else:
            assert out[2 + i].endswith("|\r")

    assert out[102] == '# Beginning stage 2\n'
    assert out[103] == '\r'

    # Stage 2: 10 progress updates.
    for i in range(10):
        assert out[104 + i].startswith("# Stage 2/2 {:>2}/10 |".format(1 + i))
        if i == 9:
            assert out[104 + i].endswith("|\n")
        else:
            assert out[104 + i].endswith("|\r")
    assert out[114] == '"This worked"\n'
    assert len(out) == 115

    # At debug level, we get the second prompt.
    out = subprocess.check_output(['cli/lightning-cli',
                                   '--network={}'.format(TEST_NETWORK),
                                   '--lightning-dir={}'
                                   .format(l1.daemon.lightning_dir),
                                   '-N', 'debug',
                                   'make_notify']).decode('utf-8').splitlines()
    assert out[0] == '# Beginning stage 1'
    assert out[1] == ''
    for i in range(100):
        assert out[2 + i].startswith("# Stage 1/2 {:>3}/100 |".format(1 + i))
        assert out[2 + i].endswith("|")
    assert out[102] == '# Beginning stage 2'
    assert out[103] == ''
    for i in range(10):
        assert out[104 + i].startswith("# Stage 2/2 {:>2}/10 |".format(1 + i))
        assert out[104 + i].endswith("|")
    assert out[114] == '"This worked"'
    assert len(out) == 115

    # none suppresses
    out = subprocess.check_output(['cli/lightning-cli',
                                   '--network={}'.format(TEST_NETWORK),
                                   '--lightning-dir={}'
                                   .format(l1.daemon.lightning_dir),
                                   '--notifications=none',
                                   'make_notify']).decode('utf-8').splitlines()
    assert out == ['"This worked"']
|
|
|
|
|
|
|
|
|
2020-09-23 14:22:02 +02:00
|
|
|
def test_htlc_accepted_hook_failcodes(node_factory):
    """Check the htlc_accepted hook's failure_message and legacy failcode
    return values both produce the expected failure back at the sender."""
    plugin = os.path.join(os.path.dirname(__file__), 'plugins/htlc_accepted-failcode.py')
    l1, l2 = node_factory.line_graph(2, opts=[{}, {'plugin': plugin}])

    # First let's test the newer failure_message, which should get passed
    # through without being mapped.
    # Keys are hex-encoded onion failure messages (code + payload bytes).
    tests = {
        '2002': 'WIRE_TEMPORARY_NODE_FAILURE',
        '400F' + 12 * '00': 'WIRE_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS',
        '4009': 'WIRE_REQUIRED_CHANNEL_FEATURE_MISSING',
        '4016' + 3 * '00': 'WIRE_INVALID_ONION_PAYLOAD',
    }

    for failmsg, expected in tests.items():
        l2.rpc.setfailcode(msg=failmsg)
        inv = l2.rpc.invoice(42, 'failmsg{}'.format(failmsg), '')['bolt11']
        with pytest.raises(RpcError, match=r'failcodename.: .{}.'.format(expected)):
            l1.rpc.pay(inv)

    # And now test the older failcode return value. This is deprecated and can
    # be removed once we have removed the failcode correction code in
    # peer_htlcs.c. The following ones get remapped
    tests.update({
        '400F': 'WIRE_TEMPORARY_NODE_FAILURE',
        '4009': 'WIRE_TEMPORARY_NODE_FAILURE',
        '4016': 'WIRE_TEMPORARY_NODE_FAILURE',
    })

    for failcode, expected in tests.items():
        # Do not attempt with full messages
        if len(failcode) > 4:
            continue

        l2.rpc.setfailcode(code=failcode)
        inv = l2.rpc.invoice(42, 'failcode{}'.format(failcode), '')['bolt11']
        with pytest.raises(RpcError, match=r'failcodename.: .{}.'.format(expected)):
            l1.rpc.pay(inv)
|
2020-10-30 02:13:42 +01:00
|
|
|
|
|
|
|
|
|
|
|
def test_hook_dep(node_factory):
    """Check hook ordering constraints between plugins.

    dep_a declares it must run before dep_b; dep_c has a circular
    dependency and must be refused when both others are loaded.
    """
    dep_a = os.path.join(os.path.dirname(__file__), 'plugins/dep_a.py')
    dep_b = os.path.join(os.path.dirname(__file__), 'plugins/dep_b.py')
    dep_c = os.path.join(os.path.dirname(__file__), 'plugins/dep_c.py')
    l1, l2, l3 = node_factory.line_graph(3, opts=[{},
                                                  {'plugin': dep_b},
                                                  {'plugin': [dep_a, dep_b]}])

    # l2 complains about the two unknown plugins, only.
    # (Could be already past)
    l2.daemon.logsearch_start = 0
    l2.daemon.wait_for_logs(["unknown plugin dep_a.py",
                             "unknown plugin dep_c.py"])
    # Negative lookahead: no complaint about any *other* plugin.
    assert not l2.daemon.is_in_log("unknown plugin (?!dep_a.py|dep_c.py)")
    logstart = l2.daemon.logsearch_start

    # l3 complains about the dep_c, only.
    assert l3.daemon.is_in_log("unknown plugin dep_c.py")
    assert not l3.daemon.is_in_log("unknown plugin (?!dep_c.py)")

    # A says it has to be before B.
    l2.rpc.plugin_start(plugin=dep_a)
    l2.daemon.wait_for_log(r"started.*dep_a.py")
    # Still doesn't know about c.
    assert l2.daemon.is_in_log("unknown plugin dep_c.py", logstart)

    l1.pay(l2, 100000)
    # They must be called in this order!
    l2.daemon.wait_for_log(r"dep_a.py: htlc_accepted called")
    l2.daemon.wait_for_log(r"dep_b.py: htlc_accepted called")

    # But depc will not load, due to cyclical dep
    with pytest.raises(RpcError, match=r'Cannot meet required hook dependencies'):
        l2.rpc.plugin_start(plugin=dep_c)

    # On l1 (which has neither a nor b) dep_c can load fine.
    l1.rpc.plugin_start(plugin=dep_c)
    l1.daemon.wait_for_log(r"started.*dep_c.py")

    # Complaints about unknown plugin a, but nothing else
    assert l1.daemon.is_in_log("unknown plugin dep_a.py")
    assert not l1.daemon.is_in_log("unknown plugin (?!dep_a.py)")
|
2020-11-02 03:38:01 +01:00
|
|
|
|
|
|
|
|
|
|
|
def test_hook_dep_stable(node_factory):
    """Check that hook ordering is stable for unconstrained plugins.

    Load in order A, D, E, B.  A says it has to be before B, and D says it
    has to be before E.  Both constraints are already satisfied by the load
    order, so the hooks should fire in exactly the order specified.
    """
    dep_a = os.path.join(os.path.dirname(__file__), 'plugins/dep_a.py')
    dep_b = os.path.join(os.path.dirname(__file__), 'plugins/dep_b.py')
    dep_d = os.path.join(os.path.dirname(__file__), 'plugins/dep_d.py')
    dep_e = os.path.join(os.path.dirname(__file__), 'plugins/dep_e.py')
    l1, l2 = node_factory.line_graph(2, opts=[{},
                                              {'plugin': [dep_a, dep_d, dep_e, dep_b]}])

    # dep_a mentions dep_c, but nothing else should be unknown.
    # (Could be already past)
    l2.daemon.logsearch_start = 0
    l2.daemon.wait_for_log("unknown plugin dep_c.py")
    # BUGFIX: this used to be "(?!|dep_c.py)" — the empty alternative made
    # the negative lookahead unmatchable, so the assertion was vacuously
    # true.  "(?!dep_c.py)" actually verifies that no plugin other than
    # dep_c.py was reported unknown (same pattern as test_hook_dep).
    assert not l2.daemon.is_in_log("unknown plugin (?!dep_c.py)")

    l1.pay(l2, 100000)
    # They must be called in this order!
    l2.daemon.wait_for_log(r"dep_a.py: htlc_accepted called")
    l2.daemon.wait_for_log(r"dep_d.py: htlc_accepted called")
    l2.daemon.wait_for_log(r"dep_e.py: htlc_accepted called")
    l2.daemon.wait_for_log(r"dep_b.py: htlc_accepted called")
|
2020-11-10 13:24:08 +01:00
|
|
|
|
|
|
|
|
|
|
|
def test_htlc_accepted_hook_failonion(node_factory):
    """A bogus failure_onion from the htlc_accepted hook must still fail
    the payment rather than crash or hang the sender."""
    plugin = os.path.join(os.path.dirname(__file__), 'plugins/htlc_accepted-failonion.py')
    l1, l2 = node_factory.line_graph(2, opts=[{}, {'plugin': plugin}])

    # an invalid onion
    l2.rpc.setfailonion('0' * (292 * 2))
    inv = l2.rpc.invoice(42, 'failonion000', '')['bolt11']
    with pytest.raises(RpcError):
        l1.rpc.pay(inv)
|
2020-12-14 05:58:35 +01:00
|
|
|
|
|
|
|
|
2022-09-25 15:14:12 +02:00
|
|
|
@pytest.mark.developer("Gossip without developer is slow.")
def test_htlc_accepted_hook_fwdto(node_factory):
    """The hook's forward_to field must redirect the outgoing HTLC to the
    chosen channel, even when that is the channel it arrived on."""
    plugin = os.path.join(os.path.dirname(__file__), 'plugins/htlc_accepted-fwdto.py')
    l1, l2, l3 = node_factory.line_graph(3, opts=[{}, {'plugin': plugin}, {}], wait_for_announce=True)

    # Add some balance
    l1.rpc.pay(l2.rpc.invoice(10**9 // 2, 'balance', '')['bolt11'])
    wait_for(lambda: only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['htlcs'] == [])

    # make it forward back down same channel.
    l2.rpc.setfwdto(only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['channel_id'])
    inv = l3.rpc.invoice(42, 'fwdto', '')['bolt11']
    # l1 can't decrypt an onion meant for l3, so the HMAC check fails.
    with pytest.raises(RpcError, match="WIRE_INVALID_ONION_HMAC"):
        l1.rpc.pay(inv)

    # The forward record must show it went back out over the l1 channel.
    assert l2.rpc.listforwards()['forwards'][0]['out_channel'] == only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['short_channel_id']
|
|
|
|
|
|
|
|
|
2020-12-14 05:58:35 +01:00
|
|
|
def test_dynamic_args(node_factory):
    """Options passed at dynamic plugin start are applied and removed on stop."""
    plugin_path = os.path.join(os.getcwd(), 'contrib/plugins/helloworld.py')

    node = node_factory.get_node()
    node.rpc.plugin_start(plugin_path, greeting='Test arg parsing')

    def configured():
        # listconfigs entries belonging to our dynamically started plugin.
        return [p for p in node.rpc.listconfigs()['plugins'] if p['path'] == plugin_path]

    # The greeting option must have reached the plugin...
    assert node.rpc.call("hello") == "Test arg parsing world"
    # ...and be reflected in listconfigs.
    assert only_one(configured())['options']['greeting'] == 'Test arg parsing'

    node.rpc.plugin_stop(plugin_path)

    # Once stopped, the plugin (and its options) disappear from listconfigs.
    assert configured() == []
|
2021-01-02 14:29:39 +01:00
|
|
|
|
|
|
|
|
|
|
|
def test_pyln_request_notify(node_factory):
    """pyln-client plugins can stream notifications back to the caller.

    The countdown plugin emits one progress message per step; they are only
    delivered while inside the `rpc.notify` context manager.
    """
    plugin_path = os.path.join(
        os.path.dirname(__file__), 'plugins/countdown.py'
    )
    node = node_factory.get_node(options={'plugin': plugin_path})
    received = []

    def on_message(*args, message, **kwargs):
        print("Got a notification:", message)
        received.append(message)

    # Inside the notify context every progress message is delivered.
    with node.rpc.notify(on_message):
        node.rpc.countdown(10)

    assert received == ['{}/10'.format(i) for i in range(10)]

    # Calling without the context manager we should not get any notifications
    received = []
    node.rpc.countdown(10)
    assert received == []
|
2021-01-13 04:00:24 +01:00
|
|
|
|
|
|
|
|
|
|
|
def test_self_disable(node_factory):
    """A plugin may disable itself (at getmanifest or at init) without penalty.

    Even `important-plugin`s that self-disable must not take the node down,
    and self-disabled plugins must be absent from the active plugin list.
    """
    # This disables in response to getmanifest.
    p1 = os.path.join(
        os.path.dirname(__file__), 'plugins/test_selfdisable_after_getmanifest'
    )
    # This disables in response to init.
    p2 = os.path.join(os.getcwd(), "tests/plugins/test_libplugin")
    pydisable = os.path.join(
        os.path.dirname(__file__), 'plugins/selfdisable.py'
    )
    l1 = node_factory.get_node(options={'important-plugin': [p1, p2],
                                        'plugin': pydisable,
                                        'selfdisable': None})

    # Could happen before it gets set up.
    l1.daemon.logsearch_start = 0
    l1.daemon.wait_for_logs(['test_selfdisable_after_getmanifest: .* disabled itself: Self-disable test after getmanifest',
                             'test_libplugin: .* disabled itself at init: Disabled via selfdisable option',
                             'selfdisable.py: .* disabled itself at init: init saying disable'])

    # None of the self-disabled plugins may appear in the active list.
    for disabled_path in (p1, p2, pydisable):
        assert disabled_path not in [p['name'] for p in l1.rpc.plugin_list()['plugins']]

    # Also works with dynamic load attempts
    with pytest.raises(RpcError, match="Self-disable test after getmanifest"):
        l1.rpc.plugin_start(p1)

    # Also works with dynamic load attempts
    with pytest.raises(RpcError, match="Disabled via selfdisable option"):
        l1.rpc.plugin_start(p2, selfdisable=True)
|
2021-04-27 15:15:09 +02:00
|
|
|
|
|
|
|
|
|
|
|
def test_custom_notification_topics(node_factory):
    """Announced custom notification topics are forwarded; unannounced ones are dropped."""
    notif_plugin = os.path.join(
        os.path.dirname(__file__), "plugins", "custom_notifications.py"
    )
    l1, l2 = node_factory.line_graph(2, opts=[{'plugin': notif_plugin}, {}])

    # A topic announced in the manifest gets forwarded to subscribers.
    l1.rpc.emit()
    l1.daemon.wait_for_log(r'Got a custom notification Hello world')

    # Built-in notifications (pay_success) still work alongside custom ones.
    l1.rpc.pay(l2.rpc.invoice(42, "lbl", "desc")['bolt11'])
    l1.daemon.wait_for_log(r'Got a pay_success notification from plugin pay for payment_hash [0-9a-f]{64}')

    # And now make sure that we drop unannounced notifications
    l1.rpc.faulty_emit()
    l1.daemon.wait_for_log(
        r"Plugin attempted to send a notification to topic .* not forwarding"
    )
    time.sleep(1)
    assert not l1.daemon.is_in_log(r'Got the ididntannouncethis event')

    # The plugin just did what previously was a fatal mistake (emit
    # an unknown notification), make sure we didn't kill it.
    assert 'custom_notifications.py' in [p['name'] for p in l1.rpc.listconfigs()['plugins']]
|
2021-06-18 11:03:15 +02:00
|
|
|
|
|
|
|
|
|
|
|
def test_restart_on_update(node_factory):
    """Tests if plugin rescan restarts modified plugins.

    Writes a minimal plugin into the node's lightning_dir, starts the node,
    and verifies that `plugin rescan` restarts the plugin only when the
    file's content actually changed.
    """
    # we need to write plugin content dynamically
    content = """#!/usr/bin/env python3
from pyln.client import Plugin
import time

plugin = Plugin()


@plugin.init()
def init(options, configuration, plugin):
    plugin.log("test_restart_on_update %s")

plugin.run()
"""

    def _write_plugin(path, version):
        # (Re)write the plugin with the given version marker, using a
        # context manager instead of the previous open/close pair, and
        # keep it executable so lightningd will load it.
        with open(path, 'w+') as f:
            f.write(content % version)
        os.chmod(path, os.stat(path).st_mode | stat.S_IEXEC)

    # get a node that is not started so we can put a plugin in its lightning_dir
    n = node_factory.get_node(start=False)
    # Checksumming must be enabled, or changed plugins go undetected.
    if "dev-no-plugin-checksum" in n.daemon.opts:
        del n.daemon.opts["dev-no-plugin-checksum"]

    lndir = n.daemon.lightning_dir

    # write hello world plugin to lndir/plugins
    os.makedirs(os.path.join(lndir, 'plugins'), exist_ok=True)
    path = os.path.join(lndir, 'plugins', 'test_restart_on_update.py')
    _write_plugin(path, "1")

    # now fire up the node and wait for the plugin to print hello
    n.daemon.start()
    n.daemon.logsearch_start = 0
    n.daemon.wait_for_log(r"test_restart_on_update 1")

    # a rescan should not yet reload the plugin on the same file
    n.rpc.plugin_rescan()
    assert not n.daemon.is_in_log(r"Plugin changed, needs restart.")

    # modify the file
    _write_plugin(path, "2")

    # rescan and check
    n.rpc.plugin_rescan()
    n.daemon.wait_for_log(r"Plugin changed, needs restart.")
    n.daemon.wait_for_log(r"test_restart_on_update 2")
    n.stop()
|
2021-09-03 12:16:21 +02:00
|
|
|
|
|
|
|
|
|
|
|
def test_plugin_shutdown(node_factory):
    """test 'shutdown' notifications, via `plugin stop` or via `stop`"""
    libplugin = os.path.join(os.getcwd(), "tests/plugins/test_libplugin")
    misc_plugin = os.path.join(os.getcwd(), 'tests/plugins/misc_notifications.py')
    l1 = node_factory.get_node(options={'plugin': [libplugin, misc_plugin]})

    # A cooperating plugin gets the shutdown notification and exits cleanly.
    l1.rpc.plugin_stop(libplugin)
    # FIXME: clean this up!
    l1.daemon.wait_for_logs([r"test_libplugin: shutdown called",
                             r"test_libplugin: Killing plugin: exited during normal operation"])

    # Via `plugin stop` it can make RPC calls before it (self-)terminates
    l1.rpc.plugin_stop(misc_plugin)
    l1.daemon.wait_for_log(r'misc_notifications.py: via plugin stop, datastore success')
    l1.rpc.plugin_start(misc_plugin)

    # Now try timeout via `plugin stop`
    l1.rpc.plugin_start(libplugin, dont_shutdown=True)
    l1.rpc.plugin_stop(libplugin)
    l1.daemon.wait_for_logs([r"test_libplugin: shutdown called",
                             r"test_libplugin: Timeout on shutdown: killing anyway"])

    # Now, should also shutdown or timeout on finish, RPC calls then fail with error code -5
    l1.rpc.plugin_start(libplugin, dont_shutdown=True)
    l1.rpc.stop()
    l1.daemon.wait_for_logs(['test_libplugin: shutdown called',
                             'misc_notifications.py: .* Connection refused',
                             'test_libplugin: failed to self-terminate in time, killing.'])
|
2022-07-16 15:18:21 +02:00
|
|
|
|
|
|
|
|
2022-07-16 15:18:27 +02:00
|
|
|
def test_commando(node_factory, executor):
    """Exercise the commando plugin: run RPC commands on a remote peer.

    l2 drives commands on l1 over the peer connection; access is gated by
    runes issued via `commando-rune`.  Also exercises multi-message framing
    in both directions and error propagation (including error `data`).
    """
    l1, l2 = node_factory.line_graph(2, fundchannel=False,
                                     opts={'log-level': 'io'})

    # Nothing works until we've issued a rune.
    fut = executor.submit(l2.rpc.call, method='commando',
                          payload={'peer_id': l1.info['id'],
                                   'method': 'listpeers'})
    with pytest.raises(concurrent.futures.TimeoutError):
        fut.result(10)

    rune = l1.rpc.commando_rune()['rune']

    # Bad rune fails
    with pytest.raises(RpcError, match="Not authorized: Not derived from master"):
        l2.rpc.call(method='commando',
                    payload={'peer_id': l1.info['id'],
                             'rune': 'VXY4AAkrPyH2vzSvOHnI7PDVfS6O04bRQLUCIUFJD5Y9NjQmbWV0aG9kPWludm9pY2UmcmF0ZT0yMZ==',
                             'method': 'listpeers'})

    # This works
    res = l2.rpc.call(method='commando',
                      payload={'peer_id': l1.info['id'],
                               'rune': rune,
                               'method': 'listpeers'})
    assert len(res['peers']) == 1
    assert res['peers'][0]['id'] == l2.info['id']

    # Check JSON id is as expected (unfortunately pytest does not use a reliable name
    # for itself: with -k it calls itself `-c` here, instead of `pytest`).
    l2.daemon.wait_for_log(r'plugin-commando: "[^:/]*:commando#[0-9]*/cln:commando#[0-9]*"\[OUT\]')
    l1.daemon.wait_for_log(r'jsonrpc#[0-9]*: "[^:/]*:commando#[0-9]*/cln:commando#[0-9]*/commando:listpeers#[0-9]*"\[IN\]')

    # Named parameters are passed through to the remote command.
    res = l2.rpc.call(method='commando',
                      payload={'peer_id': l1.info['id'],
                               'rune': rune,
                               'method': 'listpeers',
                               'params': {'id': l2.info['id']}})
    assert len(res['peers']) == 1
    assert res['peers'][0]['id'] == l2.info['id']

    # Filter test
    res = l2.rpc.call(method='commando',
                      payload={'peer_id': l1.info['id'],
                               'rune': rune,
                               'method': 'listpeers',
                               'filter': {'peers': [{'id': True}]}})
    assert res == {'peers': [{'id': l2.info['id']}]}

    # Remote-side parameter validation errors are relayed back verbatim.
    with pytest.raises(RpcError, match='missing required parameter'):
        l2.rpc.call(method='commando',
                    payload={'peer_id': l1.info['id'],
                             'rune': rune,
                             'method': 'withdraw'})

    with pytest.raises(RpcError, match='unknown parameter: foobar'):
        l2.rpc.call(method='commando',
                    payload={'peer_id': l1.info['id'],
                             'method': 'invoice',
                             'rune': rune,
                             'params': {'foobar': 1}})

    ret = l2.rpc.call(method='commando',
                      payload={'peer_id': l1.info['id'],
                               'rune': rune,
                               'method': 'ping',
                               'params': {'id': l2.info['id']}})
    assert 'totlen' in ret

    # Now, reply will go over a multiple messages!
    ret = l2.rpc.call(method='commando',
                      payload={'peer_id': l1.info['id'],
                               'rune': rune,
                               'method': 'getlog',
                               'params': {'level': 'io'}})

    # An io-level log is large enough to exceed a single 65535-byte message.
    assert len(json.dumps(ret)) > 65535

    # Command will go over multiple messages.
    ret = l2.rpc.call(method='commando',
                      payload={'peer_id': l1.info['id'],
                               'rune': rune,
                               'method': 'invoice',
                               'params': {'amount_msat': 'any',
                                          'label': 'label',
                                          'description': 'A' * 200000,
                                          'deschashonly': True}})

    assert 'bolt11' in ret

    # This will fail, will include data.
    with pytest.raises(RpcError, match='No connection to first peer found') as exc_info:
        l2.rpc.call(method='commando',
                    payload={'peer_id': l1.info['id'],
                             'rune': rune,
                             'method': 'sendpay',
                             'params': {'route': [{'amount_msat': 1000,
                                                   'id': l1.info['id'],
                                                   'delay': 12,
                                                   'channel': '1x2x3'}],
                                        'payment_hash': '00' * 32}})
    # The error `data` field survives the trip back through commando.
    assert exc_info.value.error['data']['erring_index'] == 0
|
2022-07-16 15:18:27 +02:00
|
|
|
|
|
|
|
|
|
|
|
def test_commando_rune(node_factory):
|
2023-01-03 05:23:28 +01:00
|
|
|
l1, l2 = node_factory.get_nodes(2)
|
2022-08-10 06:09:57 +02:00
|
|
|
|
|
|
|
# Force l1's commando secret
|
|
|
|
l1.rpc.datastore(key=['commando', 'secret'], hex='1241faef85297127c2ac9bde95421b2c51e5218498ae4901dc670c974af4284b')
|
|
|
|
l1.restart()
|
|
|
|
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
|
2022-07-16 15:18:27 +02:00
|
|
|
|
|
|
|
# I put that into a test node's commando.py to generate these runes (modified readonly to match ours):
|
|
|
|
# $ l1-cli commando-rune
|
|
|
|
# "rune": "zKc2W88jopslgUBl0UE77aEe5PNCLn5WwqSusU_Ov3A9MA=="
|
|
|
|
# $ l1-cli commando-rune restrictions=readonly
|
|
|
|
# "rune": "1PJnoR9a7u4Bhglj2s7rVOWqRQnswIwUoZrDVMKcLTY9MSZtZXRob2RebGlzdHxtZXRob2ReZ2V0fG1ldGhvZD1zdW1tYXJ5Jm1ldGhvZC9saXN0ZGF0YXN0b3Jl"
|
commando: make rune alternatives a JSON array.
This avoids having to escape | or &, though we still allow that for
the deprecation period.
To detect deprecated usage, we insist that alternatives are *always*
an array (which could be loosened later), but that also means that
restrictions must *always* be an array for now.
Before:
```
# invoice, description either A or B
lightning-cli commando-rune '["method=invoice","pnamedescription=A|pnamedescription=B"]'
# invoice, description literally 'A|B'
lightning-cli commando-rune '["method=invoice","pnamedescription=A\\|B"]'
```
After:
```
# invoice, description either A or B
lightning-cli commando-rune '[["method=invoice"],["pnamedescription=A", "pnamedescription=B"]]'
# invoice, description literally 'A|B'
lightning-cli commando-rune '[["method=invoice"],["pnamedescription=A|B"]]'
```
Changelog-Deprecated: JSON-RPC: `commando-rune` restrictions is always an array, each element an array of alternatives. Replaces a string with `|`-separators, so no escaping necessary except for `\\`.
2022-09-14 07:30:26 +02:00
|
|
|
# $ l1-cli commando-rune restrictions='[[time>1656675211]]'
|
2022-07-16 15:18:27 +02:00
|
|
|
# "rune": "RnlWC4lwBULFaObo6ZP8jfqYRyTbfWPqcMT3qW-Wmso9MiZ0aW1lPjE2NTY2NzUyMTE="
|
commando: make rune alternatives a JSON array.
This avoids having to escape | or &, though we still allow that for
the deprecation period.
To detect deprecated usage, we insist that alternatives are *always*
an array (which could be loosened later), but that also means that
restrictions must *always* be an array for now.
Before:
```
# invoice, description either A or B
lightning-cli commando-rune '["method=invoice","pnamedescription=A|pnamedescription=B"]'
# invoice, description literally 'A|B'
lightning-cli commando-rune '["method=invoice","pnamedescription=A\\|B"]'
```
After:
```
# invoice, description either A or B
lightning-cli commando-rune '[["method=invoice"],["pnamedescription=A", "pnamedescription=B"]]'
# invoice, description literally 'A|B'
lightning-cli commando-rune '[["method=invoice"],["pnamedescription=A|B"]]'
```
Changelog-Deprecated: JSON-RPC: `commando-rune` restrictions is always an array, each element an array of alternatives. Replaces a string with `|`-separators, so no escaping necessary except for `\\`.
2022-09-14 07:30:26 +02:00
|
|
|
# $ l1-cli commando-rune restrictions='[["id^022d223620a359a47ff7"],["method=listpeers"]]'
|
2022-07-16 15:18:27 +02:00
|
|
|
# "rune": "lXFWzb51HjWxKV5TmfdiBgd74w0moeyChj3zbLoxmws9MyZpZF4wMjJkMjIzNjIwYTM1OWE0N2ZmNyZtZXRob2Q9bGlzdHBlZXJz"
|
commando: make rune alternatives a JSON array.
This avoids having to escape | or &, though we still allow that for
the deprecation period.
To detect deprecated usage, we insist that alternatives are *always*
an array (which could be loosened later), but that also means that
restrictions must *always* be an array for now.
Before:
```
# invoice, description either A or B
lightning-cli commando-rune '["method=invoice","pnamedescription=A|pnamedescription=B"]'
# invoice, description literally 'A|B'
lightning-cli commando-rune '["method=invoice","pnamedescription=A\\|B"]'
```
After:
```
# invoice, description either A or B
lightning-cli commando-rune '[["method=invoice"],["pnamedescription=A", "pnamedescription=B"]]'
# invoice, description literally 'A|B'
lightning-cli commando-rune '[["method=invoice"],["pnamedescription=A|B"]]'
```
Changelog-Deprecated: JSON-RPC: `commando-rune` restrictions is always an array, each element an array of alternatives. Replaces a string with `|`-separators, so no escaping necessary except for `\\`.
2022-09-14 07:30:26 +02:00
|
|
|
# $ l1-cli commando-rune lXFWzb51HjWxKV5TmfdiBgd74w0moeyChj3zbLoxmws9MyZpZF4wMjJkMjIzNjIwYTM1OWE0N2ZmNyZtZXRob2Q9bGlzdHBlZXJz '[pnamelevel!,pnamelevel/io]'
|
2022-07-16 15:18:27 +02:00
|
|
|
# "rune": "Dw2tzGCoUojAyT0JUw7fkYJYqExpEpaDRNTkyvWKoJY9MyZpZF4wMjJkMjIzNjIwYTM1OWE0N2ZmNyZtZXRob2Q9bGlzdHBlZXJzJnBuYW1lbGV2ZWwhfHBuYW1lbGV2ZWwvaW8="
|
|
|
|
|
|
|
|
rune1 = l1.rpc.commando_rune()
|
|
|
|
assert rune1['rune'] == 'zKc2W88jopslgUBl0UE77aEe5PNCLn5WwqSusU_Ov3A9MA=='
|
|
|
|
assert rune1['unique_id'] == '0'
|
|
|
|
rune2 = l1.rpc.commando_rune(restrictions="readonly")
|
|
|
|
assert rune2['rune'] == '1PJnoR9a7u4Bhglj2s7rVOWqRQnswIwUoZrDVMKcLTY9MSZtZXRob2RebGlzdHxtZXRob2ReZ2V0fG1ldGhvZD1zdW1tYXJ5Jm1ldGhvZC9saXN0ZGF0YXN0b3Jl'
|
|
|
|
assert rune2['unique_id'] == '1'
|
commando: make rune alternatives a JSON array.
This avoids having to escape | or &, though we still allow that for
the deprecation period.
To detect deprecated usage, we insist that alternatives are *always*
an array (which could be loosened later), but that also means that
restrictions must *always* be an array for now.
Before:
```
# invoice, description either A or B
lightning-cli commando-rune '["method=invoice","pnamedescription=A|pnamedescription=B"]'
# invoice, description literally 'A|B'
lightning-cli commando-rune '["method=invoice","pnamedescription=A\\|B"]'
```
After:
```
# invoice, description either A or B
lightning-cli commando-rune '[["method=invoice"],["pnamedescription=A", "pnamedescription=B"]]'
# invoice, description literally 'A|B'
lightning-cli commando-rune '[["method=invoice"],["pnamedescription=A|B"]]'
```
Changelog-Deprecated: JSON-RPC: `commando-rune` restrictions is always an array, each element an array of alternatives. Replaces a string with `|`-separators, so no escaping necessary except for `\\`.
2022-09-14 07:30:26 +02:00
|
|
|
rune3 = l1.rpc.commando_rune(restrictions=[["time>1656675211"]])
|
2022-07-16 15:18:27 +02:00
|
|
|
assert rune3['rune'] == 'RnlWC4lwBULFaObo6ZP8jfqYRyTbfWPqcMT3qW-Wmso9MiZ0aW1lPjE2NTY2NzUyMTE='
|
|
|
|
assert rune3['unique_id'] == '2'
|
commando: make rune alternatives a JSON array.
This avoids having to escape | or &, though we still allow that for
the deprecation period.
To detect deprecated usage, we insist that alternatives are *always*
an array (which could be loosened later), but that also means that
restrictions must *always* be an array for now.
Before:
```
# invoice, description either A or B
lightning-cli commando-rune '["method=invoice","pnamedescription=A|pnamedescription=B"]'
# invoice, description literally 'A|B'
lightning-cli commando-rune '["method=invoice","pnamedescription=A\\|B"]'
```
After:
```
# invoice, description either A or B
lightning-cli commando-rune '[["method=invoice"],["pnamedescription=A", "pnamedescription=B"]]'
# invoice, description literally 'A|B'
lightning-cli commando-rune '[["method=invoice"],["pnamedescription=A|B"]]'
```
Changelog-Deprecated: JSON-RPC: `commando-rune` restrictions is always an array, each element an array of alternatives. Replaces a string with `|`-separators, so no escaping necessary except for `\\`.
2022-09-14 07:30:26 +02:00
|
|
|
rune4 = l1.rpc.commando_rune(restrictions=[["id^022d223620a359a47ff7"], ["method=listpeers"]])
|
2022-07-16 15:18:27 +02:00
|
|
|
assert rune4['rune'] == 'lXFWzb51HjWxKV5TmfdiBgd74w0moeyChj3zbLoxmws9MyZpZF4wMjJkMjIzNjIwYTM1OWE0N2ZmNyZtZXRob2Q9bGlzdHBlZXJz'
|
|
|
|
assert rune4['unique_id'] == '3'
|
commando: make rune alternatives a JSON array.
This avoids having to escape | or &, though we still allow that for
the deprecation period.
To detect deprecated usage, we insist that alternatives are *always*
an array (which could be loosened later), but that also means that
restrictions must *always* be an array for now.
Before:
```
# invoice, description either A or B
lightning-cli commando-rune '["method=invoice","pnamedescription=A|pnamedescription=B"]'
# invoice, description literally 'A|B'
lightning-cli commando-rune '["method=invoice","pnamedescription=A\\|B"]'
```
After:
```
# invoice, description either A or B
lightning-cli commando-rune '[["method=invoice"],["pnamedescription=A", "pnamedescription=B"]]'
# invoice, description literally 'A|B'
lightning-cli commando-rune '[["method=invoice"],["pnamedescription=A|B"]]'
```
Changelog-Deprecated: JSON-RPC: `commando-rune` restrictions is always an array, each element an array of alternatives. Replaces a string with `|`-separators, so no escaping necessary except for `\\`.
2022-09-14 07:30:26 +02:00
|
|
|
rune5 = l1.rpc.commando_rune(rune4['rune'], [["pnamelevel!", "pnamelevel/io"]])
|
2022-07-16 15:18:27 +02:00
|
|
|
assert rune5['rune'] == 'Dw2tzGCoUojAyT0JUw7fkYJYqExpEpaDRNTkyvWKoJY9MyZpZF4wMjJkMjIzNjIwYTM1OWE0N2ZmNyZtZXRob2Q9bGlzdHBlZXJzJnBuYW1lbGV2ZWwhfHBuYW1lbGV2ZWwvaW8='
|
|
|
|
assert rune5['unique_id'] == '3'
|
commando: make rune alternatives a JSON array.
This avoids having to escape | or &, though we still allow that for
the deprecation period.
To detect deprecated usage, we insist that alternatives are *always*
an array (which could be loosened later), but that also means that
restrictions must *always* be an array for now.
Before:
```
# invoice, description either A or B
lightning-cli commando-rune '["method=invoice","pnamedescription=A|pnamedescription=B"]'
# invoice, description literally 'A|B'
lightning-cli commando-rune '["method=invoice","pnamedescription=A\\|B"]'
```
After:
```
# invoice, description either A or B
lightning-cli commando-rune '[["method=invoice"],["pnamedescription=A", "pnamedescription=B"]]'
# invoice, description literally 'A|B'
lightning-cli commando-rune '[["method=invoice"],["pnamedescription=A|B"]]'
```
Changelog-Deprecated: JSON-RPC: `commando-rune` restrictions is always an array, each element an array of alternatives. Replaces a string with `|`-separators, so no escaping necessary except for `\\`.
2022-09-14 07:30:26 +02:00
|
|
|
rune6 = l1.rpc.commando_rune(rune5['rune'], [["parr1!", "parr1/io"]])
|
2022-07-16 15:18:27 +02:00
|
|
|
assert rune6['rune'] == '2Wh6F4R51D3esZzp-7WWG51OhzhfcYKaaI8qiIonaHE9MyZpZF4wMjJkMjIzNjIwYTM1OWE0N2ZmNyZtZXRob2Q9bGlzdHBlZXJzJnBuYW1lbGV2ZWwhfHBuYW1lbGV2ZWwvaW8mcGFycjEhfHBhcnIxL2lv'
|
|
|
|
assert rune6['unique_id'] == '3'
|
commando: make rune alternatives a JSON array.
This avoids having to escape | or &, though we still allow that for
the deprecation period.
To detect deprecated usage, we insist that alternatives are *always*
an array (which could be loosened later), but that also means that
restrictions must *always* be an array for now.
Before:
```
# invoice, description either A or B
lightning-cli commando-rune '["method=invoice","pnamedescription=A|pnamedescription=B"]'
# invoice, description literally 'A|B'
lightning-cli commando-rune '["method=invoice","pnamedescription=A\\|B"]'
```
After:
```
# invoice, description either A or B
lightning-cli commando-rune '[["method=invoice"],["pnamedescription=A", "pnamedescription=B"]]'
# invoice, description literally 'A|B'
lightning-cli commando-rune '[["method=invoice"],["pnamedescription=A|B"]]'
```
Changelog-Deprecated: JSON-RPC: `commando-rune` restrictions is always an array, each element an array of alternatives. Replaces a string with `|`-separators, so no escaping necessary except for `\\`.
2022-09-14 07:30:26 +02:00
|
|
|
rune7 = l1.rpc.commando_rune(restrictions=[["pnum=0"]])
|
2022-07-16 15:18:27 +02:00
|
|
|
assert rune7['rune'] == 'QJonN6ySDFw-P5VnilZxlOGRs_tST1ejtd-bAYuZfjk9NCZwbnVtPTA='
|
|
|
|
assert rune7['unique_id'] == '4'
|
commando: make rune alternatives a JSON array.
This avoids having to escape | or &, though we still allow that for
the deprecation period.
To detect deprecated usage, we insist that alternatives are *always*
an array (which could be loosened later), but that also means that
restrictions must *always* be an array for now.
Before:
```
# invoice, description either A or B
lightning-cli commando-rune '["method=invoice","pnamedescription=A|pnamedescription=B"]'
# invoice, description literally 'A|B'
lightning-cli commando-rune '["method=invoice","pnamedescription=A\\|B"]'
```
After:
```
# invoice, description either A or B
lightning-cli commando-rune '[["method=invoice"],["pnamedescription=A", "pnamedescription=B"]]'
# invoice, description literally 'A|B'
lightning-cli commando-rune '[["method=invoice"],["pnamedescription=A|B"]]'
```
Changelog-Deprecated: JSON-RPC: `commando-rune` restrictions is always an array, each element an array of alternatives. Replaces a string with `|`-separators, so no escaping necessary except for `\\`.
2022-09-14 07:30:26 +02:00
|
|
|
rune8 = l1.rpc.commando_rune(rune7['rune'], [["rate=3"]])
|
2022-07-16 15:18:27 +02:00
|
|
|
assert rune8['rune'] == 'kSYFx6ON9hr_ExcQLwVkm1ABnvc1TcMFBwLrAVee0EA9NCZwbnVtPTAmcmF0ZT0z'
|
|
|
|
assert rune8['unique_id'] == '4'
|
commando: make rune alternatives a JSON array.
This avoids having to escape | or &, though we still allow that for
the deprecation period.
To detect deprecated usage, we insist that alternatives are *always*
an array (which could be loosened later), but that also means that
restrictions must *always* be an array for now.
Before:
```
# invoice, description either A or B
lightning-cli commando-rune '["method=invoice","pnamedescription=A|pnamedescription=B"]'
# invoice, description literally 'A|B'
lightning-cli commando-rune '["method=invoice","pnamedescription=A\\|B"]'
```
After:
```
# invoice, description either A or B
lightning-cli commando-rune '[["method=invoice"],["pnamedescription=A", "pnamedescription=B"]]'
# invoice, description literally 'A|B'
lightning-cli commando-rune '[["method=invoice"],["pnamedescription=A|B"]]'
```
Changelog-Deprecated: JSON-RPC: `commando-rune` restrictions is always an array, each element an array of alternatives. Replaces a string with `|`-separators, so no escaping necessary except for `\\`.
2022-09-14 07:30:26 +02:00
|
|
|
rune9 = l1.rpc.commando_rune(rune8['rune'], [["rate=1"]])
|
2022-07-16 15:18:27 +02:00
|
|
|
assert rune9['rune'] == 'O8Zr-ULTBKO3_pKYz0QKE9xYl1vQ4Xx9PtlHuist9Rk9NCZwbnVtPTAmcmF0ZT0zJnJhdGU9MQ=='
|
|
|
|
assert rune9['unique_id'] == '4'
|
2022-07-16 15:18:27 +02:00
|
|
|
|
2022-09-12 23:14:20 +02:00
|
|
|
# Test rune with \|.
|
commando: make rune alternatives a JSON array.
This avoids having to escape | or &, though we still allow that for
the deprecation period.
To detect deprecated usage, we insist that alternatives are *always*
an array (which could be loosened later), but that also means that
restrictions must *always* be an array for now.
Before:
```
# invoice, description either A or B
lightning-cli commando-rune '["method=invoice","pnamedescription=A|pnamedescription=B"]'
# invoice, description literally 'A|B'
lightning-cli commando-rune '["method=invoice","pnamedescription=A\\|B"]'
```
After:
```
# invoice, description either A or B
lightning-cli commando-rune '[["method=invoice"],["pnamedescription=A", "pnamedescription=B"]]'
# invoice, description literally 'A|B'
lightning-cli commando-rune '[["method=invoice"],["pnamedescription=A|B"]]'
```
Changelog-Deprecated: JSON-RPC: `commando-rune` restrictions is always an array, each element an array of alternatives. Replaces a string with `|`-separators, so no escaping necessary except for `\\`.
2022-09-14 07:30:26 +02:00
|
|
|
weirdrune = l1.rpc.commando_rune(restrictions=[["method=invoice"],
|
|
|
|
["pnamedescription=@tipjar|jb55@sendsats.lol"]])
|
2022-09-12 23:14:20 +02:00
|
|
|
with pytest.raises(RpcError, match='Not authorized:'):
|
|
|
|
l2.rpc.call(method='commando',
|
|
|
|
payload={'peer_id': l1.info['id'],
|
|
|
|
'rune': weirdrune['rune'],
|
|
|
|
'method': 'invoice',
|
|
|
|
'params': {"amount_msat": "any",
|
|
|
|
"label": "lbl",
|
|
|
|
"description": "@tipjar\\|jb55@sendsats.lol"}})
|
|
|
|
l2.rpc.call(method='commando',
|
|
|
|
payload={'peer_id': l1.info['id'],
|
|
|
|
'rune': weirdrune['rune'],
|
|
|
|
'method': 'invoice',
|
|
|
|
'params': {"amount_msat": "any",
|
|
|
|
"label": "lbl",
|
|
|
|
"description": "@tipjar|jb55@sendsats.lol"}})
|
|
|
|
|
2022-07-16 15:18:27 +02:00
|
|
|
runedecodes = ((rune1, []),
|
|
|
|
(rune2, [{'alternatives': ['method^list', 'method^get', 'method=summary'],
|
|
|
|
'summary': "method (of command) starts with 'list' OR method (of command) starts with 'get' OR method (of command) equal to 'summary'"},
|
|
|
|
{'alternatives': ['method/listdatastore'],
|
|
|
|
'summary': "method (of command) unequal to 'listdatastore'"}]),
|
|
|
|
(rune4, [{'alternatives': ['id^022d223620a359a47ff7'],
|
|
|
|
'summary': "id (of commanding peer) starts with '022d223620a359a47ff7'"},
|
|
|
|
{'alternatives': ['method=listpeers'],
|
|
|
|
'summary': "method (of command) equal to 'listpeers'"}]),
|
|
|
|
(rune5, [{'alternatives': ['id^022d223620a359a47ff7'],
|
|
|
|
'summary': "id (of commanding peer) starts with '022d223620a359a47ff7'"},
|
|
|
|
{'alternatives': ['method=listpeers'],
|
|
|
|
'summary': "method (of command) equal to 'listpeers'"},
|
|
|
|
{'alternatives': ['pnamelevel!', 'pnamelevel/io'],
|
|
|
|
'summary': "pnamelevel (object parameter 'level') is missing OR pnamelevel (object parameter 'level') unequal to 'io'"}]),
|
|
|
|
(rune6, [{'alternatives': ['id^022d223620a359a47ff7'],
|
|
|
|
'summary': "id (of commanding peer) starts with '022d223620a359a47ff7'"},
|
|
|
|
{'alternatives': ['method=listpeers'],
|
|
|
|
'summary': "method (of command) equal to 'listpeers'"},
|
|
|
|
{'alternatives': ['pnamelevel!', 'pnamelevel/io'],
|
|
|
|
'summary': "pnamelevel (object parameter 'level') is missing OR pnamelevel (object parameter 'level') unequal to 'io'"},
|
|
|
|
{'alternatives': ['parr1!', 'parr1/io'],
|
|
|
|
'summary': "parr1 (array parameter #1) is missing OR parr1 (array parameter #1) unequal to 'io'"}]),
|
|
|
|
(rune7, [{'alternatives': ['pnum=0'],
|
|
|
|
'summary': "pnum (number of command parameters) equal to 0"}]),
|
|
|
|
(rune8, [{'alternatives': ['pnum=0'],
|
|
|
|
'summary': "pnum (number of command parameters) equal to 0"},
|
|
|
|
{'alternatives': ['rate=3'],
|
|
|
|
'summary': "rate (max per minute) equal to 3"}]),
|
|
|
|
(rune9, [{'alternatives': ['pnum=0'],
|
|
|
|
'summary': "pnum (number of command parameters) equal to 0"},
|
|
|
|
{'alternatives': ['rate=3'],
|
|
|
|
'summary': "rate (max per minute) equal to 3"},
|
|
|
|
{'alternatives': ['rate=1'],
|
|
|
|
'summary': "rate (max per minute) equal to 1"}]))
|
|
|
|
for decode in runedecodes:
|
|
|
|
rune = decode[0]
|
|
|
|
restrictions = decode[1]
|
|
|
|
decoded = l1.rpc.decode(rune['rune'])
|
|
|
|
assert decoded['type'] == 'rune'
|
|
|
|
assert decoded['unique_id'] == rune['unique_id']
|
|
|
|
assert decoded['valid'] is True
|
|
|
|
assert decoded['restrictions'] == restrictions
|
|
|
|
|
|
|
|
# Time handling is a bit special, since we annotate the timestamp with how far away it is.
|
|
|
|
decoded = l1.rpc.decode(rune3['rune'])
|
|
|
|
assert decoded['type'] == 'rune'
|
|
|
|
assert decoded['unique_id'] == rune3['unique_id']
|
|
|
|
assert decoded['valid'] is True
|
|
|
|
assert len(decoded['restrictions']) == 1
|
|
|
|
assert decoded['restrictions'][0]['alternatives'] == ['time>1656675211']
|
|
|
|
assert decoded['restrictions'][0]['summary'].startswith("time (in seconds since 1970) greater than 1656675211 (")
|
|
|
|
|
2022-07-16 15:18:27 +02:00
|
|
|
# Replace rune3 with a more useful timestamp!
|
|
|
|
expiry = int(time.time()) + 15
|
commando: make rune alternatives a JSON array.
This avoids having to escape | or &, though we still allow that for
the deprecation period.
To detect deprecated usage, we insist that alternatives are *always*
an array (which could be loosened later), but that also means that
restrictions must *always* be an array for now.
Before:
```
# invoice, description either A or B
lightning-cli commando-rune '["method=invoice","pnamedescription=A|pnamedescription=B"]'
# invoice, description literally 'A|B'
lightning-cli commando-rune '["method=invoice","pnamedescription=A\\|B"]'
```
After:
```
# invoice, description either A or B
lightning-cli commando-rune '[["method=invoice"],["pnamedescription=A", "pnamedescription=B"]]'
# invoice, description literally 'A|B'
lightning-cli commando-rune '[["method=invoice"],["pnamedescription=A|B"]]'
```
Changelog-Deprecated: JSON-RPC: `commando-rune` restrictions is always an array, each element an array of alternatives. Replaces a string with `|`-separators, so no escaping necessary except for `\\`.
2022-09-14 07:30:26 +02:00
|
|
|
rune3 = l1.rpc.commando_rune(restrictions=[["time<{}".format(expiry)]])
|
pytest: fix test_commando_rune flake.
We reset counters every minute, so ratelimit tests can flake since we
might hit that boundary.
Instead, wait for the reset then test explicitly, assuming that takes
less than 60 seconds.
```
for rune, cmd, params in failures:
print("{} {}".format(cmd, params))
with pytest.raises(RpcError, match='Not authorized:') as exc_info:
l2.rpc.call(method='commando',
payload={'peer_id': l1.info['id'],
'rune': rune['rune'],
'method': cmd,
> 'params': params})
E Failed: DID NOT RAISE <class 'pyln.client.lightning.RpcError'>
...
DEBUG:root:Calling commando with payload {'peer_id': '0266e4598d1d3c415f572a8488830b60f7e744ed9235eb0b1ba93283b315c03518', 'rune': 'O8Zr-ULTBKO3_pKYz0QKE9xYl1vQ4Xx9PtlHuist9Rk9NCZwbnVtPTAmcmF0ZT0zJnJhdGU9MQ==', 'method': 'getinfo', 'params': {}}
```
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2022-07-26 06:52:53 +02:00
|
|
|
|
2022-07-16 15:18:27 +02:00
|
|
|
successes = ((rune1, "listpeers", {}),
|
|
|
|
(rune2, "listpeers", {}),
|
|
|
|
(rune2, "getinfo", {}),
|
|
|
|
(rune2, "getinfo", {}),
|
|
|
|
(rune3, "getinfo", {}),
|
|
|
|
(rune4, "listpeers", {}),
|
|
|
|
(rune5, "listpeers", {'id': l2.info['id']}),
|
2022-07-16 15:18:27 +02:00
|
|
|
(rune5, "listpeers", {'id': l2.info['id'], 'level': 'broken'}),
|
|
|
|
(rune6, "listpeers", [l2.info['id'], 'broken']),
|
|
|
|
(rune6, "listpeers", [l2.info['id']]),
|
|
|
|
(rune7, "listpeers", []),
|
pytest: fix test_commando_rune flake.
We reset counters every minute, so ratelimit tests can flake since we
might hit that boundary.
Instead, wait for the reset then test explicitly, assuming that takes
less than 60 seconds.
```
for rune, cmd, params in failures:
print("{} {}".format(cmd, params))
with pytest.raises(RpcError, match='Not authorized:') as exc_info:
l2.rpc.call(method='commando',
payload={'peer_id': l1.info['id'],
'rune': rune['rune'],
'method': cmd,
> 'params': params})
E Failed: DID NOT RAISE <class 'pyln.client.lightning.RpcError'>
...
DEBUG:root:Calling commando with payload {'peer_id': '0266e4598d1d3c415f572a8488830b60f7e744ed9235eb0b1ba93283b315c03518', 'rune': 'O8Zr-ULTBKO3_pKYz0QKE9xYl1vQ4Xx9PtlHuist9Rk9NCZwbnVtPTAmcmF0ZT0zJnJhdGU9MQ==', 'method': 'getinfo', 'params': {}}
```
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2022-07-26 06:52:53 +02:00
|
|
|
(rune7, "getinfo", {}),
|
|
|
|
(rune9, "getinfo", {}),
|
|
|
|
(rune8, "getinfo", {}),
|
|
|
|
(rune8, "getinfo", {}))
|
|
|
|
|
2022-07-16 15:18:27 +02:00
|
|
|
failures = ((rune2, "withdraw", {}),
|
|
|
|
(rune2, "plugin", {'subcommand': 'list'}),
|
|
|
|
(rune3, "getinfo", {}),
|
|
|
|
(rune4, "listnodes", {}),
|
2022-07-16 15:18:27 +02:00
|
|
|
(rune5, "listpeers", {'id': l2.info['id'], 'level': 'io'}),
|
|
|
|
(rune6, "listpeers", [l2.info['id'], 'io']),
|
|
|
|
(rune7, "listpeers", [l2.info['id']]),
|
pytest: fix test_commando_rune flake.
We reset counters every minute, so ratelimit tests can flake since we
might hit that boundary.
Instead, wait for the reset then test explicitly, assuming that takes
less than 60 seconds.
```
for rune, cmd, params in failures:
print("{} {}".format(cmd, params))
with pytest.raises(RpcError, match='Not authorized:') as exc_info:
l2.rpc.call(method='commando',
payload={'peer_id': l1.info['id'],
'rune': rune['rune'],
'method': cmd,
> 'params': params})
E Failed: DID NOT RAISE <class 'pyln.client.lightning.RpcError'>
...
DEBUG:root:Calling commando with payload {'peer_id': '0266e4598d1d3c415f572a8488830b60f7e744ed9235eb0b1ba93283b315c03518', 'rune': 'O8Zr-ULTBKO3_pKYz0QKE9xYl1vQ4Xx9PtlHuist9Rk9NCZwbnVtPTAmcmF0ZT0zJnJhdGU9MQ==', 'method': 'getinfo', 'params': {}}
```
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2022-07-26 06:52:53 +02:00
|
|
|
(rune7, "listpeers", {'id': l2.info['id']}))
|
2022-07-16 15:18:27 +02:00
|
|
|
|
|
|
|
for rune, cmd, params in successes:
|
|
|
|
l2.rpc.call(method='commando',
|
|
|
|
payload={'peer_id': l1.info['id'],
|
|
|
|
'rune': rune['rune'],
|
|
|
|
'method': cmd,
|
|
|
|
'params': params})
|
|
|
|
|
|
|
|
while time.time() < expiry:
|
|
|
|
time.sleep(1)
|
|
|
|
|
|
|
|
for rune, cmd, params in failures:
|
|
|
|
print("{} {}".format(cmd, params))
|
|
|
|
with pytest.raises(RpcError, match='Not authorized:') as exc_info:
|
|
|
|
l2.rpc.call(method='commando',
|
|
|
|
payload={'peer_id': l1.info['id'],
|
|
|
|
'rune': rune['rune'],
|
|
|
|
'method': cmd,
|
|
|
|
'params': params})
|
|
|
|
assert exc_info.value.error['code'] == 0x4c51
|
|
|
|
|
pytest: fix test_commando_rune flake.
We reset counters every minute, so ratelimit tests can flake since we
might hit that boundary.
Instead, wait for the reset then test explicitly, assuming that takes
less than 60 seconds.
```
for rune, cmd, params in failures:
print("{} {}".format(cmd, params))
with pytest.raises(RpcError, match='Not authorized:') as exc_info:
l2.rpc.call(method='commando',
payload={'peer_id': l1.info['id'],
'rune': rune['rune'],
'method': cmd,
> 'params': params})
E Failed: DID NOT RAISE <class 'pyln.client.lightning.RpcError'>
...
DEBUG:root:Calling commando with payload {'peer_id': '0266e4598d1d3c415f572a8488830b60f7e744ed9235eb0b1ba93283b315c03518', 'rune': 'O8Zr-ULTBKO3_pKYz0QKE9xYl1vQ4Xx9PtlHuist9Rk9NCZwbnVtPTAmcmF0ZT0zJnJhdGU9MQ==', 'method': 'getinfo', 'params': {}}
```
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2022-07-26 06:52:53 +02:00
|
|
|
# Now, this can flake if we cross a minute boundary! So wait until
|
|
|
|
# It succeeds again.
|
|
|
|
while True:
|
|
|
|
try:
|
|
|
|
l2.rpc.call(method='commando',
|
|
|
|
payload={'peer_id': l1.info['id'],
|
|
|
|
'rune': rune8['rune'],
|
|
|
|
'method': 'getinfo',
|
|
|
|
'params': {}})
|
|
|
|
break
|
|
|
|
except RpcError as e:
|
|
|
|
assert e.error['code'] == 0x4c51
|
|
|
|
time.sleep(1)
|
|
|
|
|
|
|
|
# This fails immediately, since we've done one.
|
|
|
|
with pytest.raises(RpcError, match='Not authorized:') as exc_info:
|
|
|
|
l2.rpc.call(method='commando',
|
|
|
|
payload={'peer_id': l1.info['id'],
|
|
|
|
'rune': rune9['rune'],
|
|
|
|
'method': 'getinfo',
|
|
|
|
'params': {}})
|
|
|
|
assert exc_info.value.error['code'] == 0x4c51
|
|
|
|
|
|
|
|
# Two more succeed for rune8.
|
|
|
|
for _ in range(2):
|
|
|
|
l2.rpc.call(method='commando',
|
|
|
|
payload={'peer_id': l1.info['id'],
|
|
|
|
'rune': rune8['rune'],
|
|
|
|
'method': 'getinfo',
|
|
|
|
'params': {}})
|
|
|
|
assert exc_info.value.error['code'] == 0x4c51
|
|
|
|
|
|
|
|
# Now we've had 3 in one minute, this will fail.
|
|
|
|
with pytest.raises(RpcError, match='Not authorized:') as exc_info:
|
|
|
|
l2.rpc.call(method='commando',
|
|
|
|
payload={'peer_id': l1.info['id'],
|
|
|
|
'rune': rune8['rune'],
|
|
|
|
'method': 'getinfo',
|
|
|
|
'params': {}})
|
|
|
|
assert exc_info.value.error['code'] == 0x4c51
|
|
|
|
|
2022-07-16 15:18:27 +02:00
|
|
|
# rune5 can only be used by l2:
|
|
|
|
l3 = node_factory.get_node()
|
|
|
|
l3.connect(l1)
|
|
|
|
with pytest.raises(RpcError, match='Not authorized:') as exc_info:
|
|
|
|
l3.rpc.call(method='commando',
|
|
|
|
payload={'peer_id': l1.info['id'],
|
|
|
|
'rune': rune5['rune'],
|
|
|
|
'method': "listpeers",
|
|
|
|
'params': {}})
|
|
|
|
assert exc_info.value.error['code'] == 0x4c51
|
2022-07-16 15:18:27 +02:00
|
|
|
|
|
|
|
# Now wait for ratelimit expiry, ratelimits should reset.
|
|
|
|
time.sleep(61)
|
|
|
|
|
pytest: fix test_commando_rune flake.
We reset counters every minute, so ratelimit tests can flake since we
might hit that boundary.
Instead, wait for the reset then test explicitly, assuming that takes
less than 60 seconds.
```
for rune, cmd, params in failures:
print("{} {}".format(cmd, params))
with pytest.raises(RpcError, match='Not authorized:') as exc_info:
l2.rpc.call(method='commando',
payload={'peer_id': l1.info['id'],
'rune': rune['rune'],
'method': cmd,
> 'params': params})
E Failed: DID NOT RAISE <class 'pyln.client.lightning.RpcError'>
...
DEBUG:root:Calling commando with payload {'peer_id': '0266e4598d1d3c415f572a8488830b60f7e744ed9235eb0b1ba93283b315c03518', 'rune': 'O8Zr-ULTBKO3_pKYz0QKE9xYl1vQ4Xx9PtlHuist9Rk9NCZwbnVtPTAmcmF0ZT0zJnJhdGU9MQ==', 'method': 'getinfo', 'params': {}}
```
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2022-07-26 06:52:53 +02:00
|
|
|
for rune, cmd, params in ((rune9, "getinfo", {}),
|
|
|
|
(rune8, "getinfo", {}),
|
|
|
|
(rune8, "getinfo", {})):
|
2022-07-16 15:18:27 +02:00
|
|
|
l2.rpc.call(method='commando',
|
|
|
|
payload={'peer_id': l1.info['id'],
|
|
|
|
'rune': rune['rune'],
|
|
|
|
'method': cmd,
|
|
|
|
'params': params})
|
2022-07-21 07:10:10 +02:00
|
|
|
|
|
|
|
|
|
|
|
def test_commando_stress(node_factory, executor):
    """Stress test to slam commando with many large queries"""
    nodes = node_factory.get_nodes(5)

    # One unrestricted rune, shared by every client node.
    rune = nodes[0].rpc.commando_rune()['rune']
    for peer in nodes[1:]:
        peer.connect(nodes[0])

    # Fire off many concurrent invoice requests with huge descriptions.
    futures = []
    for idx in range(1000):
        sender = random.choice(nodes[1:])
        payload = {'peer_id': nodes[0].info['id'],
                   'rune': rune,
                   'method': 'invoice',
                   'params': {'amount_msat': 'any',
                              'label': 'label{}'.format(idx),
                              'description': 'A' * 200000,
                              'deschashonly': True}}
        futures.append(executor.submit(sender.rpc.call, method='commando',
                                       payload=payload))

    # Count how many requests got dropped mid-flight (replaced by a newer one).
    discards = 0
    for fut in futures:
        try:
            fut.result(TIMEOUT)
        except RpcError as err:
            assert err.error['code'] == 0x4c50
            assert err.error['message'] == "Invalid JSON"
            discards += 1

    # Should have at least one discard msg from each failure (we can have
    # more, if they kept replacing each other, as happens!)
    if discards > 0:
        nodes[0].daemon.wait_for_logs([r"New cmd from .*, replacing old"] * discards)
    else:
        assert not nodes[0].daemon.is_in_log(r"New cmd from .*, replacing old")
|
2022-07-25 03:23:30 +02:00
|
|
|
|
|
|
|
|
|
|
|
def test_commando_badrune(node_factory):
    """Test invalid UTF-8 encodings in rune: used to make us kill the offers plugin which implements decode, as it gave bad utf8!"""
    l1 = node_factory.get_node()
    l1.rpc.decode('5zi6-ugA6hC4_XZ0R7snl5IuiQX4ugL4gm9BQKYaKUU9gCZtZXRob2RebGlzdHxtZXRob2ReZ2V0fG1ldGhvZD1zdW1tYXJ5Jm1ldGhvZC9saXN0ZGF0YXN0b3Jl')
    rune = l1.rpc.commando_rune(restrictions="readonly")

    binrune = base64.urlsafe_b64decode(rune['rune'])
    # Mangle each part, try decode. Skip most of the boring chars
    # (just '|', '&', '#').
    for pos in range(32, len(binrune)):
        for candidates in (range(0, 32), (124, 38, 35), range(127, 256)):
            for byteval in candidates:
                mangled = binrune[:pos] + bytes([byteval]) + binrune[pos + 1:]
                encoded = base64.urlsafe_b64encode(mangled).decode('utf8')
                try:
                    # Decode may legitimately reject the mangled rune; we
                    # only care that the node survives the attempt.
                    l1.rpc.decode(encoded)
                except RpcError:
                    pass
|
2022-09-09 14:48:31 +02:00
|
|
|
|
|
|
|
|
2022-09-19 02:49:52 +02:00
|
|
|
def test_autoclean(node_factory):
    """Exercise the autoclean plugin's background cleaning.

    Walks through each autoclean subsystem in turn (expired invoices,
    paid invoices, failed/succeeded pays, failed/succeeded forwards),
    checking that enabling via the `autoclean-*-age` options works, that
    settings and `cleaned` counters persist across restarts, and that
    disabling (age 0) really stops cleaning.  Order matters throughout:
    later assertions depend on state set up earlier.
    """
    l1, l2, l3 = node_factory.line_graph(3, opts={'autoclean-cycle': 10,
                                                  'may_reconnect': True},
                                         wait_for_announce=True)

    # Under valgrind in CI, it can take 50 seconds between creating invoice
    # and restarting.
    if node_factory.valgrind:
        short_timeout = 10
        longer_timeout = 60
    else:
        short_timeout = 5
        longer_timeout = 20

    assert l3.rpc.autoclean_status('expiredinvoices')['autoclean']['expiredinvoices']['enabled'] is False
    l3.rpc.invoice(amount_msat=12300, label='inv1', description='description1', expiry=short_timeout)
    l3.rpc.invoice(amount_msat=12300, label='inv2', description='description2', expiry=longer_timeout)
    l3.rpc.invoice(amount_msat=12300, label='inv3', description='description3', expiry=longer_timeout)
    # inv4/inv5 have long expiries: used later for paid/failed-pay tests.
    inv4 = l3.rpc.invoice(amount_msat=12300, label='inv4', description='description4', expiry=2000)
    inv5 = l3.rpc.invoice(amount_msat=12300, label='inv5', description='description5', expiry=2000)

    # Enable expired-invoice cleaning (age 2 seconds).
    l3.stop()
    l3.daemon.opts['autoclean-expiredinvoices-age'] = 2
    l3.start()
    assert l3.rpc.autoclean_status()['autoclean']['expiredinvoices']['enabled'] is True
    assert l3.rpc.autoclean_status()['autoclean']['expiredinvoices']['age'] == 2

    # Both should still be there.
    assert l3.rpc.autoclean_status()['autoclean']['expiredinvoices']['cleaned'] == 0
    assert len(l3.rpc.listinvoices('inv1')['invoices']) == 1
    assert len(l3.rpc.listinvoices('inv2')['invoices']) == 1
    assert l3.rpc.listinvoices('inv1')['invoices'][0]['description'] == 'description1'

    # First it expires.
    wait_for(lambda: only_one(l3.rpc.listinvoices('inv1')['invoices'])['status'] == 'expired')
    # Now will get autocleaned
    wait_for(lambda: l3.rpc.listinvoices('inv1')['invoices'] == [])
    assert l3.rpc.autoclean_status()['autoclean']['expiredinvoices']['cleaned'] == 1

    # Keeps settings across restarts
    l3.restart()

    assert l3.rpc.autoclean_status()['autoclean']['expiredinvoices']['enabled'] is True
    assert l3.rpc.autoclean_status()['autoclean']['expiredinvoices']['age'] == 2
    assert l3.rpc.autoclean_status()['autoclean']['expiredinvoices']['cleaned'] == 1

    # Disabling works (age 0 means disabled, and 'age' vanishes from status).
    l3.stop()
    l3.daemon.opts['autoclean-expiredinvoices-age'] = 0
    l3.start()
    assert l3.rpc.autoclean_status()['autoclean']['expiredinvoices']['enabled'] is False
    assert 'age' not in l3.rpc.autoclean_status()['autoclean']['expiredinvoices']

    # Same with inv2/3
    wait_for(lambda: only_one(l3.rpc.listinvoices('inv2')['invoices'])['status'] == 'expired')
    wait_for(lambda: only_one(l3.rpc.listinvoices('inv3')['invoices'])['status'] == 'expired')

    # Give it time to notice (runs every 10 seconds, give it 15)
    time.sleep(15)

    # They're still there!
    assert l3.rpc.listinvoices('inv2')['invoices'] != []
    assert l3.rpc.listinvoices('inv3')['invoices'] != []

    # Restart keeps it disabled.
    l3.restart()
    assert l3.rpc.autoclean_status()['autoclean']['expiredinvoices']['enabled'] is False
    assert 'age' not in l3.rpc.autoclean_status()['autoclean']['expiredinvoices']

    # Now enable: they will get autocleaned
    l3.stop()
    l3.daemon.opts['autoclean-expiredinvoices-age'] = 2
    l3.start()
    # Only inv4/inv5 remain; cleaned counter now covers inv1+inv2+inv3.
    wait_for(lambda: len(l3.rpc.listinvoices()['invoices']) == 2)
    assert l3.rpc.autoclean_status()['autoclean']['expiredinvoices']['cleaned'] == 3

    # Reconnect, l1 pays invoice, we test paid expiry.
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l1.rpc.pay(inv4['bolt11'])

    # We manually delete inv5 so we can have l1 fail a payment.
    l3.rpc.delinvoice('inv5', 'unpaid')
    with pytest.raises(RpcError, match='WIRE_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
        l1.rpc.pay(inv5['bolt11'])

    # Paid-invoice cleaning starts disabled.
    assert l3.rpc.autoclean_status()['autoclean']['paidinvoices']['enabled'] is False
    assert l3.rpc.autoclean_status()['autoclean']['paidinvoices']['cleaned'] == 0
    l3.stop()
    l3.daemon.opts['autoclean-paidinvoices-age'] = 1
    l3.start()
    assert l3.rpc.autoclean_status()['autoclean']['paidinvoices']['enabled'] is True

    wait_for(lambda: l3.rpc.listinvoices()['invoices'] == [])
    assert l3.rpc.autoclean_status()['autoclean']['expiredinvoices']['cleaned'] == 3
    assert l3.rpc.autoclean_status()['autoclean']['paidinvoices']['cleaned'] == 1

    # On l1: one failed pay (inv5), one complete pay (inv4).
    assert only_one(l1.rpc.listpays(inv5['bolt11'])['pays'])['status'] == 'failed'
    assert only_one(l1.rpc.listpays(inv4['bolt11'])['pays'])['status'] == 'complete'
    l1.stop()
    l1.daemon.opts['autoclean-failedpays-age'] = 1
    l1.start()

    wait_for(lambda: l1.rpc.listpays(inv5['bolt11'])['pays'] == [])
    assert l1.rpc.autoclean_status()['autoclean']['failedpays']['cleaned'] == 1
    assert l1.rpc.autoclean_status()['autoclean']['succeededpays']['cleaned'] == 0

    # Now clean the succeeded pay as well.
    l1.stop()
    l1.daemon.opts['autoclean-succeededpays-age'] = 2
    l1.start()
    wait_for(lambda: l1.rpc.listpays(inv4['bolt11'])['pays'] == [])
    assert l1.rpc.listsendpays() == {'payments': []}

    # Now, we should have 1 failed forward, 1 success.
    assert len(l2.rpc.listforwards(status='failed')['forwards']) == 1
    assert len(l2.rpc.listforwards(status='settled')['forwards']) == 1
    assert len(l2.rpc.listforwards()['forwards']) == 2

    # Clean failed ones.
    l2.stop()
    l2.daemon.opts['autoclean-failedforwards-age'] = 2
    l2.start()
    wait_for(lambda: l2.rpc.listforwards(status='failed')['forwards'] == [])

    assert len(l2.rpc.listforwards(status='settled')['forwards']) == 1
    assert l2.rpc.autoclean_status()['autoclean']['failedforwards']['cleaned'] == 1
    assert l2.rpc.autoclean_status()['autoclean']['succeededforwards']['cleaned'] == 0

    # Fee total must survive forward deletion, so snapshot it first.
    amt_before = l2.rpc.getinfo()['fees_collected_msat']

    # Clean succeeded ones
    l2.stop()
    l2.daemon.opts['autoclean-succeededforwards-age'] = 2
    l2.start()
    wait_for(lambda: l2.rpc.listforwards(status='settled')['forwards'] == [])
    assert l2.rpc.listforwards() == {'forwards': []}
    assert l2.rpc.autoclean_status()['autoclean']['failedforwards']['cleaned'] == 1
    assert l2.rpc.autoclean_status()['autoclean']['succeededforwards']['cleaned'] == 1

    # We still see correct total in getinfo!
    assert l2.rpc.getinfo()['fees_collected_msat'] == amt_before
|
|
|
|
|
2022-09-19 02:49:52 +02:00
|
|
|
|
2022-09-19 02:53:00 +02:00
|
|
|
def test_autoclean_once(node_factory):
    """Check `autoclean-once`: each call cleans exactly the named
    subsystem and the per-subsystem `cleaned` tallies accumulate in
    `autoclean-status` while everything stays disabled."""
    def expect_status(**cleaned):
        # Expected autoclean-status reply: all subsystems disabled, with
        # the given cleaned counts (unspecified subsystems default to 0).
        subsystems = ('failedpays', 'succeededpays', 'failedforwards',
                      'succeededforwards', 'expiredinvoices', 'paidinvoices')
        return {'autoclean': {s: {'enabled': False,
                                  'cleaned': cleaned.get(s, 0)}
                              for s in subsystems}}

    l1, l2, l3 = node_factory.line_graph(3, opts={'may_reconnect': True},
                                         wait_for_announce=True)

    l3.rpc.invoice(amount_msat=12300, label='inv1', description='description1', expiry=1)
    inv2 = l3.rpc.invoice(amount_msat=12300, label='inv2', description='description4')
    inv3 = l3.rpc.invoice(amount_msat=12300, label='inv3', description='description5')

    # Produce one success and one failure for pays/forwards, plus one
    # expired and one paid invoice on l3.
    l1.rpc.pay(inv2['bolt11'])
    l3.rpc.delinvoice('inv3', 'unpaid')
    with pytest.raises(RpcError, match='WIRE_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
        l1.rpc.pay(inv3['bolt11'])

    # Make sure > 1 second old!
    time.sleep(2)

    assert (l1.rpc.autoclean_once('failedpays', 1)
            == {'autoclean': {'failedpays': {'cleaned': 1, 'uncleaned': 1}}})
    assert l1.rpc.autoclean_status() == expect_status(failedpays=1)

    assert (l1.rpc.autoclean_once('succeededpays', 1)
            == {'autoclean': {'succeededpays': {'cleaned': 1, 'uncleaned': 0}}})
    assert l1.rpc.autoclean_status() == expect_status(failedpays=1,
                                                      succeededpays=1)

    assert (l2.rpc.autoclean_once('failedforwards', 1)
            == {'autoclean': {'failedforwards': {'cleaned': 1, 'uncleaned': 1}}})
    assert l2.rpc.autoclean_status() == expect_status(failedforwards=1)

    assert (l2.rpc.autoclean_once('succeededforwards', 1)
            == {'autoclean': {'succeededforwards': {'cleaned': 1, 'uncleaned': 0}}})
    assert l2.rpc.autoclean_status() == expect_status(failedforwards=1,
                                                      succeededforwards=1)

    assert (l3.rpc.autoclean_once('expiredinvoices', 1)
            == {'autoclean': {'expiredinvoices': {'cleaned': 1, 'uncleaned': 1}}})
    assert l3.rpc.autoclean_status() == expect_status(expiredinvoices=1)

    assert (l3.rpc.autoclean_once('paidinvoices', 1)
            == {'autoclean': {'paidinvoices': {'cleaned': 1, 'uncleaned': 0}}})
    assert l3.rpc.autoclean_status() == expect_status(expiredinvoices=1,
                                                      paidinvoices=1)
|
|
|
|
|
|
|
|
|
2022-09-12 22:43:54 +02:00
|
|
|
def test_block_added_notifications(node_factory, bitcoind):
    """Test if a plugin gets notifications when a new block is found"""
    base = bitcoind.rpc.getblockchaininfo()["blocks"]
    plugin = [
        os.path.join(os.getcwd(), "tests/plugins/block_added.py"),
    ]

    # Fresh node sees exactly the current tip.
    l1 = node_factory.get_node(options={"plugin": plugin})
    caught = l1.rpc.call("blockscatched")
    assert len(caught) == 1 and caught[0] == base + 0

    # Two new blocks produce two more notifications.
    bitcoind.generate_block(2)
    sync_blockheight(bitcoind, [l1])
    caught = l1.rpc.call("blockscatched")
    assert len(caught) == 3 and caught[0] == base + 0 and caught[2] == base + 2

    # A node started later only sees the new tip.
    l2 = node_factory.get_node(options={"plugin": plugin})
    caught = l2.rpc.call("blockscatched")
    assert len(caught) == 1 and caught[0] == base + 2

    l2.stop()
    next_l2_base = bitcoind.rpc.getblockchaininfo()["blocks"]

    # Blocks mined while l2 is down still reach l1 ...
    bitcoind.generate_block(2)
    sync_blockheight(bitcoind, [l1])
    caught = l1.rpc.call("blockscatched")
    assert len(caught) == 5 and caught[4] == base + 4

    # ... and l2 catches up on them after restarting.
    l2.start()
    sync_blockheight(bitcoind, [l2])
    caught = l2.rpc.call("blockscatched")
    assert len(caught) == 3 and caught[1] == next_l2_base + 1 and caught[2] == next_l2_base + 2
|