from fixtures import *  # noqa: F401,F403
from pathlib import Path
from pyln import grpc as clnpb
from pyln.testing.utils import env, TEST_NETWORK, wait_for, sync_blockheight, TIMEOUT
from utils import first_scid
import grpc
import pytest
import subprocess
import os
import re

# Skip the entire module if we don't have Rust.
pytestmark = pytest.mark.skipif(
    env('RUST') != '1',
    reason='RUST is not enabled, skipping rust-dependent tests'
)

RUST_PROFILE = os.environ.get("RUST_PROFILE", "debug")


def wait_for_grpc_start(node):
    """Wait until the cln-grpc plugin reports that it is serving.

    The 'serving grpc' line can be logged before the 'public key' line
    that start() waits for and swallows, so search the whole log.
    """
    wait_for(lambda: node.daemon.is_in_log(r'serving grpc'))


def test_rpc_client(node_factory):
l1 = node_factory.get_node()
bin_path = Path.cwd() / "target" / RUST_PROFILE / "examples" / "cln-rpc-getinfo"
rpc_path = Path(l1.daemon.lightning_dir) / TEST_NETWORK / "lightning-rpc"
out = subprocess.check_output([bin_path, rpc_path], stderr=subprocess.STDOUT)
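    # With pyln-testing's deterministic node seeds this is l1's node_id,
    # so seeing it in the output means the getinfo call succeeded.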
    assert b'0266e4598d1d3c415f572a8488830b60f7e744ed9235eb0b1ba93283b315c03518' in out


def test_plugin_start(node_factory):
    """Start a minimal plugin and ensure it is well-behaved"""
bin_path = Path.cwd() / "target" / RUST_PROFILE / "examples" / "cln-plugin-startup"
l1, l2 = node_factory.get_nodes(2, opts=[
{"plugin": str(bin_path), 'test-option': 31337}, {}
    ])

    # The plugin should be in the list of active plugins
plugins = l1.rpc.plugin('list')['plugins']
    assert len([p for p in plugins if 'cln-plugin-startup' in p['name'] and p['active']]) == 1

    assert str(bin_path) in l1.rpc.listconfigs()['configs']['plugin']['values_str']

    # Now check that the `testmethod` was registered ok
assert l1.rpc.help("testmethod") == {
'help': [
{
'command': 'testmethod '
}
],
'format-hint': 'simple'
    }

    assert l1.rpc.testmethod() == "Hello"
assert l1.rpc.test_custom_notification() == "Notification sent"
    l1.daemon.wait_for_log(r'Received a test_custom_notification')

    l1.connect(l2)
l1.daemon.wait_for_log(r'Got a connect hook call')
    l1.daemon.wait_for_log(r'Got a connect notification')

    l1.rpc.setconfig("test-dynamic-option", True)
assert l1.rpc.listconfigs("test-dynamic-option")["configs"]["test-dynamic-option"]["value_bool"]
    wait_for(lambda: l1.daemon.is_in_log(r'cln-plugin-startup: Got dynamic option change: test-dynamic-option \"true\"'))
l1.rpc.setconfig("test-dynamic-option", False)
assert not l1.rpc.listconfigs("test-dynamic-option")["configs"]["test-dynamic-option"]["value_bool"]
    wait_for(lambda: l1.daemon.is_in_log(r'cln-plugin-startup: Got dynamic option change: test-dynamic-option \"false\"'))


def test_plugin_options_handle_defaults(node_factory):
    """Start a minimal plugin and check that its option defaults are handled"""
bin_path = Path.cwd() / "target" / RUST_PROFILE / "examples" / "cln-plugin-startup"
l1 = node_factory.get_node(
options={
"plugin": str(bin_path),
"opt-option": 31337,
"test-option": 31338,
"multi-str-option": ["String1", "String2"],
"multi-str-option-default": ["NotDefault1", "NotDefault2"],
"multi-i64-option": [1, 2, 3, 4],
"multi-i64-option-default": [5, 6],
}
)
opts = l1.rpc.testoptions()
assert opts["opt-option"] == 31337
assert opts["test-option"] == 31338
assert opts["multi-str-option"] == ["String1", "String2"]
assert opts["multi-str-option-default"] == ["NotDefault1", "NotDefault2"]
assert opts["multi-i64-option"] == [1, 2, 3, 4]
assert opts["multi-i64-option-default"] == [5, 6]
|
2022-10-26 13:10:19 +02:00
|
|
|
|
|
|
|
# Do not set any value, should be None now
|
|
|
|
l1 = node_factory.get_node(options={"plugin": str(bin_path)})
opts = l1.rpc.testoptions()
assert opts["opt-option"] is None, "opt-option has no default"
assert opts["test-option"] == 42, "test-option has a default of 42"
assert opts["multi-str-option"] is None
assert opts["multi-str-option-default"] == ["Default1"]
assert opts["multi-i64-option"] is None
assert opts["multi-i64-option-default"] == [-42]
|
2022-10-26 13:10:19 +02:00
|
|
|
|
|
|
|
|
2022-01-20 15:09:21 +01:00
|
|
|
def test_grpc_connect(node_factory):
"""Attempts to connect to the grpc interface and call getinfo"""
# These only exist if we have rust!
    l1 = node_factory.get_node()

    p = Path(l1.daemon.lightning_dir) / TEST_NETWORK
cert_path = p / "client.pem"
key_path = p / "client-key.pem"
ca_cert_path = p / "ca.pem"
creds = grpc.ssl_channel_credentials(
root_certificates=ca_cert_path.open('rb').read(),
private_key=key_path.open('rb').read(),
certificate_chain=cert_path.open('rb').read()
    )

    wait_for_grpc_start(l1)
channel = grpc.secure_channel(
f"localhost:{l1.grpc_port}",
creds,
options=(('grpc.ssl_target_name_override', 'cln'),)
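        # The generated server certificate is issued for the name "cln"
        # rather than "localhost", hence the target-name override above.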
)
    stub = clnpb.NodeStub(channel)

    response = stub.Getinfo(clnpb.GetinfoRequest())
    print(response)

    response = stub.ListFunds(clnpb.ListfundsRequest())
    print(response)

    inv = stub.Invoice(clnpb.InvoiceRequest(
amount_msat=clnpb.AmountOrAny(any=True),
description="hello",
label="lbl1",
preimage=b"\x00" * 32,
cltv=24
))
    print(inv)

    rates = stub.Feerates(clnpb.FeeratesRequest(style='PERKB'))
    print(rates)

    # Test a failing RPC call, so we know that errors are returned correctly.
with pytest.raises(Exception, match=r'Duplicate label'):
# This request creates a label collision
stub.Invoice(clnpb.InvoiceRequest(
amount_msat=clnpb.AmountOrAny(amount=clnpb.Amount(msat=12345)),
description="hello",
label="lbl1",
        ))


def test_grpc_generate_certificate(node_factory):
"""Test whether we correctly generate the certificates.
|
|
|
|
|
|
|
|
- If we have no certs, we need to generate them all
|
|
|
|
- If we have certs, we they should just get loaded
- If we delete one cert or its key it should get regenerated.
"""
    l1 = node_factory.get_node(start=False)

    p = Path(l1.daemon.lightning_dir) / TEST_NETWORK
files = [p / f for f in [
'ca.pem',
'ca-key.pem',
'client.pem',
'client-key.pem',
'server-key.pem',
'server.pem',
    ]]

    # Before starting no files exist.
    assert [f.exists() for f in files] == [False] * len(files)

    l1.start()
    assert [f.exists() for f in files] == [True] * len(files)

    # The files exist, restarting should not change them
contents = [f.open().read() for f in files]
l1.restart()
    assert contents == [f.open().read() for f in files]

    # Now delete the last file (server.pem); it should be regenerated,
    # along with its key
files[-1].unlink()
l1.restart()
assert contents[-2] != files[-2].open().read()
    assert contents[-1] != files[-1].open().read()

    keys = [f for f in files if f.name.endswith('-key.pem')]
modes = [f.stat().st_mode for f in keys]
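    # A key counts as private when neither "group" nor "other" have any
    # permission bits set, i.e. the two lowest octal digits of the mode
    # are zero (e.g. 0o600).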
private = [m % 8 == 0 and (m // 8) % 8 == 0 for m in modes]
    assert all(private)


def test_grpc_default_port_auto_starts(node_factory):
"""Ensure that we start cln-grpc on default port. Also check that certificates are generated."""
    l1 = node_factory.get_node(unused_grpc_port=False)

    grpcplugin = next((p for p in l1.rpc.plugin('list')['plugins'] if 'cln-grpc' in p['name'] and p['active']), None)
# Check that the plugin is active
assert grpcplugin is not None
# Check that the plugin is listening on the default port
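    # (cln-grpc's grpc-port option defaults to 9736)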
    assert l1.daemon.is_in_log(r'serving grpc on 127\.0\.0\.1:9736')
# Check that the certificates are generated
    assert len([f for f in os.listdir(Path(l1.daemon.lightning_dir) / TEST_NETWORK) if re.match(r".*\.pem$", f)]) >= 6

    # Check server connection
    l1.grpc.Getinfo(clnpb.GetinfoRequest())


def test_grpc_wrong_auth(node_factory):
"""An mTLS client certificate should only be usable with its node
|
|
|
|
|
|
|
|
We create two instances, each generates its own certs and keys,
and then we try to cross the wires.
"""
# These only exist if we have rust!
l1, l2 = node_factory.get_nodes(2, opts=[{"start": False}, {"start": False}])
l1.start()
    wait_for_grpc_start(l1)

    def connect(node):
p = Path(node.daemon.lightning_dir) / TEST_NETWORK
cert, key, ca = [f.open('rb').read() for f in [
p / 'client.pem',
p / 'client-key.pem',
p / "ca.pem"]]
|
|
|
|
|
|
|
|
creds = grpc.ssl_channel_credentials(
root_certificates=ca,
private_key=key,
certificate_chain=cert,
        )

        channel = grpc.secure_channel(
f"localhost:{node.grpc_port}",
creds,
options=(('grpc.ssl_target_name_override', 'cln'),)
)
        return clnpb.NodeStub(channel)

    stub = connect(l1)
# This should work, it's the correct node
    stub.Getinfo(clnpb.GetinfoRequest())

    l1.stop()
l2.start()
    wait_for_grpc_start(l2)

    # This should not work, it's a different node
with pytest.raises(Exception, match=r'Socket closed|StatusCode.UNAVAILABLE'):
        stub.Getinfo(clnpb.GetinfoRequest())

    # Now load the correct ones and we should be good to go
stub = connect(l2)
    stub.Getinfo(clnpb.GetinfoRequest())


def test_cln_plugin_reentrant(node_factory, executor):
"""Ensure that we continue processing events while already handling.
|
|
|
|
|
|
|
|
We should be continuing to handle incoming events even though a
prior event has not completed. This is important for things like
the `htlc_accepted` hook which needs to hold on to multiple
    incoming HTLCs.

    Scenario: l1 uses an `htlc_accepted` to hold on to incoming HTLCs,
    and we release them using an RPC method.

    """
bin_path = Path.cwd() / "target" / RUST_PROFILE / "examples" / "cln-plugin-reentrant"
l1, l2 = node_factory.get_nodes(2, opts=[{"plugin": str(bin_path)}, {}])
l2.connect(l1)
    l2.fundchannel(l1)

    # Now create two invoices, and pay them both. Neither payment should
    # complete yet: the plugin holds on to the HTLCs, queueing them.
i1 = l1.rpc.invoice(label='lbl1', amount_msat='42sat', description='desc')['bolt11']
    i2 = l1.rpc.invoice(label='lbl2', amount_msat='31337sat', description='desc')['bolt11']

    f1 = executor.submit(l2.rpc.pay, i1)
    f2 = executor.submit(l2.rpc.pay, i2)

    l1.daemon.wait_for_logs(["plugin-cln-plugin-reentrant: Holding on to incoming HTLC Object"] * 2)

    print("Releasing HTLCs after holding them")
    l1.rpc.call('release')

    assert f1.result(timeout=TIMEOUT)
    assert f2.result(timeout=TIMEOUT)


def test_grpc_keysend_routehint(bitcoind, node_factory):
"""The routehints are a bit special, test that conversions work.
|
|
|
|
|
|
|
|
3 node line graph, with l1 as the keysend sender and l3 the
    recipient.

    """
l1, l2, l3 = node_factory.line_graph(
3,
announce_channels=True, # Do not enforce scid-alias
)
bitcoind.generate_block(3)
    sync_blockheight(bitcoind, [l1, l2, l3])

    chan = l2.rpc.listpeerchannels(l3.info['id'])

    routehint = clnpb.RoutehintList(hints=[
clnpb.Routehint(hops=[
clnpb.RouteHop(
id=bytes.fromhex(l2.info['id']),
scid=chan['channels'][0]['short_channel_id'],
# Fees are defaults from CLN
feebase=clnpb.Amount(msat=1),
feeprop=10,
expirydelta=18,
)
])
    ])

    # FIXME: keysend needs (unannounced) channel in gossip_store
    l1.wait_local_channel_active(first_scid(l1, l2))

    # And now we send a keysend with that routehint list
call = clnpb.KeysendRequest(
destination=bytes.fromhex(l3.info['id']),
amount_msat=clnpb.Amount(msat=42),
routehints=routehint,
    )

    res = l1.grpc.KeySend(call)
    print(res)


def test_grpc_listpeerchannels(bitcoind, node_factory):
""" Check that conversions of this rather complex type work.
|
|
|
|
"""
l1, l2 = node_factory.line_graph(
2,
announce_channels=True, # Do not enforce scid-alias
    )

    stub = l1.grpc
    res = stub.ListPeerChannels(clnpb.ListpeerchannelsRequest(id=None))

    # Way too many fields to check, so just do a couple
assert len(res.channels) == 1
c = res.channels[0]
assert c.peer_id.hex() == l2.info['id']
    assert c.state == 2  # CHANNELD_NORMAL

    # And since we're at it let's close the channel as well so we can
    # see it in listclosedchannels

    res = stub.Close(clnpb.CloseRequest(id=l2.info['id']))

    bitcoind.generate_block(100, wait_for_mempool=1)
    l1.daemon.wait_for_log(r'onchaind complete, forgetting peer')

    stub.ListClosedChannels(clnpb.ListclosedchannelsRequest())


def test_grpc_decode(node_factory):
l1 = node_factory.get_node()
inv = l1.grpc.Invoice(clnpb.InvoiceRequest(
amount_msat=clnpb.AmountOrAny(any=True),
description="desc",
label="label",
    ))

    res = l1.grpc.Decode(clnpb.DecodeRequest(
string=inv.bolt11
))
    print(res)


def test_rust_plugin_subscribe_wildcard(node_factory):
    """Load the cln-subscribe-wildcard plugin and check its wildcard subscription"""
bin_path = Path.cwd() / "target" / RUST_PROFILE / "examples" / "cln-subscribe-wildcard"
l1 = node_factory.get_node(options={"plugin": bin_path})
    l2 = node_factory.get_node()

    l2.connect(l1)

    l1.daemon.wait_for_log("Received notification connect")


def test_grpc_block_added_notifications(node_factory, bitcoind):
    l1 = node_factory.get_node()

    # Test the block_added notification
# Start listening to block added events over grpc
block_added_stream = l1.grpc.SubscribeBlockAdded(clnpb.StreamBlockAddedRequest())
bitcoind.generate_block(10)
for block_added_event in block_added_stream:
assert block_added_event.hash is not None
        assert block_added_event.height is not None

        # If we don't break out of the loop we'll
        # be waiting forever
        break


def test_grpc_connect_notification(node_factory):
    l1, l2 = node_factory.get_nodes(2)

    # Test the connect notification
    connect_stream = l1.grpc.SubscribeConnect(clnpb.StreamConnectRequest())

    # FIXME: The above does not seem to be synchronous, causing a flake. Wait
    # until it does something (and this seems to be something!)
l1.daemon.wait_for_log('plugin-cln-grpc: received settings ACK')
    l2.connect(l1)

    for connect_event in connect_stream:
assert connect_event.id.hex() == l2.info["id"]
        break


def test_grpc_custommsg_notification(node_factory):
    l1, l2 = node_factory.get_nodes(2)

    # Test the custommsg notification
custommsg_stream = l1.grpc.SubscribeCustomMsg(clnpb.StreamCustomMsgRequest())
    l2.connect(l1)

    # Send the custom-msg to node l1
# The payload doesn't matter.
# It just needs to be valid hex which encodes to an odd BOLT-8 msg id
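    # (the msg id is the payload's first two bytes: 0x3131 == 12593, odd)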
    l2.rpc.sendcustommsg(l1.info["id"], "3131313174657374")

    for custommsg in custommsg_stream:
assert custommsg.peer_id.hex() == l2.info["id"]
assert custommsg.payload.hex() == "3131313174657374"
assert custommsg.payload == b"1111test"
break