Mirror of https://github.com/ElementsProject/lightning.git (synced 2024-11-19 01:43:36 +01:00)
pytest: Highlight the re-entrancy issue for cln-plugin events
This was pointed out by Daywalker [1]: we process events from `lightningd` synchronously, which means that if handling one of the hooks or requests is slow or delayed, we stop receiving further notifications, hooks, and RPC requests, massively limiting flexibility.

This commit highlights the issue with a failing test (it times out). In the next commit we will isolate event processing into its own task, so as to free the event loop from having to wait for an eventual response.

[1] https://community.corelightning.org/c/developers/hold-invoice-plugin#comment_wrapper_16754493
This commit is contained in: commit db3707f957 (parent 54bd024910)
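The direction described in the commit message can be illustrated with a minimal, hypothetical sketch (the names `Event`, `event_loop`, and `handle` are illustrative stand-ins, not the actual cln-plugin API): each event read from `lightningd` is spawned into its own task, so a slow or blocked handler no longer stalls the loop that reads notifications, hooks, and RPC requests.

    // Hypothetical sketch only: demonstrates per-event task isolation,
    // not the real cln-plugin dispatch code.
    use tokio::sync::mpsc;

    #[derive(Debug)]
    enum Event {
        Notification(String),
        Hook(String),
    }

    async fn handle(event: Event) {
        // Stand-in for dispatching to the registered hook or
        // notification handler; this may block for a long time.
        println!("processing {:?}", event);
    }

    async fn event_loop(mut rx: mpsc::Receiver<Event>) {
        while let Some(event) = rx.recv().await {
            // Before the fix this was effectively `handle(event).await`,
            // which blocked further reads. Spawning a task per event lets
            // handlers run concurrently while the loop keeps reading.
            tokio::spawn(handle(event));
        }
    }

    #[tokio::main]
    async fn main() {
        let (tx, rx) = mpsc::channel(16);
        tokio::spawn(event_loop(rx));
        tx.send(Event::Hook("htlc_accepted".into())).await.unwrap();
        tx.send(Event::Notification("invoice_payment".into())).await.unwrap();
        // Give the spawned handlers a moment to run before exiting.
        tokio::time::sleep(std::time::Duration::from_millis(50)).await;
    }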
@@ -16,5 +16,8 @@ target/${RUST_PROFILE}/examples/cln-rpc-getinfo: $(shell find cln-rpc -name *.rs)
 target/${RUST_PROFILE}/examples/cln-plugin-startup: $(shell find cln-rpc -name *.rs)
 	cargo build ${CARGO_OPTS} --example cln-plugin-startup
 
+target/${RUST_PROFILE}/examples/cln-plugin-reentrant: $(shell find plugins/examples -name *.rs)
+	cargo build ${CARGO_OPTS} --example cln-plugin-reentrant
+
 cln-rpc-all: ${CLN_RPC_GEN_ALL} ${CLN_RPC_EXAMPLES}
 
@@ -1,5 +1,5 @@
 PLUGIN_PAY_SRC := plugins/pay.c
-PLUGIN_PAY_HEADER :=
+PLUGIN_PAY_HEADER :=
 PLUGIN_PAY_OBJS := $(PLUGIN_PAY_SRC:.c=.o)
 
 PLUGIN_AUTOCLEAN_SRC := plugins/autoclean.c
@@ -40,7 +40,7 @@ PLUGIN_FETCHINVOICE_OBJS := $(PLUGIN_FETCHINVOICE_SRC:.c=.o)
 PLUGIN_FETCHINVOICE_HEADER :=
 
 PLUGIN_SQL_SRC := plugins/sql.c
-PLUGIN_SQL_HEADER :=
+PLUGIN_SQL_HEADER :=
 PLUGIN_SQL_OBJS := $(PLUGIN_SQL_SRC:.c=.o)
 
 PLUGIN_SPENDER_SRC := \
@@ -188,7 +188,7 @@ plugins/autoclean: $(PLUGIN_AUTOCLEAN_OBJS) $(PLUGIN_LIB_OBJS) $(PLUGIN_COMMON_OBJS) $(JSMN_OBJS)
 
 plugins/chanbackup: $(PLUGIN_chanbackup_OBJS) $(PLUGIN_LIB_OBJS) $(PLUGIN_COMMON_OBJS) $(JSMN_OBJS)
 
-plugins/commando: $(PLUGIN_COMMANDO_OBJS) $(PLUGIN_LIB_OBJS) $(PLUGIN_COMMON_OBJS) $(JSMN_OBJS)
+plugins/commando: $(PLUGIN_COMMANDO_OBJS) $(PLUGIN_LIB_OBJS) $(PLUGIN_COMMON_OBJS) $(JSMN_OBJS)
 
 # Topology wants to decode node_announcement, and peer_wiregen which
 # pulls in some of bitcoin/.
@@ -232,6 +232,7 @@ plugins/list_of_builtin_plugins_gen.h: plugins/Makefile Makefile config.vars
 
 CLN_PLUGIN_EXAMPLES := \
 	target/${RUST_PROFILE}/examples/cln-plugin-startup \
+	target/${RUST_PROFILE}/examples/cln-plugin-reentrant \
 	target/${RUST_PROFILE}/examples/cln-rpc-getinfo
 
 CLN_PLUGIN_SRC = $(shell find plugins/src -name "*.rs")
plugins/examples/cln-plugin-reentrant.rs (new file, 44 lines)
@@ -0,0 +1,44 @@
+use anyhow::Error;
+use cln_plugin::{Builder, Plugin};
+use serde_json::json;
+use tokio::sync::broadcast;
+
+#[derive(Clone)]
+struct State {
+    tx: broadcast::Sender<()>,
+}
+
+#[tokio::main]
+async fn main() -> Result<(), Error> {
+    let (tx, _) = broadcast::channel(4);
+    let state = State { tx };
+
+    if let Some(plugin) = Builder::new(tokio::io::stdin(), tokio::io::stdout())
+        .hook("htlc_accepted", htlc_accepted_handler)
+        .rpcmethod("release", "Release all HTLCs we currently hold", release)
+        .start(state)
+        .await?
+    {
+        plugin.join().await?;
+        Ok(())
+    } else {
+        Ok(())
+    }
+}
+
+/// Release all waiting HTLCs
+async fn release(p: Plugin<State>, _v: serde_json::Value) -> Result<serde_json::Value, Error> {
+    p.state().tx.send(()).unwrap();
+    Ok(json!("Released!"))
+}
+
+async fn htlc_accepted_handler(
+    p: Plugin<State>,
+    v: serde_json::Value,
+) -> Result<serde_json::Value, Error> {
+    log::info!("Holding on to incoming HTLC {:?}", v);
+    // Wait for `release` to be called.
+    p.state().tx.subscribe().recv().await.unwrap();
+
+    Ok(json!({"result": "continue"}))
+}
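Design note: the plugin uses a `broadcast` channel so that every in-flight `htlc_accepted` invocation can wait on its own `Receiver`, and a single `release` call unblocks all held HTLCs at once. A standalone sketch of that semantics (this snippet is illustrative only, not part of the commit; `holder` is a hypothetical name):

    // Demonstrates the tokio broadcast behavior the plugin relies on:
    // one send() wakes every live subscriber.
    use tokio::sync::broadcast;

    #[tokio::main]
    async fn main() {
        let (tx, _) = broadcast::channel::<()>(4);

        let mut handles = Vec::new();
        for holder in 0..2 {
            // Subscribe before sending so no message is missed.
            let mut rx = tx.subscribe();
            handles.push(tokio::spawn(async move {
                rx.recv().await.unwrap();
                println!("holder {} released", holder);
            }));
        }

        // A single send reaches every subscriber, mirroring how one
        // `release` RPC call frees all held HTLCs.
        tx.send(()).unwrap();
        for h in handles {
            h.await.unwrap();
        }
    }

In the test below, this is driven through `l1.rpc.call('release')`.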
@@ -249,6 +249,46 @@ def test_grpc_wrong_auth(node_factory):
         stub.Getinfo(nodepb.GetinfoRequest())
 
 
+@pytest.mark.xfail(
+    reason="Times out because we can't call the RPC method while currently holding on to HTLCs",
+    strict=True,
+)
+def test_cln_plugin_reentrant(node_factory, executor):
+    """Ensure that we continue processing events while already handling an event.
+
+    We should continue to handle incoming events even though a
+    prior event has not completed. This is important for things like
+    the `htlc_accepted` hook which needs to hold on to multiple
+    incoming HTLCs.
+
+    Scenario: l1 uses an `htlc_accepted` hook to hold on to incoming
+    HTLCs, and we release them using an RPC method.
+
+    """
+    bin_path = Path.cwd() / "target" / RUST_PROFILE / "examples" / "cln-plugin-reentrant"
+    l1 = node_factory.get_node(options={"plugin": str(bin_path)})
+    l2 = node_factory.get_node()
+    l2.connect(l1)
+    l2.fundchannel(l1)
+
+    # Now create two invoices, and pay them both. Neither should
+    # succeed, but we should queue them on the plugin.
+    i1 = l1.rpc.invoice(label='lbl1', msatoshi='42sat', description='desc')['bolt11']
+    i2 = l1.rpc.invoice(label='lbl2', msatoshi='31337sat', description='desc')['bolt11']
+
+    f1 = executor.submit(l2.rpc.pay, i1)
+    f2 = executor.submit(l2.rpc.pay, i2)
+
+    import time
+    time.sleep(3)
+
+    print("Releasing HTLCs after holding them")
+    l1.rpc.call('release')
+
+    assert f1.result()
+    assert f2.result()
+
+
 def test_grpc_keysend_routehint(bitcoind, node_factory):
     """The routehints are a bit special, test that conversions work.
 
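Why this test is marked xfail at this commit: with the current synchronous dispatch, both `pay` attempts park inside the `htlc_accepted` hook, and the subsequent `release` RPC request sits unread behind them in the same event loop, so it can never execute and the test times out. Once event processing is moved into separate tasks in the next commit, `release` can run while the hooks are still pending, both HTLCs are released, and both payments complete.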