2021-03-17 14:18:37 -04:00
|
|
|
//! Utilities that take care of tasks that (1) need to happen periodically to keep Rust-Lightning
|
|
|
|
//! running properly, and (2) either can or should be run in the background. See docs for
|
|
|
|
//! [`BackgroundProcessor`] for more details on the nitty-gritty.
|
|
|
|
|
2022-08-07 13:49:10 -04:00
|
|
|
// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
|
2021-03-17 14:05:09 -04:00
|
|
|
#![deny(broken_intra_doc_links)]
|
2022-08-07 13:49:10 -04:00
|
|
|
#![deny(private_intra_doc_links)]
|
|
|
|
|
2021-03-17 14:18:37 -04:00
|
|
|
#![deny(missing_docs)]
|
2023-03-30 22:11:22 +00:00
|
|
|
#![cfg_attr(not(feature = "futures"), deny(unsafe_code))]
|
2021-03-17 14:05:09 -04:00
|
|
|
|
2022-02-11 22:22:20 -06:00
|
|
|
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
|
|
|
|
2023-01-17 00:16:48 +00:00
|
|
|
#![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
|
|
|
|
|
|
|
|
#[cfg(any(test, feature = "std"))]
|
|
|
|
extern crate core;
|
|
|
|
|
2023-02-03 11:25:20 -05:00
|
|
|
#[cfg(not(feature = "std"))]
|
|
|
|
extern crate alloc;
|
|
|
|
|
2021-01-11 18:03:32 -05:00
|
|
|
#[macro_use] extern crate lightning;
|
2022-06-01 15:26:07 -07:00
|
|
|
extern crate lightning_rapid_gossip_sync;
|
2021-01-11 18:03:32 -05:00
|
|
|
|
|
|
|
use lightning::chain;
|
|
|
|
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
|
2021-10-07 23:46:13 +00:00
|
|
|
use lightning::chain::chainmonitor::{ChainMonitor, Persist};
|
2023-04-28 14:11:37 -05:00
|
|
|
use lightning::sign::{EntropySource, NodeSigner, SignerProvider};
|
2023-03-07 13:57:01 -08:00
|
|
|
use lightning::events::{Event, PathFailure};
|
|
|
|
#[cfg(feature = "std")]
|
|
|
|
use lightning::events::{EventHandler, EventsProvider};
|
2021-01-11 18:03:32 -05:00
|
|
|
use lightning::ln::channelmanager::ChannelManager;
|
2023-04-29 18:45:59 +00:00
|
|
|
use lightning::ln::peer_handler::APeerManager;
|
2022-06-02 12:53:34 -07:00
|
|
|
use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
|
2023-01-21 03:28:35 +00:00
|
|
|
use lightning::routing::utxo::UtxoLookup;
|
2022-10-28 11:31:24 -04:00
|
|
|
use lightning::routing::router::Router;
|
2023-08-22 18:57:06 +03:00
|
|
|
use lightning::routing::scoring::{ScoreUpdate, WriteableScore};
|
2021-01-11 18:03:32 -05:00
|
|
|
use lightning::util::logger::Logger;
|
2022-04-11 13:50:31 -04:00
|
|
|
use lightning::util::persist::Persister;
|
2023-03-09 03:11:13 +00:00
|
|
|
#[cfg(feature = "std")]
|
|
|
|
use lightning::util::wakers::Sleeper;
|
2022-06-01 15:26:07 -07:00
|
|
|
use lightning_rapid_gossip_sync::RapidGossipSync;
|
2023-01-17 00:16:48 +00:00
|
|
|
|
|
|
|
use core::ops::Deref;
|
|
|
|
use core::time::Duration;
|
|
|
|
|
|
|
|
#[cfg(feature = "std")]
|
2021-01-11 18:03:32 -05:00
|
|
|
use std::sync::Arc;
|
2023-01-17 00:16:48 +00:00
|
|
|
#[cfg(feature = "std")]
|
|
|
|
use core::sync::atomic::{AtomicBool, Ordering};
|
|
|
|
#[cfg(feature = "std")]
|
|
|
|
use std::thread::{self, JoinHandle};
|
|
|
|
#[cfg(feature = "std")]
|
|
|
|
use std::time::Instant;
|
2021-01-11 18:03:32 -05:00
|
|
|
|
2023-02-03 11:25:20 -05:00
|
|
|
#[cfg(not(feature = "std"))]
|
|
|
|
use alloc::vec::Vec;
|
2022-08-09 06:01:10 +00:00
|
|
|
|
2021-08-19 11:21:42 -05:00
|
|
|
/// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep
|
2021-01-11 18:03:32 -05:00
|
|
|
/// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
|
|
|
|
/// responsibilities are:
|
2021-08-19 11:21:42 -05:00
|
|
|
/// * Processing [`Event`]s with a user-provided [`EventHandler`].
|
|
|
|
/// * Monitoring whether the [`ChannelManager`] needs to be re-persisted to disk, and if so,
|
2021-01-11 18:03:32 -05:00
|
|
|
/// writing it to disk/backups by invoking the callback given to it at startup.
|
2021-08-19 11:21:42 -05:00
|
|
|
/// [`ChannelManager`] persistence should be done in the background.
|
2023-04-17 11:52:15 -07:00
|
|
|
/// * Calling [`ChannelManager::timer_tick_occurred`], [`ChainMonitor::rebroadcast_pending_claims`]
|
|
|
|
/// and [`PeerManager::timer_tick_occurred`] at the appropriate intervals.
|
2022-08-26 12:54:16 +02:00
|
|
|
/// * Calling [`NetworkGraph::remove_stale_channels_and_tracking`] (if a [`GossipSync`] with a
|
|
|
|
/// [`NetworkGraph`] is provided to [`BackgroundProcessor::start`]).
|
2021-01-11 18:03:32 -05:00
|
|
|
///
|
2021-08-19 11:21:42 -05:00
|
|
|
/// It will also call [`PeerManager::process_events`] periodically though this shouldn't be relied
|
|
|
|
/// upon as doing so may result in high latency.
|
|
|
|
///
|
|
|
|
/// # Note
|
|
|
|
///
|
|
|
|
/// If [`ChannelManager`] persistence fails and the persisted manager becomes out-of-date, then
|
|
|
|
/// there is a risk of channels force-closing on startup when the manager realizes it's outdated.
|
|
|
|
/// However, as long as [`ChannelMonitor`] backups are sound, no funds besides those used for
|
|
|
|
/// unilateral chain closure fees are at risk.
|
|
|
|
///
|
|
|
|
/// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor
|
2023-03-07 13:57:01 -08:00
|
|
|
/// [`Event`]: lightning::events::Event
|
2023-04-29 18:45:59 +00:00
|
|
|
/// [`PeerManager::timer_tick_occurred`]: lightning::ln::peer_handler::PeerManager::timer_tick_occurred
|
|
|
|
/// [`PeerManager::process_events`]: lightning::ln::peer_handler::PeerManager::process_events
|
2023-01-17 00:16:48 +00:00
|
|
|
#[cfg(feature = "std")]
#[must_use = "BackgroundProcessor will immediately stop on drop. It should be stored until shutdown."]
pub struct BackgroundProcessor {
	// Shared flag used to signal the background thread's main loop to exit.
	stop_thread: Arc<AtomicBool>,
	// Handle to the spawned background thread; the thread's result surfaces any
	// persistence I/O error. `Option` so the handle can be taken when joining —
	// presumably by a stop/join method defined elsewhere in this file (not visible here).
	thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
}
|
|
|
|
|
|
|
|
/// Interval (in seconds) between calls to [`ChannelManager::timer_tick_occurred`] in the main loop.
#[cfg(not(test))]
const FRESHNESS_TIMER: u64 = 60;
/// Shortened in tests so timer-driven behavior can be exercised quickly.
#[cfg(test)]
const FRESHNESS_TIMER: u64 = 1;

/// Interval (in seconds) between calls to [`PeerManager::timer_tick_occurred`] in the main loop.
///
/// [`PeerManager::timer_tick_occurred`]: lightning::ln::peer_handler::PeerManager::timer_tick_occurred
#[cfg(all(not(test), not(debug_assertions)))]
const PING_TIMER: u64 = 10;
/// Signature operations take a lot longer without compiler optimisations.
/// Increasing the ping timer allows for this but slower devices will be disconnected if the
/// timeout is reached.
#[cfg(all(not(test), debug_assertions))]
const PING_TIMER: u64 = 30;
/// Shortened in tests so timer-driven behavior can be exercised quickly.
#[cfg(test)]
const PING_TIMER: u64 = 1;

/// Prune the network graph of stale entries hourly.
const NETWORK_PRUNE_TIMER: u64 = 60 * 60;

/// Interval (in seconds) between persisting the scorer via the [`Persister`].
#[cfg(not(test))]
const SCORER_PERSIST_TIMER: u64 = 60 * 60;
/// Shortened in tests so timer-driven behavior can be exercised quickly.
#[cfg(test)]
const SCORER_PERSIST_TIMER: u64 = 1;

/// Delay (in seconds) before the *first* network-graph prune after startup; after that the
/// regular [`NETWORK_PRUNE_TIMER`] cadence applies.
#[cfg(not(test))]
const FIRST_NETWORK_PRUNE_TIMER: u64 = 60;
/// Shortened in tests so timer-driven behavior can be exercised quickly.
#[cfg(test)]
const FIRST_NETWORK_PRUNE_TIMER: u64 = 1;

/// Interval (in seconds) between calls to [`ChainMonitor::rebroadcast_pending_claims`].
#[cfg(not(test))]
const REBROADCAST_TIMER: u64 = 30;
/// Shortened in tests so timer-driven behavior can be exercised quickly.
#[cfg(test)]
const REBROADCAST_TIMER: u64 = 1;

#[cfg(feature = "futures")]
/// core::cmp::min is not currently const, so we define a trivial (and equivalent) replacement
const fn min_u64(a: u64, b: u64) -> u64 { if a < b { a } else { b } }
/// The smallest of all the recurring timers above — the shortest interval the async sleeper
/// may need to wake for.
#[cfg(feature = "futures")]
const FASTEST_TIMER: u64 = min_u64(min_u64(FRESHNESS_TIMER, PING_TIMER),
	min_u64(SCORER_PERSIST_TIMER, min_u64(FIRST_NETWORK_PRUNE_TIMER, REBROADCAST_TIMER)));
|
2023-03-14 21:23:21 +00:00
|
|
|
|
2022-06-02 14:48:32 -07:00
|
|
|
/// Either [`P2PGossipSync`] or [`RapidGossipSync`].
///
/// Wraps whichever gossip-sync mechanism (if any) the background processor should drive, so the
/// same processing loop can handle P2P gossip, rapid gossip sync, or no gossip at all.
pub enum GossipSync<
	P: Deref<Target = P2PGossipSync<G, U, L>>,
	R: Deref<Target = RapidGossipSync<G, L>>,
	G: Deref<Target = NetworkGraph<L>>,
	U: Deref,
	L: Deref,
>
where U::Target: UtxoLookup, L::Target: Logger {
	/// Gossip sync via the lightning peer-to-peer network as defined by BOLT 7.
	P2P(P),
	/// Rapid gossip sync from a trusted server.
	Rapid(R),
	/// No gossip sync.
	None,
}
|
|
|
|
|
|
|
|
impl<
	P: Deref<Target = P2PGossipSync<G, U, L>>,
	R: Deref<Target = RapidGossipSync<G, L>>,
	G: Deref<Target = NetworkGraph<L>>,
	U: Deref,
	L: Deref,
> GossipSync<P, R, G, U, L>
where U::Target: UtxoLookup, L::Target: Logger {
	/// Returns the [`NetworkGraph`] backing this gossip sync, or `None` for
	/// [`GossipSync::None`].
	fn network_graph(&self) -> Option<&G> {
		match self {
			GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
			GossipSync::Rapid(gossip_sync) => Some(gossip_sync.network_graph()),
			GossipSync::None => None,
		}
	}

	/// Returns the [`NetworkGraph`] only when it is currently safe to prune it: always for P2P
	/// sync, but for rapid gossip sync only once the initial sync has completed.
	fn prunable_network_graph(&self) -> Option<&G> {
		match self {
			GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
			GossipSync::Rapid(gossip_sync) => {
				// Pruning mid-initial-sync is unsafe, so withhold the graph until the rapid
				// sync reports completion.
				if gossip_sync.is_initial_sync_complete() {
					Some(gossip_sync.network_graph())
				} else {
					None
				}
			},
			GossipSync::None => None,
		}
	}
}
|
2021-04-13 19:38:31 -04:00
|
|
|
|
2023-03-16 20:35:52 +03:00
|
|
|
/// This is not exported to bindings users as the bindings concretize everything and have constructors for us
impl<P: Deref<Target = P2PGossipSync<G, U, L>>, G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref>
	GossipSync<P, &RapidGossipSync<G, L>, G, U, L>
where
	U::Target: UtxoLookup,
	L::Target: Logger,
{
	/// Initializes a new [`GossipSync::P2P`] variant.
	///
	/// Convenience constructor that pins the unused rapid-sync type parameter so callers need
	/// not spell it out.
	pub fn p2p(gossip_sync: P) -> Self {
		GossipSync::P2P(gossip_sync)
	}
}
|
|
|
|
|
2023-03-16 20:35:52 +03:00
|
|
|
/// This is not exported to bindings users as the bindings concretize everything and have constructors for us
impl<'a, R: Deref<Target = RapidGossipSync<G, L>>, G: Deref<Target = NetworkGraph<L>>, L: Deref>
	GossipSync<
		&P2PGossipSync<G, &'a (dyn UtxoLookup + Send + Sync), L>,
		R,
		G,
		&'a (dyn UtxoLookup + Send + Sync),
		L,
	>
where
	L::Target: Logger,
{
	/// Initializes a new [`GossipSync::Rapid`] variant.
	///
	/// Convenience constructor that pins the unused P2P-sync type parameters so callers need
	/// not spell them out.
	pub fn rapid(gossip_sync: R) -> Self {
		GossipSync::Rapid(gossip_sync)
	}
}
|
|
|
|
|
2023-03-16 20:35:52 +03:00
|
|
|
/// This is not exported to bindings users as the bindings concretize everything and have constructors for us
impl<'a, L: Deref>
	GossipSync<
		&P2PGossipSync<&'a NetworkGraph<L>, &'a (dyn UtxoLookup + Send + Sync), L>,
		&RapidGossipSync<&'a NetworkGraph<L>, L>,
		&'a NetworkGraph<L>,
		&'a (dyn UtxoLookup + Send + Sync),
		L,
	>
where
	L::Target: Logger,
{
	/// Initializes a new [`GossipSync::None`] variant.
	///
	/// Convenience constructor that pins all unused type parameters so callers need not spell
	/// them out.
	pub fn none() -> Self {
		GossipSync::None
	}
}
|
|
|
|
|
2022-10-31 16:07:41 -07:00
|
|
|
fn handle_network_graph_update<L: Deref>(
|
|
|
|
network_graph: &NetworkGraph<L>, event: &Event
|
|
|
|
) where L::Target: Logger {
|
2023-02-13 17:55:42 -05:00
|
|
|
if let Event::PaymentPathFailed {
|
|
|
|
failure: PathFailure::OnPath { network_update: Some(ref upd) }, .. } = event
|
|
|
|
{
|
|
|
|
network_graph.handle_network_update(upd);
|
2022-10-31 16:07:41 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-04-24 16:37:05 -05:00
|
|
|
/// Updates scorer based on event and returns whether an update occurred so we can decide whether
/// to persist.
///
/// The write lock is taken per matched arm (rather than up front) so non-scoring events never
/// contend on the scorer lock. Note that arm order matters: a `PaymentPathFailed` with a
/// `short_channel_id` is always treated as a path failure by the first arm, even if it was also
/// permanently failed.
fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + WriteableScore<'a>>(
	scorer: &'a S, event: &Event
) -> bool {
	match event {
		Event::PaymentPathFailed { ref path, short_channel_id: Some(scid), .. } => {
			let mut score = scorer.write_lock();
			score.payment_path_failed(path, *scid);
		},
		Event::PaymentPathFailed { ref path, payment_failed_permanently: true, .. } => {
			// Reached if the destination explicitly failed it back. We treat this as a successful probe
			// because the payment made it all the way to the destination with sufficient liquidity.
			let mut score = scorer.write_lock();
			score.probe_successful(path);
		},
		Event::PaymentPathSuccessful { path, .. } => {
			let mut score = scorer.write_lock();
			score.payment_path_successful(path);
		},
		Event::ProbeSuccessful { path, .. } => {
			let mut score = scorer.write_lock();
			score.probe_successful(path);
		},
		Event::ProbeFailed { path, short_channel_id: Some(scid), .. } => {
			let mut score = scorer.write_lock();
			score.probe_failed(path, *scid);
		},
		// Any other event carries no scoring information; skip persisting.
		_ => return false,
	}
	true
}
|
|
|
|
|
2022-08-09 06:01:10 +00:00
|
|
|
// Shared main-loop body for the background processor. The caller supplies the concrete actors
// (persister, chain monitor, channel manager, gossip sync, peer manager, logger, scorer) plus
// expressions for event processing, exit-checking (`$loop_exit_check`), sleeping (`$await`) and
// timing (`$get_timer`/`$timer_elapsed`), so the same logic can be instantiated for different
// execution models. `$check_slow_await` enables detection of the process being suspended
// mid-sleep (e.g. a mobile app backgrounded). NOTE(review): the `?` uses below mean the
// expanded body must live in a function returning a compatible `Result`.
macro_rules! define_run_body {
	($persister: ident, $chain_monitor: ident, $process_chain_monitor_events: expr,
	 $channel_manager: ident, $process_channel_manager_events: expr,
	 $gossip_sync: ident, $peer_manager: ident, $logger: ident, $scorer: ident,
	 $loop_exit_check: expr, $await: expr, $get_timer: expr, $timer_elapsed: expr,
	 $check_slow_await: expr)
	=> { {
		// Run one tick/rebroadcast immediately on startup rather than waiting a full interval.
		log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
		$channel_manager.timer_tick_occurred();
		log_trace!($logger, "Rebroadcasting monitor's pending claims on startup");
		$chain_monitor.rebroadcast_pending_claims();

		// Per-task timers tracking when each periodic job last ran.
		let mut last_freshness_call = $get_timer(FRESHNESS_TIMER);
		let mut last_ping_call = $get_timer(PING_TIMER);
		let mut last_prune_call = $get_timer(FIRST_NETWORK_PRUNE_TIMER);
		let mut last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
		let mut last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
		let mut have_pruned = false;

		loop {
			$process_channel_manager_events;
			$process_chain_monitor_events;

			// Note that the PeerManager::process_events may block on ChannelManager's locks,
			// hence it comes last here. When the ChannelManager finishes whatever it's doing,
			// we want to ensure we get into `persist_manager` as quickly as we can, especially
			// without running the normal event processing above and handing events to users.
			//
			// Specifically, on an *extremely* slow machine, we may see ChannelManager start
			// processing a message effectively at any point during this loop. In order to
			// minimize the time between such processing completing and persisting the updated
			// ChannelManager, we want to minimize methods blocking on a ChannelManager
			// generally, and as a fallback place such blocking only immediately before
			// persistence.
			$peer_manager.as_ref().process_events();

			// Exit the loop if the background processor was requested to stop.
			// Checked both before and after the sleep so a stop requested during event
			// processing does not wait out a full sleep interval.
			if $loop_exit_check {
				log_trace!($logger, "Terminating background processor.");
				break;
			}

			// We wait up to 100ms, but track how long it takes to detect being put to sleep,
			// see `await_start`'s use below.
			let mut await_start = None;
			if $check_slow_await { await_start = Some($get_timer(1)); }
			$await;
			// True when the "short" sleep above actually took over a second — a sign the whole
			// process was suspended.
			let await_slow = if $check_slow_await { $timer_elapsed(&mut await_start.unwrap(), 1) } else { false };

			// Exit the loop if the background processor was requested to stop.
			if $loop_exit_check {
				log_trace!($logger, "Terminating background processor.");
				break;
			}

			if $channel_manager.get_and_clear_needs_persistence() {
				log_trace!($logger, "Persisting ChannelManager...");
				$persister.persist_manager(&*$channel_manager)?;
				log_trace!($logger, "Done persisting ChannelManager.");
			}
			if $timer_elapsed(&mut last_freshness_call, FRESHNESS_TIMER) {
				log_trace!($logger, "Calling ChannelManager's timer_tick_occurred");
				$channel_manager.timer_tick_occurred();
				last_freshness_call = $get_timer(FRESHNESS_TIMER);
			}
			if await_slow {
				// On various platforms, we may be starved of CPU cycles for several reasons.
				// E.g. on iOS, if we've been in the background, we will be entirely paused.
				// Similarly, if we're on a desktop platform and the device has been asleep, we
				// may not get any cycles.
				// We detect this by checking if our max-100ms-sleep, above, ran longer than a
				// full second, at which point we assume sockets may have been killed (they
				// appear to be at least on some platforms, even if it has only been a second).
				// Note that we have to take care to not get here just because user event
				// processing was slow at the top of the loop. For example, the sample client
				// may call Bitcoin Core RPCs during event handling, which very often takes
				// more than a handful of seconds to complete, and shouldn't disconnect all our
				// peers.
				log_trace!($logger, "100ms sleep took more than a second, disconnecting peers.");
				$peer_manager.as_ref().disconnect_all_peers();
				last_ping_call = $get_timer(PING_TIMER);
			} else if $timer_elapsed(&mut last_ping_call, PING_TIMER) {
				log_trace!($logger, "Calling PeerManager's timer_tick_occurred");
				$peer_manager.as_ref().timer_tick_occurred();
				last_ping_call = $get_timer(PING_TIMER);
			}

			// Note that we want to run a graph prune once not long after startup before
			// falling back to our usual hourly prunes. This avoids short-lived clients never
			// pruning their network graph. We run once 60 seconds after startup before
			// continuing our normal cadence. For RGS, since 60 seconds is likely too long,
			// we prune after an initial sync completes.
			let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
			let prune_timer_elapsed = $timer_elapsed(&mut last_prune_call, prune_timer);
			let should_prune = match $gossip_sync {
				GossipSync::Rapid(_) => !have_pruned || prune_timer_elapsed,
				_ => prune_timer_elapsed,
			};
			if should_prune {
				// The network graph must not be pruned while rapid sync completion is pending
				if let Some(network_graph) = $gossip_sync.prunable_network_graph() {
					#[cfg(feature = "std")] {
						log_trace!($logger, "Pruning and persisting network graph.");
						network_graph.remove_stale_channels_and_tracking();
					}
					#[cfg(not(feature = "std"))] {
						log_warn!($logger, "Not pruning network graph, consider enabling `std` or doing so manually with remove_stale_channels_and_tracking_with_time.");
						log_trace!($logger, "Persisting network graph.");
					}

					if let Err(e) = $persister.persist_graph(network_graph) {
						log_error!($logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
					}

					have_pruned = true;
				}
				// Recompute: `have_pruned` may have just flipped, moving us to the hourly cadence.
				let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
				last_prune_call = $get_timer(prune_timer);
			}

			if $timer_elapsed(&mut last_scorer_persist_call, SCORER_PERSIST_TIMER) {
				if let Some(ref scorer) = $scorer {
					log_trace!($logger, "Persisting scorer");
					if let Err(e) = $persister.persist_scorer(&scorer) {
						log_error!($logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
					}
				}
				last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
			}

			if $timer_elapsed(&mut last_rebroadcast_call, REBROADCAST_TIMER) {
				log_trace!($logger, "Rebroadcasting monitor's pending claims");
				$chain_monitor.rebroadcast_pending_claims();
				last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
			}
		}

		// After we exit, ensure we persist the ChannelManager one final time - this avoids
		// some races where users quit while channel updates were in-flight, with
		// ChannelMonitor update(s) persisted without a corresponding ChannelManager update.
		$persister.persist_manager(&*$channel_manager)?;

		// Persist Scorer on exit
		if let Some(ref scorer) = $scorer {
			$persister.persist_scorer(&scorer)?;
		}

		// Persist NetworkGraph on exit
		if let Some(network_graph) = $gossip_sync.network_graph() {
			$persister.persist_graph(network_graph)?;
		}

		Ok(())
	} }
}
|
|
|
|
|
2023-03-30 21:52:03 +00:00
|
|
|
#[cfg(feature = "futures")]
pub(crate) mod futures_util {
	use core::future::Future;
	use core::task::{Poll, Waker, RawWaker, RawWakerVTable};
	use core::pin::Pin;
	use core::marker::Unpin;
	/// A future combinator which completes when the *first* of its three inner futures
	/// completes, with a bias toward `a`, then `b`, then `c` (in that poll order).
	pub(crate) struct Selector<
		A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
	> {
		pub a: A,
		pub b: B,
		pub c: C,
	}
	/// Which of the [`Selector`]'s three futures finished first, carrying `c`'s boolean output
	/// in the `C` case.
	pub(crate) enum SelectorOutput {
		A, B, C(bool),
	}

	impl<
		A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
	> Future for Selector<A, B, C> {
		type Output = SelectorOutput;
		fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll<SelectorOutput> {
			// Poll each inner future in priority order, returning at the first one that is
			// ready. `Pin::new` is fine here as all three are required to be `Unpin`.
			match Pin::new(&mut self.a).poll(ctx) {
				Poll::Ready(()) => { return Poll::Ready(SelectorOutput::A); },
				Poll::Pending => {},
			}
			match Pin::new(&mut self.b).poll(ctx) {
				Poll::Ready(()) => { return Poll::Ready(SelectorOutput::B); },
				Poll::Pending => {},
			}
			match Pin::new(&mut self.c).poll(ctx) {
				Poll::Ready(res) => { return Poll::Ready(SelectorOutput::C(res)); },
				Poll::Pending => {},
			}
			Poll::Pending
		}
	}

	// If we want to poll a future without an async context to figure out if it has completed or
	// not without awaiting, we need a Waker, which needs a vtable...we fill it with dummy values
	// but sadly there's a good bit of boilerplate here.
	fn dummy_waker_clone(_: *const ()) -> RawWaker { RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE) }
	fn dummy_waker_action(_: *const ()) { }

	const DUMMY_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
		dummy_waker_clone, dummy_waker_action, dummy_waker_action, dummy_waker_action);
	/// Builds a no-op [`Waker`] suitable for polling a future outside of an async context.
	// SAFETY: the vtable functions above never dereference the data pointer (it is always
	// null), `clone` returns an identical null-pointer waker, and wake/drop are no-ops, so
	// every `RawWakerVTable` contract is trivially upheld.
	pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } }
}
|
2023-03-30 22:11:22 +00:00
|
|
|
#[cfg(feature = "futures")]
|
|
|
|
use futures_util::{Selector, SelectorOutput, dummy_waker};
|
|
|
|
#[cfg(feature = "futures")]
|
|
|
|
use core::task;
|
2023-03-30 21:52:03 +00:00
|
|
|
|
2022-08-09 06:01:10 +00:00
|
|
|
/// Processes background events in a future.
|
|
|
|
///
|
|
|
|
/// `sleeper` should return a future which completes in the given amount of time and returns a
|
2022-11-09 17:35:26 +00:00
|
|
|
/// boolean indicating whether the background processing should exit. Once `sleeper` returns a
|
2023-02-02 14:55:58 -06:00
|
|
|
/// future which outputs `true`, the loop will exit and this function's future will complete.
|
|
|
|
/// The `sleeper` future is free to return early after it has triggered the exit condition.
|
2022-08-09 06:01:10 +00:00
|
|
|
///
|
|
|
|
/// See [`BackgroundProcessor::start`] for information on which actions this handles.
|
2023-01-17 00:16:48 +00:00
|
|
|
///
|
|
|
|
/// Requires the `futures` feature. Note that while this method is available without the `std`
|
|
|
|
/// feature, doing so will skip calling [`NetworkGraph::remove_stale_channels_and_tracking`],
|
|
|
|
/// you should call [`NetworkGraph::remove_stale_channels_and_tracking_with_time`] regularly
|
|
|
|
/// manually instead.
|
2023-03-14 21:23:21 +00:00
|
|
|
///
|
|
|
|
/// The `mobile_interruptable_platform` flag should be set if we're currently running on a
|
|
|
|
/// mobile device, where we may need to check for interruption of the application regularly. If you
|
|
|
|
/// are unsure, you should set the flag, as the performance impact of it is minimal unless there
|
|
|
|
/// are hundreds or thousands of simultaneous process calls running.
|
2023-02-02 14:55:58 -06:00
|
|
|
///
|
|
|
|
/// For example, in order to process background events in a [Tokio](https://tokio.rs/) task, you
|
|
|
|
/// could setup `process_events_async` like this:
|
|
|
|
/// ```
|
2023-08-01 13:37:46 +02:00
|
|
|
/// # use lightning::io;
|
2023-09-30 17:40:42 +00:00
|
|
|
/// # use std::sync::{Arc, RwLock};
|
2023-08-01 13:37:46 +02:00
|
|
|
/// # use std::sync::atomic::{AtomicBool, Ordering};
|
|
|
|
/// # use lightning_background_processor::{process_events_async, GossipSync};
|
|
|
|
/// # struct MyStore {}
|
|
|
|
/// # impl lightning::util::persist::KVStore for MyStore {
|
2023-09-28 17:28:04 +00:00
|
|
|
/// # fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> io::Result<Vec<u8>> { Ok(Vec::new()) }
|
|
|
|
/// # fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> { Ok(()) }
|
|
|
|
/// # fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> io::Result<()> { Ok(()) }
|
|
|
|
/// # fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result<Vec<String>> { Ok(Vec::new()) }
|
2023-02-02 14:55:58 -06:00
|
|
|
/// # }
|
|
|
|
/// # struct MyEventHandler {}
|
|
|
|
/// # impl MyEventHandler {
|
|
|
|
/// # async fn handle_event(&self, _: lightning::events::Event) {}
|
|
|
|
/// # }
|
|
|
|
/// # #[derive(Eq, PartialEq, Clone, Hash)]
|
|
|
|
/// # struct MySocketDescriptor {}
|
|
|
|
/// # impl lightning::ln::peer_handler::SocketDescriptor for MySocketDescriptor {
|
|
|
|
/// # fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize { 0 }
|
|
|
|
/// # fn disconnect_socket(&mut self) {}
|
|
|
|
/// # }
|
|
|
|
/// # type MyBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface + Send + Sync;
|
|
|
|
/// # type MyFeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync;
|
2023-04-28 14:11:37 -05:00
|
|
|
/// # type MyNodeSigner = dyn lightning::sign::NodeSigner + Send + Sync;
|
2023-02-02 14:55:58 -06:00
|
|
|
/// # type MyUtxoLookup = dyn lightning::routing::utxo::UtxoLookup + Send + Sync;
|
|
|
|
/// # type MyFilter = dyn lightning::chain::Filter + Send + Sync;
|
|
|
|
/// # type MyLogger = dyn lightning::util::logger::Logger + Send + Sync;
|
2023-08-01 13:37:46 +02:00
|
|
|
/// # type MyChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::sign::InMemorySigner, Arc<MyFilter>, Arc<MyBroadcaster>, Arc<MyFeeEstimator>, Arc<MyLogger>, Arc<MyStore>>;
|
2023-09-30 17:32:28 +00:00
|
|
|
/// # type MyPeerManager = lightning::ln::peer_handler::SimpleArcPeerManager<MySocketDescriptor, MyChainMonitor, MyBroadcaster, MyFeeEstimator, Arc<MyUtxoLookup>, MyLogger>;
|
2023-02-02 14:55:58 -06:00
|
|
|
/// # type MyNetworkGraph = lightning::routing::gossip::NetworkGraph<Arc<MyLogger>>;
|
|
|
|
/// # type MyGossipSync = lightning::routing::gossip::P2PGossipSync<Arc<MyNetworkGraph>, Arc<MyUtxoLookup>, Arc<MyLogger>>;
|
|
|
|
/// # type MyChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager<MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyLogger>;
|
2023-09-30 17:40:42 +00:00
|
|
|
/// # type MyScorer = RwLock<lightning::routing::scoring::ProbabilisticScorer<Arc<MyNetworkGraph>, Arc<MyLogger>>>;
|
2023-02-02 14:55:58 -06:00
|
|
|
///
|
2023-08-01 13:37:46 +02:00
|
|
|
/// # async fn setup_background_processing(my_persister: Arc<MyStore>, my_event_handler: Arc<MyEventHandler>, my_chain_monitor: Arc<MyChainMonitor>, my_channel_manager: Arc<MyChannelManager>, my_gossip_sync: Arc<MyGossipSync>, my_logger: Arc<MyLogger>, my_scorer: Arc<MyScorer>, my_peer_manager: Arc<MyPeerManager>) {
|
2023-02-02 14:55:58 -06:00
|
|
|
/// let background_persister = Arc::clone(&my_persister);
|
|
|
|
/// let background_event_handler = Arc::clone(&my_event_handler);
|
|
|
|
/// let background_chain_mon = Arc::clone(&my_chain_monitor);
|
|
|
|
/// let background_chan_man = Arc::clone(&my_channel_manager);
|
|
|
|
/// let background_gossip_sync = GossipSync::p2p(Arc::clone(&my_gossip_sync));
|
|
|
|
/// let background_peer_man = Arc::clone(&my_peer_manager);
|
|
|
|
/// let background_logger = Arc::clone(&my_logger);
|
|
|
|
/// let background_scorer = Arc::clone(&my_scorer);
|
|
|
|
///
|
|
|
|
/// // Setup the sleeper.
|
|
|
|
/// let (stop_sender, stop_receiver) = tokio::sync::watch::channel(());
|
|
|
|
///
|
|
|
|
/// let sleeper = move |d| {
|
|
|
|
/// let mut receiver = stop_receiver.clone();
|
|
|
|
/// Box::pin(async move {
|
|
|
|
/// tokio::select!{
|
|
|
|
/// _ = tokio::time::sleep(d) => false,
|
|
|
|
/// _ = receiver.changed() => true,
|
|
|
|
/// }
|
|
|
|
/// })
|
|
|
|
/// };
|
|
|
|
///
|
|
|
|
/// let mobile_interruptable_platform = false;
|
|
|
|
///
|
|
|
|
/// let handle = tokio::spawn(async move {
|
|
|
|
/// process_events_async(
|
|
|
|
/// background_persister,
|
|
|
|
/// |e| background_event_handler.handle_event(e),
|
|
|
|
/// background_chain_mon,
|
|
|
|
/// background_chan_man,
|
|
|
|
/// background_gossip_sync,
|
|
|
|
/// background_peer_man,
|
|
|
|
/// background_logger,
|
|
|
|
/// Some(background_scorer),
|
|
|
|
/// sleeper,
|
|
|
|
/// mobile_interruptable_platform,
|
|
|
|
/// )
|
|
|
|
/// .await
|
|
|
|
/// .expect("Failed to process events");
|
|
|
|
/// });
|
|
|
|
///
|
|
|
|
/// // Stop the background processing.
|
|
|
|
/// stop_sender.send(()).unwrap();
|
|
|
|
/// handle.await.unwrap();
|
|
|
|
/// # }
|
|
|
|
///```
|
2022-08-09 06:01:10 +00:00
|
|
|
#[cfg(feature = "futures")]
pub async fn process_events_async<
	'a,
	UL: 'static + Deref + Send + Sync,
	CF: 'static + Deref + Send + Sync,
	CW: 'static + Deref + Send + Sync,
	T: 'static + Deref + Send + Sync,
	ES: 'static + Deref + Send + Sync,
	NS: 'static + Deref + Send + Sync,
	SP: 'static + Deref + Send + Sync,
	F: 'static + Deref + Send + Sync,
	R: 'static + Deref + Send + Sync,
	G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
	L: 'static + Deref + Send + Sync,
	P: 'static + Deref + Send + Sync,
	EventHandlerFuture: core::future::Future<Output = ()>,
	EventHandler: Fn(Event) -> EventHandlerFuture,
	PS: 'static + Deref + Send,
	M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
	CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
	PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
	RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
	APM: APeerManager + Send + Sync,
	PM: 'static + Deref<Target = APM> + Send + Sync,
	S: 'static + Deref<Target = SC> + Send + Sync,
	SC: for<'b> WriteableScore<'b>,
	SleepFuture: core::future::Future<Output = bool> + core::marker::Unpin,
	Sleeper: Fn(Duration) -> SleepFuture
>(
	persister: PS, event_handler: EventHandler, chain_monitor: M, channel_manager: CM,
	gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
	sleeper: Sleeper, mobile_interruptable_platform: bool,
) -> Result<(), lightning::io::Error>
where
	UL::Target: 'static + UtxoLookup,
	CF::Target: 'static + chain::Filter,
	CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
	T::Target: 'static + BroadcasterInterface,
	ES::Target: 'static + EntropySource,
	NS::Target: 'static + NodeSigner,
	SP::Target: 'static + SignerProvider,
	F::Target: 'static + FeeEstimator,
	R::Target: 'static + Router,
	L::Target: 'static + Logger,
	P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
	PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
{
	// Flipped to `true` by the polling closures below once the user-provided sleep future
	// resolves to `true`, i.e. once the caller has requested that the loop exit.
	let mut should_break = false;
	// Decorates the user's `event_handler`: before delegating the event, apply any
	// network-graph update the event implies and, if the event updated the scorer,
	// persist the scorer so the update survives a restart.
	let async_event_handler = |event| {
		let network_graph = gossip_sync.network_graph();
		let event_handler = &event_handler;
		let scorer = &scorer;
		let logger = &logger;
		let persister = &persister;
		async move {
			if let Some(network_graph) = network_graph {
				handle_network_graph_update(network_graph, &event)
			}
			if let Some(ref scorer) = scorer {
				if update_scorer(scorer, &event) {
					log_trace!(logger, "Persisting scorer after update");
					if let Err(e) = persister.persist_scorer(&scorer) {
						log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
					}
				}
			}
			// Finally hand the event to the caller's handler.
			event_handler(event).await;
		}
	};
	// `define_run_body!` expands into the main processing loop shared with the
	// thread-based `BackgroundProcessor::start`; the block argument below is the
	// per-iteration "sleep" step, racing wake-up futures against the user sleeper.
	define_run_body!(persister,
		chain_monitor, chain_monitor.process_pending_events_async(async_event_handler).await,
		channel_manager, channel_manager.process_pending_events_async(async_event_handler).await,
		gossip_sync, peer_manager, logger, scorer, should_break, {
			let fut = Selector {
				a: channel_manager.get_event_or_persistence_needed_future(),
				b: chain_monitor.get_update_future(),
				// Poll more often on mobile so interruption requests are noticed quickly.
				c: sleeper(if mobile_interruptable_platform { Duration::from_millis(100) } else { Duration::from_secs(FASTEST_TIMER) }),
			};
			match fut.await {
				// Woken by the channel manager or chain monitor: just loop again.
				SelectorOutput::A|SelectorOutput::B => {},
				// The user sleeper resolved; its boolean says whether to exit.
				SelectorOutput::C(exit) => {
					should_break = exit;
				}
			}
		}, |t| sleeper(Duration::from_secs(t)),
		// Non-blocking poll of an in-flight sleep future, used by the macro to check
		// timers without awaiting; a dummy waker suffices as we re-poll every iteration.
		|fut: &mut SleepFuture, _| {
			let mut waker = dummy_waker();
			let mut ctx = task::Context::from_waker(&mut waker);
			match core::pin::Pin::new(fut).poll(&mut ctx) {
				task::Poll::Ready(exit) => { should_break = exit; true },
				task::Poll::Pending => false,
			}
		}, mobile_interruptable_platform)
}
|
|
|
|
|
2023-01-17 00:16:48 +00:00
|
|
|
#[cfg(feature = "std")]
impl BackgroundProcessor {
	/// Start a background thread that takes care of responsibilities enumerated in the [top-level
	/// documentation].
	///
	/// The thread runs indefinitely unless the object is dropped, [`stop`] is called, or
	/// [`Persister::persist_manager`] returns an error. In case of an error, the error is retrieved by calling
	/// either [`join`] or [`stop`].
	///
	/// # Data Persistence
	///
	/// [`Persister::persist_manager`] is responsible for writing out the [`ChannelManager`] to disk, and/or
	/// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
	/// [`ChannelManager`]. See the `lightning-persister` crate for LDK's
	/// provided implementation.
	///
	/// [`Persister::persist_graph`] is responsible for writing out the [`NetworkGraph`] to disk, if
	/// [`GossipSync`] is supplied. See [`NetworkGraph::write`] for writing out a [`NetworkGraph`].
	/// See the `lightning-persister` crate for LDK's provided implementation.
	///
	/// Typically, users should either implement [`Persister::persist_manager`] to never return an
	/// error or call [`join`] and handle any error that may arise. For the latter case,
	/// `BackgroundProcessor` must be restarted by calling `start` again after handling the error.
	///
	/// # Event Handling
	///
	/// `event_handler` is responsible for handling events that users should be notified of (e.g.,
	/// payment failed). [`BackgroundProcessor`] may decorate the given [`EventHandler`] with common
	/// functionality implemented by other handlers.
	/// * [`P2PGossipSync`] if given will update the [`NetworkGraph`] based on payment failures.
	///
	/// # Rapid Gossip Sync
	///
	/// If rapid gossip sync is meant to run at startup, pass [`RapidGossipSync`] via `gossip_sync`
	/// to indicate that the [`BackgroundProcessor`] should not prune the [`NetworkGraph`] instance
	/// until the [`RapidGossipSync`] instance completes its first sync.
	///
	/// [top-level documentation]: BackgroundProcessor
	/// [`join`]: Self::join
	/// [`stop`]: Self::stop
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	/// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
	/// [`Persister::persist_manager`]: lightning::util::persist::Persister::persist_manager
	/// [`Persister::persist_graph`]: lightning::util::persist::Persister::persist_graph
	/// [`NetworkGraph`]: lightning::routing::gossip::NetworkGraph
	/// [`NetworkGraph::write`]: lightning::routing::gossip::NetworkGraph#impl-Writeable
	pub fn start<
		'a,
		UL: 'static + Deref + Send + Sync,
		CF: 'static + Deref + Send + Sync,
		CW: 'static + Deref + Send + Sync,
		T: 'static + Deref + Send + Sync,
		ES: 'static + Deref + Send + Sync,
		NS: 'static + Deref + Send + Sync,
		SP: 'static + Deref + Send + Sync,
		F: 'static + Deref + Send + Sync,
		R: 'static + Deref + Send + Sync,
		G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
		L: 'static + Deref + Send + Sync,
		P: 'static + Deref + Send + Sync,
		EH: 'static + EventHandler + Send,
		PS: 'static + Deref + Send,
		M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
		CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
		PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
		RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
		APM: APeerManager + Send + Sync,
		PM: 'static + Deref<Target = APM> + Send + Sync,
		S: 'static + Deref<Target = SC> + Send + Sync,
		SC: for <'b> WriteableScore<'b>,
	>(
		persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
		gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
	) -> Self
	where
		UL::Target: 'static + UtxoLookup,
		CF::Target: 'static + chain::Filter,
		CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
		T::Target: 'static + BroadcasterInterface,
		ES::Target: 'static + EntropySource,
		NS::Target: 'static + NodeSigner,
		SP::Target: 'static + SignerProvider,
		F::Target: 'static + FeeEstimator,
		R::Target: 'static + Router,
		L::Target: 'static + Logger,
		P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
		PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
	{
		// Shared flag the processing thread polls (Acquire) and `stop` sets (Release).
		let stop_thread = Arc::new(AtomicBool::new(false));
		let stop_thread_clone = stop_thread.clone();
		let handle = thread::spawn(move || -> Result<(), std::io::Error> {
			// Decorates the user's handler: apply network-graph updates carried by the
			// event and persist the scorer when an event changed it, before delegating.
			let event_handler = |event| {
				let network_graph = gossip_sync.network_graph();
				if let Some(network_graph) = network_graph {
					handle_network_graph_update(network_graph, &event)
				}
				if let Some(ref scorer) = scorer {
					if update_scorer(scorer, &event) {
						log_trace!(logger, "Persisting scorer after update");
						if let Err(e) = persister.persist_scorer(&scorer) {
							log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
						}
					}
				}
				event_handler.handle_event(event);
			};
			// Same core loop as `process_events_async`, but blocking: each iteration
			// sleeps up to 100ms waiting on channel-manager/chain-monitor wake-ups.
			define_run_body!(persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
				channel_manager, channel_manager.process_pending_events(&event_handler),
				gossip_sync, peer_manager, logger, scorer, stop_thread.load(Ordering::Acquire),
				{ Sleeper::from_two_futures(
					channel_manager.get_event_or_persistence_needed_future(),
					chain_monitor.get_update_future()
				).wait_timeout(Duration::from_millis(100)); },
				|_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false)
		});
		Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
	}

	/// Join `BackgroundProcessor`'s thread, returning any error that occurred while persisting
	/// [`ChannelManager`].
	///
	/// # Panics
	///
	/// This function panics if the background thread has panicked such as while persisting or
	/// handling events.
	///
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	pub fn join(mut self) -> Result<(), std::io::Error> {
		assert!(self.thread_handle.is_some());
		self.join_thread()
	}

	/// Stop `BackgroundProcessor`'s thread, returning any error that occurred while persisting
	/// [`ChannelManager`].
	///
	/// # Panics
	///
	/// This function panics if the background thread has panicked such as while persisting or
	/// handling events.
	///
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	pub fn stop(mut self) -> Result<(), std::io::Error> {
		assert!(self.thread_handle.is_some());
		self.stop_and_join_thread()
	}

	// Signals the processing thread to exit, then joins it.
	fn stop_and_join_thread(&mut self) -> Result<(), std::io::Error> {
		self.stop_thread.store(true, Ordering::Release);
		self.join_thread()
	}

	// Joins the thread if it is still running; a second call (handle already taken,
	// e.g. from `Drop` after `stop`) is a no-op returning `Ok(())`.
	fn join_thread(&mut self) -> Result<(), std::io::Error> {
		match self.thread_handle.take() {
			Some(handle) => handle.join().unwrap(),
			None => Ok(()),
		}
	}
}
|
|
|
|
|
2023-01-17 00:16:48 +00:00
|
|
|
#[cfg(feature = "std")]
impl Drop for BackgroundProcessor {
	// Ensure the processing thread is stopped and joined when the processor is dropped
	// without an explicit `stop`/`join`; panics if persistence failed, surfacing the error.
	fn drop(&mut self) {
		self.stop_and_join_thread().unwrap();
	}
}
|
|
|
|
|
2023-01-17 00:16:48 +00:00
|
|
|
#[cfg(all(feature = "std", test))]
|
2021-01-11 18:03:32 -05:00
|
|
|
mod tests {
|
2023-06-01 12:40:57 +02:00
|
|
|
use bitcoin::blockdata::constants::{genesis_block, ChainHash};
|
2022-08-09 17:39:51 +02:00
|
|
|
use bitcoin::blockdata::locktime::PackedLockTime;
|
2021-01-11 18:03:32 -05:00
|
|
|
use bitcoin::blockdata::transaction::{Transaction, TxOut};
|
|
|
|
use bitcoin::network::constants::Network;
|
2023-02-03 11:25:20 -05:00
|
|
|
use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1};
|
2022-04-24 16:03:26 +00:00
|
|
|
use lightning::chain::{BestBlock, Confirm, chainmonitor};
|
2021-05-11 08:34:57 -07:00
|
|
|
use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
|
2023-04-28 14:11:37 -05:00
|
|
|
use lightning::sign::{InMemorySigner, KeysManager};
|
2021-01-11 18:03:32 -05:00
|
|
|
use lightning::chain::transaction::OutPoint;
|
2023-03-07 13:57:01 -08:00
|
|
|
use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
|
2023-03-10 16:30:37 +01:00
|
|
|
use lightning::{get_event_msg, get_event};
|
2023-02-03 11:25:20 -05:00
|
|
|
use lightning::ln::PaymentHash;
|
2023-02-03 11:14:53 -05:00
|
|
|
use lightning::ln::channelmanager;
|
2023-02-03 11:25:20 -05:00
|
|
|
use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
|
|
|
|
use lightning::ln::features::{ChannelFeatures, NodeFeatures};
|
2023-04-27 13:31:04 -07:00
|
|
|
use lightning::ln::functional_test_utils::*;
|
2021-07-31 09:32:27 -05:00
|
|
|
use lightning::ln::msgs::{ChannelMessageHandler, Init};
|
2021-08-05 14:51:17 +09:00
|
|
|
use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
|
2023-02-03 11:14:53 -05:00
|
|
|
use lightning::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
|
2023-04-09 13:50:44 -04:00
|
|
|
use lightning::routing::router::{DefaultRouter, Path, RouteHop};
|
2023-10-21 01:08:38 +00:00
|
|
|
use lightning::routing::scoring::{ChannelUsage, ScoreUpdate, ScoreLookUp, LockableScore};
|
2021-01-11 18:03:32 -05:00
|
|
|
use lightning::util::config::UserConfig;
|
|
|
|
use lightning::util::ser::Writeable;
|
|
|
|
use lightning::util::test_utils;
|
2023-09-28 17:06:20 +00:00
|
|
|
use lightning::util::persist::{KVStore,
|
|
|
|
CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_KEY,
|
|
|
|
NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY,
|
|
|
|
SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY};
|
2023-08-01 13:37:46 +02:00
|
|
|
use lightning_persister::fs_store::FilesystemStore;
|
2023-02-03 11:14:53 -05:00
|
|
|
use std::collections::VecDeque;
|
2023-04-26 14:57:18 +02:00
|
|
|
use std::{fs, env};
|
2021-01-11 18:03:32 -05:00
|
|
|
use std::path::PathBuf;
|
|
|
|
use std::sync::{Arc, Mutex};
|
2022-06-01 15:26:07 -07:00
|
|
|
use std::sync::mpsc::SyncSender;
|
2021-01-11 18:03:32 -05:00
|
|
|
use std::time::Duration;
|
2022-06-01 15:26:07 -07:00
|
|
|
use lightning_rapid_gossip_sync::RapidGossipSync;
|
2022-06-02 14:48:32 -07:00
|
|
|
use super::{BackgroundProcessor, GossipSync, FRESHNESS_TIMER};
|
2021-01-11 18:03:32 -05:00
|
|
|
|
2021-05-11 08:34:57 -07:00
|
|
|
const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;
|
|
|
|
|
2022-10-14 13:24:02 +02:00
|
|
|
#[derive(Clone, Hash, PartialEq, Eq)]
|
2021-04-02 18:40:57 -04:00
|
|
|
struct TestDescriptor{}
|
|
|
|
impl SocketDescriptor for TestDescriptor {
|
|
|
|
fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
|
|
|
|
0
|
|
|
|
}
|
|
|
|
|
|
|
|
fn disconnect_socket(&mut self) {}
|
|
|
|
}
|
|
|
|
|
2023-10-21 01:08:38 +00:00
|
|
|
	// The scorer lock type differs between builds: C bindings use LDK's own
	// `MultiThreadedLockableScore` wrapper, native builds use a plain `Mutex`.
	#[cfg(c_bindings)]
	type LockingWrapper<T> = lightning::routing::scoring::MultiThreadedLockableScore<T>;
	#[cfg(not(c_bindings))]
	type LockingWrapper<T> = Mutex<T>;
|
|
|
|
|
2023-06-29 10:41:38 +08:00
|
|
|
	// Concrete `ChannelManager` instantiation used throughout the tests: test utilities
	// for chain access/broadcast/fees/logging, `KeysManager` for all three signer roles,
	// and a `DefaultRouter` scoring via the `TestScorer` below.
	type ChannelManager =
		channelmanager::ChannelManager<
			Arc<ChainMonitor>,
			Arc<test_utils::TestBroadcaster>,
			Arc<KeysManager>,
			Arc<KeysManager>,
			Arc<KeysManager>,
			Arc<test_utils::TestFeeEstimator>,
			Arc<DefaultRouter<
				Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
				Arc<test_utils::TestLogger>,
				Arc<LockingWrapper<TestScorer>>,
				(),
				TestScorer>
			>,
			Arc<test_utils::TestLogger>>;
|
2023-02-03 11:14:53 -05:00
|
|
|
|
2023-08-01 13:37:46 +02:00
|
|
|
	// Test `ChainMonitor` persisting monitors via a `FilesystemStore`.
	type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemStore>>;

	// Shorthand for the two gossip-sync flavors exercised by the tests.
	type PGS = Arc<P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>;
	type RGS = Arc<RapidGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>>>;
|
|
|
|
|
2021-01-11 18:03:32 -05:00
|
|
|
	// One simulated node's full complement of LDK objects, as wired up by the tests.
	struct Node {
		// The node's channel manager (see the `ChannelManager` alias above).
		node: Arc<ChannelManager>,
		// Both gossip-sync flavors are constructed; tests pick one via the helpers below.
		p2p_gossip_sync: PGS,
		rapid_gossip_sync: RGS,
		peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, IgnoringMessageHandler, Arc<test_utils::TestLogger>, IgnoringMessageHandler, Arc<KeysManager>>>,
		chain_monitor: Arc<ChainMonitor>,
		// On-disk store backing persistence; its directory is removed in `Drop`.
		kv_store: Arc<FilesystemStore>,
		tx_broadcaster: Arc<test_utils::TestBroadcaster>,
		network_graph: Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
		logger: Arc<test_utils::TestLogger>,
		best_block: BestBlock,
		scorer: Arc<LockingWrapper<TestScorer>>,
	}
|
|
|
|
|
|
|
|
impl Node {
|
|
|
|
fn p2p_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
|
|
|
|
GossipSync::P2P(self.p2p_gossip_sync.clone())
|
|
|
|
}
|
|
|
|
|
|
|
|
fn rapid_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
|
|
|
|
GossipSync::Rapid(self.rapid_gossip_sync.clone())
|
|
|
|
}
|
|
|
|
|
|
|
|
fn no_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
|
|
|
|
GossipSync::None
|
|
|
|
}
|
2021-01-11 18:03:32 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
impl Drop for Node {
|
|
|
|
fn drop(&mut self) {
|
2023-08-01 13:37:46 +02:00
|
|
|
let data_dir = self.kv_store.get_data_dir();
|
2021-01-11 18:03:32 -05:00
|
|
|
match fs::remove_dir_all(data_dir.clone()) {
|
2023-08-01 13:37:46 +02:00
|
|
|
Err(e) => println!("Failed to remove test store directory {}: {}", data_dir.display(), e),
|
2021-01-11 18:03:32 -05:00
|
|
|
_ => {}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-03-21 20:13:14 -07:00
|
|
|
	// Test persister wrapping a `FilesystemStore`, with optional fault injection:
	// each `*_error` field, when set, makes the corresponding `write` fail, and
	// `graph_persistence_notifier` signals whenever the network graph is written.
	struct Persister {
		graph_error: Option<(std::io::ErrorKind, &'static str)>,
		graph_persistence_notifier: Option<SyncSender<()>>,
		manager_error: Option<(std::io::ErrorKind, &'static str)>,
		scorer_error: Option<(std::io::ErrorKind, &'static str)>,
		kv_store: FilesystemStore,
	}
|
|
|
|
|
|
|
|
impl Persister {
|
2023-08-01 13:37:46 +02:00
|
|
|
fn new(data_dir: PathBuf) -> Self {
|
|
|
|
let kv_store = FilesystemStore::new(data_dir);
|
|
|
|
Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, kv_store }
|
2022-03-28 19:36:43 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
|
|
|
|
Self { graph_error: Some((error, message)), ..self }
|
|
|
|
}
|
|
|
|
|
2022-06-01 15:26:07 -07:00
|
|
|
fn with_graph_persistence_notifier(self, sender: SyncSender<()>) -> Self {
|
|
|
|
Self { graph_persistence_notifier: Some(sender), ..self }
|
|
|
|
}
|
|
|
|
|
2022-03-28 19:36:43 -07:00
|
|
|
fn with_manager_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
|
|
|
|
Self { manager_error: Some((error, message)), ..self }
|
|
|
|
}
|
2022-04-27 22:16:38 -07:00
|
|
|
|
|
|
|
fn with_scorer_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
|
|
|
|
Self { scorer_error: Some((error, message)), ..self }
|
|
|
|
}
|
2022-03-21 20:13:14 -07:00
|
|
|
}
|
|
|
|
|
2023-08-01 13:37:46 +02:00
|
|
|
impl KVStore for Persister {
|
2023-09-28 17:28:04 +00:00
|
|
|
fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> lightning::io::Result<Vec<u8>> {
|
|
|
|
self.kv_store.read(primary_namespace, secondary_namespace, key)
|
2023-08-01 13:37:46 +02:00
|
|
|
}
|
|
|
|
|
2023-09-28 17:28:04 +00:00
|
|
|
fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> lightning::io::Result<()> {
|
|
|
|
if primary_namespace == CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE &&
|
|
|
|
secondary_namespace == CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE &&
|
2023-08-01 13:37:46 +02:00
|
|
|
key == CHANNEL_MANAGER_PERSISTENCE_KEY
|
|
|
|
{
|
2022-04-11 13:50:31 -04:00
|
|
|
if let Some((error, message)) = self.manager_error {
|
|
|
|
return Err(std::io::Error::new(error, message))
|
|
|
|
}
|
2022-03-28 19:36:43 -07:00
|
|
|
}
|
2022-03-21 20:13:14 -07:00
|
|
|
|
2023-09-28 17:28:04 +00:00
|
|
|
if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE &&
|
|
|
|
secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE &&
|
2023-08-01 13:37:46 +02:00
|
|
|
key == NETWORK_GRAPH_PERSISTENCE_KEY
|
|
|
|
{
|
2022-06-01 15:26:07 -07:00
|
|
|
if let Some(sender) = &self.graph_persistence_notifier {
|
2023-04-20 15:37:11 +02:00
|
|
|
match sender.send(()) {
|
|
|
|
Ok(()) => {},
|
|
|
|
Err(std::sync::mpsc::SendError(())) => println!("Persister failed to notify as receiver went away."),
|
|
|
|
}
|
2022-06-01 15:26:07 -07:00
|
|
|
};
|
|
|
|
|
2022-04-11 13:50:31 -04:00
|
|
|
if let Some((error, message)) = self.graph_error {
|
|
|
|
return Err(std::io::Error::new(error, message))
|
|
|
|
}
|
2022-03-28 19:36:43 -07:00
|
|
|
}
|
2022-04-11 13:50:31 -04:00
|
|
|
|
2023-09-28 17:28:04 +00:00
|
|
|
if primary_namespace == SCORER_PERSISTENCE_PRIMARY_NAMESPACE &&
|
|
|
|
secondary_namespace == SCORER_PERSISTENCE_SECONDARY_NAMESPACE &&
|
2023-08-01 13:37:46 +02:00
|
|
|
key == SCORER_PERSISTENCE_KEY
|
|
|
|
{
|
2022-04-27 22:16:38 -07:00
|
|
|
if let Some((error, message)) = self.scorer_error {
|
|
|
|
return Err(std::io::Error::new(error, message))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-09-28 17:28:04 +00:00
|
|
|
self.kv_store.write(primary_namespace, secondary_namespace, key, buf)
|
2023-08-01 13:37:46 +02:00
|
|
|
}
|
|
|
|
|
2023-09-28 17:28:04 +00:00
|
|
|
	/// `KVStore::remove` pass-through; removals are never failed by this test persister.
	fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> lightning::io::Result<()> {
		self.kv_store.remove(primary_namespace, secondary_namespace, key, lazy)
	}
|
|
|
|
|
2023-09-28 17:28:04 +00:00
|
|
|
	/// `KVStore::list` pass-through to the backing store.
	fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> lightning::io::Result<Vec<String>> {
		self.kv_store.list(primary_namespace, secondary_namespace)
	}
|
|
|
|
}
|
|
|
|
|
2023-02-03 11:14:53 -05:00
|
|
|
/// Scorer test double which asserts that scoring callbacks arrive in the
/// expected order with the expected arguments (see `TestResult`).
struct TestScorer {
	// FIFO queue of expected callbacks. `None` means no expectations are
	// registered, in which case all scoring callbacks are silently ignored.
	event_expectations: Option<VecDeque<TestResult>>,
}
|
|
|
|
|
|
|
|
/// A single expected scorer callback together with the arguments it must carry.
#[derive(Debug)]
enum TestResult {
	PaymentFailure { path: Path, short_channel_id: u64 },
	PaymentSuccess { path: Path },
	ProbeFailure { path: Path },
	ProbeSuccess { path: Path },
}
|
|
|
|
|
|
|
|
impl TestScorer {
|
|
|
|
fn new() -> Self {
|
|
|
|
Self { event_expectations: None }
|
|
|
|
}
|
|
|
|
|
|
|
|
fn expect(&mut self, expectation: TestResult) {
|
2023-02-27 18:24:57 +00:00
|
|
|
self.event_expectations.get_or_insert_with(VecDeque::new).push_back(expectation);
|
2023-02-03 11:14:53 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl lightning::util::ser::Writeable for TestScorer {
	// Serialization content is irrelevant for these tests; write nothing.
	fn write<W: lightning::util::ser::Writer>(&self, _: &mut W) -> Result<(), lightning::io::Error> { Ok(()) }
}
|
|
|
|
|
2023-08-22 18:57:06 +03:00
|
|
|
impl ScoreLookUp for TestScorer {
	type ScoreParams = ();
	// Route scoring is never exercised by the background processor tests, so
	// any call here indicates a test bug.
	fn channel_penalty_msat(
		&self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId, _usage: ChannelUsage, _score_params: &Self::ScoreParams
	) -> u64 { unimplemented!(); }
}
|
2023-02-03 11:14:53 -05:00
|
|
|
|
2023-08-22 18:57:06 +03:00
|
|
|
impl ScoreUpdate for TestScorer {
	/// Pops the next expectation (if any are registered) and asserts it is a
	/// `PaymentFailure` matching the reported path and channel.
	fn payment_path_failed(&mut self, actual_path: &Path, actual_short_channel_id: u64) {
		if let Some(expectations) = &mut self.event_expectations {
			match expectations.pop_front().unwrap() {
				TestResult::PaymentFailure { path, short_channel_id } => {
					assert_eq!(actual_path, &path);
					assert_eq!(actual_short_channel_id, short_channel_id);
				},
				TestResult::PaymentSuccess { path } => {
					panic!("Unexpected successful payment path: {:?}", path)
				},
				TestResult::ProbeFailure { path } => {
					panic!("Unexpected probe failure: {:?}", path)
				},
				TestResult::ProbeSuccess { path } => {
					panic!("Unexpected probe success: {:?}", path)
				}
			}
		}
	}

	/// Pops the next expectation (if any are registered) and asserts it is a
	/// `PaymentSuccess` matching the reported path.
	fn payment_path_successful(&mut self, actual_path: &Path) {
		if let Some(expectations) = &mut self.event_expectations {
			match expectations.pop_front().unwrap() {
				TestResult::PaymentFailure { path, .. } => {
					panic!("Unexpected payment path failure: {:?}", path)
				},
				TestResult::PaymentSuccess { path } => {
					assert_eq!(actual_path, &path);
				},
				TestResult::ProbeFailure { path } => {
					panic!("Unexpected probe failure: {:?}", path)
				},
				TestResult::ProbeSuccess { path } => {
					panic!("Unexpected probe success: {:?}", path)
				}
			}
		}
	}

	/// Pops the next expectation (if any are registered) and asserts it is a
	/// `ProbeFailure` matching the reported path.
	fn probe_failed(&mut self, actual_path: &Path, _: u64) {
		if let Some(expectations) = &mut self.event_expectations {
			match expectations.pop_front().unwrap() {
				TestResult::PaymentFailure { path, .. } => {
					panic!("Unexpected payment path failure: {:?}", path)
				},
				TestResult::PaymentSuccess { path } => {
					panic!("Unexpected payment path success: {:?}", path)
				},
				TestResult::ProbeFailure { path } => {
					assert_eq!(actual_path, &path);
				},
				TestResult::ProbeSuccess { path } => {
					panic!("Unexpected probe success: {:?}", path)
				}
			}
		}
	}

	/// Pops the next expectation (if any are registered) and asserts it is a
	/// `ProbeSuccess` matching the reported path.
	fn probe_successful(&mut self, actual_path: &Path) {
		if let Some(expectations) = &mut self.event_expectations {
			match expectations.pop_front().unwrap() {
				TestResult::PaymentFailure { path, .. } => {
					panic!("Unexpected payment path failure: {:?}", path)
				},
				TestResult::PaymentSuccess { path } => {
					panic!("Unexpected payment path success: {:?}", path)
				},
				TestResult::ProbeFailure { path } => {
					panic!("Unexpected probe failure: {:?}", path)
				},
				TestResult::ProbeSuccess { path } => {
					assert_eq!(actual_path, &path);
				}
			}
		}
	}
}
|
|
|
|
|
2023-10-21 01:08:38 +00:00
|
|
|
// In c_bindings builds the combined `Score` trait is not blanket-implemented,
// so it must be implemented explicitly (it has no required methods of its own).
#[cfg(c_bindings)]
impl lightning::routing::scoring::Score for TestScorer {}
|
|
|
|
|
2023-02-03 11:14:53 -05:00
|
|
|
impl Drop for TestScorer {
|
|
|
|
fn drop(&mut self) {
|
|
|
|
if std::thread::panicking() {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if let Some(event_expectations) = &self.event_expectations {
|
|
|
|
if !event_expectations.is_empty() {
|
|
|
|
panic!("Unsatisfied event expectations: {:?}", event_expectations);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-11 18:03:32 -05:00
|
|
|
/// Joins `filename` onto `filepath` and returns the combined path as an owned
/// `String`. Panics if the result is not valid UTF-8, which cannot happen for
/// the ASCII paths these tests use.
fn get_full_filepath(filepath: String, filename: String) -> String {
	PathBuf::from(filepath).join(filename).to_str().unwrap().to_string()
}
|
|
|
|
|
2023-04-26 14:57:18 +02:00
|
|
|
/// Builds `num_nodes` fully wired test nodes persisting under a fresh
/// subdirectory of the system temp dir, and connects every pair of them as
/// peers. Returns the persistence directory path and the nodes.
fn create_nodes(num_nodes: usize, persist_dir: &str) -> (String, Vec<Node>) {
	let persist_temp_path = env::temp_dir().join(persist_dir);
	let persist_dir = persist_temp_path.to_string_lossy().to_string();
	let network = Network::Bitcoin;
	let mut nodes = Vec::new();
	for i in 0..num_nodes {
		let tx_broadcaster = Arc::new(test_utils::TestBroadcaster::new(network));
		let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) });
		let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
		let genesis_block = genesis_block(network);
		let network_graph = Arc::new(NetworkGraph::new(network, logger.clone()));
		let scorer = Arc::new(LockingWrapper::new(TestScorer::new()));
		// Deterministic per-node seed so each node gets distinct but stable keys.
		let seed = [i as u8; 32];
		let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone(), Default::default()));
		let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Bitcoin));
		// Each node persists to its own numbered subdirectory.
		let kv_store = Arc::new(FilesystemStore::new(format!("{}_persister_{}", &persist_dir, i).into()));
		let now = Duration::from_secs(genesis_block.header.time as u64);
		let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
		let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), kv_store.clone()));
		let best_block = BestBlock::from_network(network);
		let params = ChainParameters { network, best_block };
		let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params, genesis_block.header.time));
		let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()));
		let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone(), logger.clone()));
		let msg_handler = MessageHandler {
			chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new(ChainHash::using_genesis_block(Network::Testnet))),
			route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new()),
			onion_message_handler: IgnoringMessageHandler{}, custom_message_handler: IgnoringMessageHandler{}
		};
		let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), keys_manager.clone()));
		let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, kv_store, tx_broadcaster, network_graph, logger, best_block, scorer };
		nodes.push(node);
	}

	// Mark every pair of nodes as connected peers (i as the outbound side).
	for i in 0..num_nodes {
		for j in (i+1)..num_nodes {
			nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init {
				features: nodes[j].node.init_features(), networks: None, remote_network_address: None
			}, true).unwrap();
			nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init {
				features: nodes[i].node.init_features(), networks: None, remote_network_address: None
			}, false).unwrap();
		}
	}

	(persist_dir, nodes)
}
|
|
|
|
|
|
|
|
/// Drives a full channel-open handshake between `$node_a` and `$node_b` up to
/// the `ChannelPending` events on both sides, returning the funding transaction.
macro_rules! open_channel {
	($node_a: expr, $node_b: expr, $channel_value: expr) => {{
		begin_open_channel!($node_a, $node_b, $channel_value);
		// The opener should now have exactly one FundingGenerationReady event.
		let events = $node_a.node.get_and_clear_pending_events();
		assert_eq!(events.len(), 1);
		let (temporary_channel_id, tx) = handle_funding_generation_ready!(events[0], $channel_value);
		$node_a.node.funding_transaction_generated(&temporary_channel_id, &$node_b.node.get_our_node_id(), tx.clone()).unwrap();
		// Exchange funding_created / funding_signed and consume the resulting
		// ChannelPending events so they don't leak into the test body.
		$node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
		get_event!($node_b, Event::ChannelPending);
		$node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
		get_event!($node_a, Event::ChannelPending);
		tx
	}}
}
|
|
|
|
|
|
|
|
/// Starts a channel open from `$node_a` to `$node_b`, exchanging the
/// open_channel / accept_channel messages. Uses a user_channel_id of 42,
/// which `handle_funding_generation_ready!` asserts on.
macro_rules! begin_open_channel {
	($node_a: expr, $node_b: expr, $channel_value: expr) => {{
		$node_a.node.create_channel($node_b.node.get_our_node_id(), $channel_value, 100, 42, None).unwrap();
		$node_b.node.handle_open_channel(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendOpenChannel, $node_b.node.get_our_node_id()));
		$node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendAcceptChannel, $node_a.node.get_our_node_id()));
	}}
}
|
|
|
|
|
|
|
|
/// Consumes a `FundingGenerationReady` event, validates its value and
/// user_channel_id, and builds a one-output funding transaction paying the
/// requested script. Returns `(temporary_channel_id, funding_tx)`.
macro_rules! handle_funding_generation_ready {
	($event: expr, $channel_value: expr) => {{
		match $event {
			Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, ref output_script, user_channel_id, .. } => {
				assert_eq!(channel_value_satoshis, $channel_value);
				// 42 is the user_channel_id passed by begin_open_channel!.
				assert_eq!(user_channel_id, 42);

				let tx = Transaction { version: 1 as i32, lock_time: PackedLockTime(0), input: Vec::new(), output: vec![TxOut {
					value: channel_value_satoshis, script_pubkey: output_script.clone(),
				}]};
				(temporary_channel_id, tx)
			},
			_ => panic!("Unexpected event"),
		}
	}}
}
|
2021-01-11 18:03:32 -05:00
|
|
|
|
2021-05-31 20:04:36 +00:00
|
|
|
/// Mines `depth` dummy blocks on top of `node`'s best block, confirming `tx`
/// in the first of them, and notifies both the ChannelManager and the
/// ChainMonitor of the connection events.
fn confirm_transaction_depth(node: &mut Node, tx: &Transaction, depth: u32) {
	for i in 1..=depth {
		let prev_blockhash = node.best_block.block_hash();
		let height = node.best_block.height() + 1;
		let header = create_dummy_header(prev_blockhash, height);
		let txdata = vec![(0, tx)];
		node.best_block = BestBlock::new(header.block_hash(), height);
		match i {
			// First block: report the transaction as confirmed.
			1 => {
				node.node.transactions_confirmed(&header, &txdata, height);
				node.chain_monitor.transactions_confirmed(&header, &txdata, height);
			},
			// Final block: advance the best-block pointer so `tx` now has
			// `depth` confirmations. Intermediate blocks need no notification.
			x if x == depth => {
				node.node.best_block_updated(&header, height);
				node.chain_monitor.best_block_updated(&header, height);
			},
			_ => {},
		}
	}
}
|
2021-05-31 20:04:36 +00:00
|
|
|
/// Confirms `tx` with enough blocks (ANTI_REORG_DELAY) to be considered final.
fn confirm_transaction(node: &mut Node, tx: &Transaction) {
	confirm_transaction_depth(node, tx, ANTI_REORG_DELAY);
}
|
2021-05-11 08:34:57 -07:00
|
|
|
|
2021-01-11 18:03:32 -05:00
|
|
|
#[test]
fn test_background_processor() {
	// Test that when a new channel is created, the ChannelManager needs to be re-persisted with
	// updates. Also test that when new updates are available, the manager signals that it needs
	// re-persistence and is successfully re-persisted.
	let (persist_dir, nodes) = create_nodes(2, "test_background_processor");

	// Go through the channel creation process so that each node has something to persist. Since
	// open_channel consumes events, it must complete before starting BackgroundProcessor to
	// avoid a race with processing events.
	let tx = open_channel!(nodes[0], nodes[1], 100000);

	// Initiate the background processors to watch each node.
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir));
	let event_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	// Busy-waits until the bytes on disk at $filepath match the current
	// serialization of $node, i.e. until the background processor has caught up.
	macro_rules! check_persisted_data {
		($node: expr, $filepath: expr) => {
			let mut expected_bytes = Vec::new();
			loop {
				expected_bytes.clear();
				match $node.write(&mut expected_bytes) {
					Ok(()) => {
						match std::fs::read($filepath) {
							Ok(bytes) => {
								if bytes == expected_bytes {
									break
								} else {
									continue
								}
							},
							// File may not exist yet; keep polling.
							Err(_) => continue
						}
					},
					Err(e) => panic!("Unexpected error: {}", e)
				}
			}
		}
	}

	// Check that the initial channel manager data is persisted as expected.
	let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "manager".to_string());
	check_persisted_data!(nodes[0].node, filepath.clone());

	// Spin until the manager's needs-persist flag has been cleared by the processor.
	loop {
		if !nodes[0].node.get_event_or_persist_condvar_value() { break }
	}

	// Force-close the channel.
	nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();

	// Check that the force-close updates are persisted.
	check_persisted_data!(nodes[0].node, filepath.clone());
	loop {
		if !nodes[0].node.get_event_or_persist_condvar_value() { break }
	}

	// Check network graph is persisted
	let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "network_graph".to_string());
	check_persisted_data!(nodes[0].network_graph, filepath.clone());

	// Check scorer is persisted
	let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "scorer".to_string());
	check_persisted_data!(nodes[0].scorer, filepath.clone());

	// Don't mask an earlier assertion failure with a stop() unwrap panic.
	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}
}
|
|
|
|
|
|
|
|
#[test]
fn test_timer_tick_called() {
	// Test that `ChannelManager::timer_tick_occurred` is called every `FRESHNESS_TIMER`,
	// `ChainMonitor::rebroadcast_pending_claims` is called every `REBROADCAST_TIMER`, and
	// `PeerManager::timer_tick_occurred` every `PING_TIMER`.
	let (_, nodes) = create_nodes(1, "test_timer_tick_called");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir));
	let event_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
	// Busy-wait until the log shows that all three timer callbacks have fired
	// at least once.
	loop {
		let log_entries = nodes[0].logger.lines.lock().unwrap();
		let desired_log_1 = "Calling ChannelManager's timer_tick_occurred".to_string();
		let desired_log_2 = "Calling PeerManager's timer_tick_occurred".to_string();
		let desired_log_3 = "Rebroadcasting monitor's pending claims".to_string();
		if log_entries.get(&("lightning_background_processor".to_string(), desired_log_1)).is_some() &&
			log_entries.get(&("lightning_background_processor".to_string(), desired_log_2)).is_some() &&
			log_entries.get(&("lightning_background_processor".to_string(), desired_log_3)).is_some() {
			break
		}
	}

	// Don't mask an earlier assertion failure with a stop() unwrap panic.
	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}
}
|
|
|
|
|
|
|
|
#[test]
fn test_channel_manager_persist_error() {
	// Test that if we encounter an error during manager persistence, the thread panics.
	let (_, nodes) = create_nodes(2, "test_persist_error");
	open_channel!(nodes[0], nodes[1], 100000);

	let data_dir = nodes[0].kv_store.get_data_dir();
	// Inject a failure into channel manager persistence only.
	let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
	let event_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
	// The background thread should exit with exactly the injected error.
	match bg_processor.join() {
		Ok(_) => panic!("Expected error persisting manager"),
		Err(e) => {
			assert_eq!(e.kind(), std::io::ErrorKind::Other);
			assert_eq!(e.get_ref().unwrap().to_string(), "test");
		},
	}
}
|
2021-05-11 08:07:54 -07:00
|
|
|
|
2023-04-03 20:47:02 +00:00
|
|
|
#[tokio::test]
#[cfg(feature = "futures")]
async fn test_channel_manager_persist_error_async() {
	// Test that if we encounter an error during manager persistence, the thread panics.
	// Async counterpart of `test_channel_manager_persist_error`.
	let (_, nodes) = create_nodes(2, "test_persist_error_sync");
	open_channel!(nodes[0], nodes[1], 100000);

	let data_dir = nodes[0].kv_store.get_data_dir();
	// Inject a failure into channel manager persistence only.
	let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));

	let bp_future = super::process_events_async(
		persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
		nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
		Some(nodes[0].scorer.clone()), move |dur: Duration| {
			Box::pin(async move {
				tokio::time::sleep(dur).await;
				false // Never exit
			})
		}, false,
	);
	// The processing future should resolve with exactly the injected error.
	match bp_future.await {
		Ok(_) => panic!("Expected error persisting manager"),
		Err(e) => {
			assert_eq!(e.kind(), std::io::ErrorKind::Other);
			assert_eq!(e.get_ref().unwrap().to_string(), "test");
		},
	}
}
|
|
|
|
|
2022-03-21 20:13:14 -07:00
|
|
|
#[test]
fn test_network_graph_persist_error() {
	// Test that if we encounter an error during network graph persistence, an error gets returned.
	let (_, nodes) = create_nodes(2, "test_persist_network_graph_error");
	let data_dir = nodes[0].kv_store.get_data_dir();
	// Inject a failure into network graph persistence only.
	let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
	let event_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	// stop() surfaces the persistence error that the processor hit.
	match bg_processor.stop() {
		Ok(_) => panic!("Expected error persisting network graph"),
		Err(e) => {
			assert_eq!(e.kind(), std::io::ErrorKind::Other);
			assert_eq!(e.get_ref().unwrap().to_string(), "test");
		},
	}
}
|
|
|
|
|
2022-04-27 22:16:38 -07:00
|
|
|
#[test]
fn test_scorer_persist_error() {
	// Test that if we encounter an error during scorer persistence, an error gets returned.
	let (_, nodes) = create_nodes(2, "test_persist_scorer_error");
	let data_dir = nodes[0].kv_store.get_data_dir();
	// Inject a failure into scorer persistence only.
	let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
	let event_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	// stop() surfaces the persistence error that the processor hit.
	match bg_processor.stop() {
		Ok(_) => panic!("Expected error persisting scorer"),
		Err(e) => {
			assert_eq!(e.kind(), std::io::ErrorKind::Other);
			assert_eq!(e.get_ref().unwrap().to_string(), "test");
		},
	}
}
|
|
|
|
|
2021-05-11 08:07:54 -07:00
|
|
|
#[test]
fn test_background_event_handling() {
	// Tests that events generated while the background processor runs are routed
	// to the user-supplied event handler: first FundingGenerationReady /
	// ChannelPending during channel open, then SpendableOutputs after a
	// force-close matures.
	let (_, mut nodes) = create_nodes(2, "test_background_event_handling");
	let channel_value = 100000;
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir.clone()));

	// Set up a background event handler for FundingGenerationReady events.
	let (funding_generation_send, funding_generation_recv) = std::sync::mpsc::sync_channel(1);
	let (channel_pending_send, channel_pending_recv) = std::sync::mpsc::sync_channel(1);
	let event_handler = move |event: Event| match event {
		Event::FundingGenerationReady { .. } => funding_generation_send.send(handle_funding_generation_ready!(event, channel_value)).unwrap(),
		Event::ChannelPending { .. } => channel_pending_send.send(()).unwrap(),
		Event::ChannelReady { .. } => {},
		_ => panic!("Unexpected event: {:?}", event),
	};

	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	// Open a channel and check that the FundingGenerationReady event was handled.
	begin_open_channel!(nodes[0], nodes[1], channel_value);
	let (temporary_channel_id, funding_tx) = funding_generation_recv
		.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
		.expect("FundingGenerationReady not handled within deadline");
	nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
	get_event!(nodes[1], Event::ChannelPending);
	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
	let _ = channel_pending_recv.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
		.expect("ChannelPending not handled within deadline");

	// Confirm the funding transaction.
	confirm_transaction(&mut nodes[0], &funding_tx);
	let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
	confirm_transaction(&mut nodes[1], &funding_tx);
	let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_funding);
	let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_funding);
	let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());

	// Don't mask an earlier assertion failure with a stop() unwrap panic.
	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}

	// Set up a background event handler for SpendableOutputs events.
	let (sender, receiver) = std::sync::mpsc::sync_channel(1);
	let event_handler = move |event: Event| match event {
		Event::SpendableOutputs { .. } => sender.send(event).unwrap(),
		Event::ChannelReady { .. } => {},
		Event::ChannelClosed { .. } => {},
		_ => panic!("Unexpected event: {:?}", event),
	};
	let persister = Arc::new(Persister::new(data_dir));
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	// Force close the channel and check that the SpendableOutputs event was handled.
	nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
	let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
	// Bury the commitment tx deep enough for our outputs to become spendable.
	confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);

	let event = receiver
		.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
		.expect("Events not handled within deadline");
	match event {
		Event::SpendableOutputs { .. } => {},
		_ => panic!("Unexpected event: {:?}", event),
	}

	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}
}
|
2021-08-23 23:56:59 -05:00
|
|
|
|
2022-06-01 15:26:07 -07:00
|
|
|
#[test]
fn test_scorer_persistence() {
	// Verify that the background processor persists the scorer on its timer:
	// start a processor with no gossip sync and wait until the
	// "Persisting scorer" log line is observed from its background thread.
	let (_, nodes) = create_nodes(2, "test_scorer_persistence");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir));
	let ignoring_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, ignoring_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	// Poll the test logger until the scorer-persistence line shows up.
	let scorer_log_key = ("lightning_background_processor".to_string(), "Persisting scorer".to_string());
	while nodes[0].logger.lines.lock().unwrap().get(&scorer_log_key).is_none() {}

	// Don't double-panic if the test body already failed.
	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}
}
|
|
|
|
|
2023-04-03 20:34:13 +00:00
|
|
|
// Shared body for the sync and async variants of the "don't prune the network
// graph before a rapid gossip sync completes" test. A stale channel added up
// front must still be present after the background processor's loop has run at
// least twice, and only disappear once the RGS data has been applied and the
// graph-persistence notification has fired.
//
// Parameters:
//   $nodes   - test node array; $nodes[0] runs the background processor.
//   $receive - expression blocking until the graph-persistence notifier fires.
//   $sleep   - expression that yields briefly between log polls (thread sleep
//              in the sync caller, `tokio::time::sleep(..).await` in the async one).
macro_rules! do_test_not_pruning_network_graph_until_graph_sync_completion {
	($nodes: expr, $receive: expr, $sleep: expr) => {
		// Seed the graph with a channel known only from a partial announcement
		// (SCID 42), which would be pruned as stale if pruning were to run.
		let features = ChannelFeatures::empty();
		$nodes[0].network_graph.add_channel_from_partial_announcement(
			42, 53, features, $nodes[0].node.get_our_node_id(), $nodes[1].node.get_our_node_id()
		).expect("Failed to update channel from partial announcement");
		let original_graph_description = $nodes[0].network_graph.to_string();
		assert!(original_graph_description.contains("42: features: 0000, node_one:"));
		assert_eq!($nodes[0].network_graph.read_only().channels().len(), 1);

		// Poll the test logger until the processor's timer_tick log line has
		// been emitted more than once, proving the main loop has cycled and had
		// the opportunity to (incorrectly) prune before sync completion.
		loop {
			$sleep;
			let log_entries = $nodes[0].logger.lines.lock().unwrap();
			let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
			if *log_entries.get(&("lightning_background_processor".to_string(), loop_counter))
				.unwrap_or(&0) > 1
			{
				// Wait until the loop has gone around at least twice.
				break
			}
		}

		// Raw rapid-gossip-sync snapshot (starts with the b"LDK\x01" magic);
		// applying it marks the graph sync as complete. The snapshot's content
		// is only relied on via the channel-count assertions below.
		let initialization_input = vec![
			76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
			79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
			0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
			187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
			157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
			88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
			204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
			181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
			110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
			76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
			226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
			0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
			0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
		];
		$nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();

		// this should have added two channels and pruned the previous one.
		assert_eq!($nodes[0].network_graph.read_only().channels().len(), 2);

		// Block until the persister signals that the (now pruned) graph was
		// persisted post-sync.
		$receive.expect("Network graph not pruned within deadline");

		// all channels should now be pruned
		assert_eq!($nodes[0].network_graph.read_only().channels().len(), 0);
	}
}
|
|
|
|
|
2022-06-01 15:26:07 -07:00
|
|
|
#[test]
fn test_not_pruning_network_graph_until_graph_sync_completion() {
	// Notifier fired by the persister once the network graph is persisted;
	// the shared macro body blocks on it after the RGS data is applied.
	let (sender, receiver) = std::sync::mpsc::sync_channel(1);

	let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));

	let ignoring_handler = |_: _| {};
	let background_processor = BackgroundProcessor::start(persister, ignoring_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	// Drive the shared test body: wait on the persistence notification with a
	// generous timeout, yielding via a short thread sleep between log polls.
	do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes,
		receiver.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5)),
		std::thread::sleep(Duration::from_millis(1)));

	background_processor.stop().unwrap();
}
|
|
|
|
|
|
|
|
#[tokio::test]
#[cfg(feature = "futures")]
async fn test_not_pruning_network_graph_until_graph_sync_completion_async() {
	// Async twin of the test above, driving `process_events_async` instead of
	// the threaded `BackgroundProcessor`.
	// Notifier fired by the persister once the network graph is persisted.
	let (sender, receiver) = std::sync::mpsc::sync_channel(1);

	let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion_async");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));

	// Watch channel used to tell the background-processing future to exit:
	// the sleeper closure below resolves `true` (= exit requested) when it fires.
	let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
	let bp_future = super::process_events_async(
		persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
		nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
		Some(nodes[0].scorer.clone()), move |dur: Duration| {
			let mut exit_receiver = exit_receiver.clone();
			Box::pin(async move {
				tokio::select! {
					_ = tokio::time::sleep(dur) => false,
					_ = exit_receiver.changed() => true,
				}
			})
		}, false,
	);

	// Run the processor and the assertions concurrently; the assertion task
	// signals shutdown when the macro body completes.
	let t1 = tokio::spawn(bp_future);
	let t2 = tokio::spawn(async move {
		do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes, {
			// Async stand-in for the sync test's `recv_timeout`: poll the
			// std channel after each prune-timer sleep, giving up after 5 tries.
			let mut i = 0;
			loop {
				tokio::time::sleep(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER)).await;
				if let Ok(()) = receiver.try_recv() { break Ok::<(), ()>(()); }
				assert!(i < 5);
				i += 1;
			}
		}, tokio::time::sleep(Duration::from_millis(1)).await);
		exit_sender.send(()).unwrap();
	});
	// Propagate panics/errors from both tasks.
	let (r1, r2) = tokio::join!(t1, t2);
	r1.unwrap().unwrap();
	r2.unwrap()
}
|
|
|
|
|
2023-04-03 20:19:49 +00:00
|
|
|
// Shared body for the sync and async payment-path-scoring tests. For each of
// five payment/probe outcome events it first registers the scoring call the
// test scorer should expect (via `write_lock().expect(..)`), then pushes the
// event through the node and waits (via `$receive`) for the running background
// processor's event handler to forward it back.
//
// Parameters:
//   $nodes   - test node array; $nodes[0] runs the background processor.
//   $receive - expression yielding the next event forwarded by the handler
//              (Result/Option whose failure message is passed to `.expect`).
macro_rules! do_test_payment_path_scoring {
	($nodes: expr, $receive: expr) => {
		// Ensure that we update the scorer when relevant events are processed. In this case, we ensure
		// that we update the scorer upon a payment path succeeding (note that the channel must be
		// public or else we won't score it).
		// A background event handler for FundingGenerationReady events must be hooked up to a
		// running background processor.
		let scored_scid = 4242;
		let secp_ctx = Secp256k1::new();
		let node_1_privkey = SecretKey::from_slice(&[42; 32]).unwrap();
		let node_1_id = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey);

		// Single-hop path over the channel being scored.
		let path = Path { hops: vec![RouteHop {
			pubkey: node_1_id,
			node_features: NodeFeatures::empty(),
			short_channel_id: scored_scid,
			channel_features: ChannelFeatures::empty(),
			fee_msat: 0,
			cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA as u32,
			maybe_announced_channel: true,
		}], blinded_tail: None };

		// 1) Non-permanent path failure mid-route -> scored as a payment
		//    failure on the failing channel.
		$nodes[0].scorer.write_lock().expect(TestResult::PaymentFailure { path: path.clone(), short_channel_id: scored_scid });
		$nodes[0].node.push_pending_event(Event::PaymentPathFailed {
			payment_id: None,
			payment_hash: PaymentHash([42; 32]),
			payment_failed_permanently: false,
			failure: PathFailure::OnPath { network_update: None },
			path: path.clone(),
			short_channel_id: Some(scored_scid),
		});
		let event = $receive.expect("PaymentPathFailed not handled within deadline");
		match event {
			Event::PaymentPathFailed { .. } => {},
			_ => panic!("Unexpected event"),
		}

		// Ensure we'll score payments that were explicitly failed back by the destination as
		// ProbeSuccess.
		// 2) Permanent failure with no failing channel (failed back by the
		//    recipient) -> scored as a probe success.
		$nodes[0].scorer.write_lock().expect(TestResult::ProbeSuccess { path: path.clone() });
		$nodes[0].node.push_pending_event(Event::PaymentPathFailed {
			payment_id: None,
			payment_hash: PaymentHash([42; 32]),
			payment_failed_permanently: true,
			failure: PathFailure::OnPath { network_update: None },
			path: path.clone(),
			short_channel_id: None,
		});
		let event = $receive.expect("PaymentPathFailed not handled within deadline");
		match event {
			Event::PaymentPathFailed { .. } => {},
			_ => panic!("Unexpected event"),
		}

		// 3) Successful payment path -> scored as a payment success.
		$nodes[0].scorer.write_lock().expect(TestResult::PaymentSuccess { path: path.clone() });
		$nodes[0].node.push_pending_event(Event::PaymentPathSuccessful {
			payment_id: PaymentId([42; 32]),
			payment_hash: None,
			path: path.clone(),
		});
		let event = $receive.expect("PaymentPathSuccessful not handled within deadline");
		match event {
			Event::PaymentPathSuccessful { .. } => {},
			_ => panic!("Unexpected event"),
		}

		// 4) Successful probe -> scored as a probe success.
		$nodes[0].scorer.write_lock().expect(TestResult::ProbeSuccess { path: path.clone() });
		$nodes[0].node.push_pending_event(Event::ProbeSuccessful {
			payment_id: PaymentId([42; 32]),
			payment_hash: PaymentHash([42; 32]),
			path: path.clone(),
		});
		let event = $receive.expect("ProbeSuccessful not handled within deadline");
		match event {
			Event::ProbeSuccessful { .. } => {},
			_ => panic!("Unexpected event"),
		}

		// 5) Failed probe -> scored as a probe failure.
		$nodes[0].scorer.write_lock().expect(TestResult::ProbeFailure { path: path.clone() });
		$nodes[0].node.push_pending_event(Event::ProbeFailed {
			payment_id: PaymentId([42; 32]),
			payment_hash: PaymentHash([42; 32]),
			path,
			short_channel_id: Some(scored_scid),
		});
		let event = $receive.expect("ProbeFailure not handled within deadline");
		match event {
			Event::ProbeFailed { .. } => {},
			_ => panic!("Unexpected event"),
		}
	}
}
|
|
|
|
|
2023-02-03 11:25:20 -05:00
|
|
|
#[test]
fn test_payment_path_scoring() {
	// Forward the payment/probe outcome events that the shared macro body
	// waits for; any other event is a test failure.
	let (sender, receiver) = std::sync::mpsc::sync_channel(1);
	let event_handler = move |event: Event| match event {
		Event::PaymentPathFailed { .. }
			| Event::PaymentPathSuccessful { .. }
			| Event::ProbeSuccessful { .. }
			| Event::ProbeFailed { .. } => sender.send(event).unwrap(),
		_ => panic!("Unexpected event: {:?}", event),
	};

	let (_, nodes) = create_nodes(1, "test_payment_path_scoring");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir));
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	do_test_payment_path_scoring!(nodes, receiver.recv_timeout(Duration::from_secs(EVENT_DEADLINE)));

	// Don't double-panic if the macro body already failed.
	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}

	// Each of the five scoring events should have triggered a scorer persist.
	let scorer_log_key = ("lightning_background_processor".to_string(), "Persisting scorer after update".to_string());
	assert_eq!(*nodes[0].logger.lines.lock().unwrap().get(&scorer_log_key).unwrap(), 5);
}
|
2023-02-03 11:25:20 -05:00
|
|
|
|
2023-04-03 20:19:49 +00:00
|
|
|
#[tokio::test]
#[cfg(feature = "futures")]
async fn test_payment_path_scoring_async() {
	// Async twin of `test_payment_path_scoring`, driving `process_events_async`
	// with an async event handler that forwards the expected events over a
	// tokio channel.
	let (sender, mut receiver) = tokio::sync::mpsc::channel(1);
	let event_handler = move |event: Event| {
		let sender_ref = sender.clone();
		async move {
			match event {
				Event::PaymentPathFailed { .. } => { sender_ref.send(event).await.unwrap() },
				Event::PaymentPathSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
				Event::ProbeSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
				Event::ProbeFailed { .. } => { sender_ref.send(event).await.unwrap() },
				_ => panic!("Unexpected event: {:?}", event),
			}
		}
	};

	let (_, nodes) = create_nodes(1, "test_payment_path_scoring_async");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir));

	// Watch channel used to tell the background-processing future to exit:
	// the sleeper closure below resolves `true` (= exit requested) when it fires.
	let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());

	let bp_future = super::process_events_async(
		persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
		nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
		Some(nodes[0].scorer.clone()), move |dur: Duration| {
			let mut exit_receiver = exit_receiver.clone();
			Box::pin(async move {
				tokio::select! {
					_ = tokio::time::sleep(dur) => false,
					_ = exit_receiver.changed() => true,
				}
			})
		}, false,
	);
	// Run the processor and the assertions concurrently; the assertion task
	// signals shutdown once the macro body has seen all five events.
	let t1 = tokio::spawn(bp_future);
	let t2 = tokio::spawn(async move {
		do_test_payment_path_scoring!(nodes, receiver.recv().await);
		exit_sender.send(()).unwrap();

		// Each of the five scoring events should have triggered a scorer persist.
		let log_entries = nodes[0].logger.lines.lock().unwrap();
		let expected_log = "Persisting scorer after update".to_string();
		assert_eq!(*log_entries.get(&("lightning_background_processor".to_string(), expected_log)).unwrap(), 5);
	});

	// Propagate panics/errors from both tasks.
	let (r1, r2) = tokio::join!(t1, t2);
	r1.unwrap().unwrap();
	r2.unwrap()
}
|
2021-01-11 18:03:32 -05:00
|
|
|
}
|