2021-03-17 14:18:37 -04:00
|
|
|
//! Utilities that take care of tasks that (1) need to happen periodically to keep Rust-Lightning
|
|
|
|
//! running properly, and (2) either can or should be run in the background. See docs for
|
|
|
|
//! [`BackgroundProcessor`] for more details on the nitty-gritty.
|
|
|
|
|
2022-08-07 13:49:10 -04:00
|
|
|
// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
|
2021-03-17 14:05:09 -04:00
|
|
|
#![deny(broken_intra_doc_links)]
|
2022-08-07 13:49:10 -04:00
|
|
|
#![deny(private_intra_doc_links)]
|
|
|
|
|
2021-03-17 14:18:37 -04:00
|
|
|
#![deny(missing_docs)]
|
2023-03-30 22:11:22 +00:00
|
|
|
#![cfg_attr(not(feature = "futures"), deny(unsafe_code))]
|
2021-03-17 14:05:09 -04:00
|
|
|
|
2022-02-11 22:22:20 -06:00
|
|
|
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
|
|
|
|
2023-01-17 00:16:48 +00:00
|
|
|
#![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
|
|
|
|
|
|
|
|
#[cfg(any(test, feature = "std"))]
|
|
|
|
extern crate core;
|
|
|
|
|
2023-02-03 11:25:20 -05:00
|
|
|
#[cfg(not(feature = "std"))]
|
|
|
|
extern crate alloc;
|
|
|
|
|
2021-01-11 18:03:32 -05:00
|
|
|
#[macro_use] extern crate lightning;
|
2022-06-01 15:26:07 -07:00
|
|
|
extern crate lightning_rapid_gossip_sync;
|
2021-01-11 18:03:32 -05:00
|
|
|
|
|
|
|
use lightning::chain;
|
|
|
|
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
|
2021-10-07 23:46:13 +00:00
|
|
|
use lightning::chain::chainmonitor::{ChainMonitor, Persist};
|
2022-12-20 14:46:08 -08:00
|
|
|
use lightning::chain::keysinterface::{EntropySource, NodeSigner, SignerProvider};
|
2023-03-07 13:57:01 -08:00
|
|
|
use lightning::events::{Event, PathFailure};
|
|
|
|
#[cfg(feature = "std")]
|
|
|
|
use lightning::events::{EventHandler, EventsProvider};
|
2021-01-11 18:03:32 -05:00
|
|
|
use lightning::ln::channelmanager::ChannelManager;
|
2022-08-06 00:33:48 -04:00
|
|
|
use lightning::ln::msgs::{ChannelMessageHandler, OnionMessageHandler, RoutingMessageHandler};
|
2021-10-26 02:03:02 +00:00
|
|
|
use lightning::ln::peer_handler::{CustomMessageHandler, PeerManager, SocketDescriptor};
|
2022-06-02 12:53:34 -07:00
|
|
|
use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
|
2023-01-21 03:28:35 +00:00
|
|
|
use lightning::routing::utxo::UtxoLookup;
|
2022-10-28 11:31:24 -04:00
|
|
|
use lightning::routing::router::Router;
|
2023-02-03 11:25:20 -05:00
|
|
|
use lightning::routing::scoring::{Score, WriteableScore};
|
2021-01-11 18:03:32 -05:00
|
|
|
use lightning::util::logger::Logger;
|
2022-04-11 13:50:31 -04:00
|
|
|
use lightning::util::persist::Persister;
|
2023-03-09 03:11:13 +00:00
|
|
|
#[cfg(feature = "std")]
|
|
|
|
use lightning::util::wakers::Sleeper;
|
2022-06-01 15:26:07 -07:00
|
|
|
use lightning_rapid_gossip_sync::RapidGossipSync;
|
2023-01-17 00:16:48 +00:00
|
|
|
|
|
|
|
use core::ops::Deref;
|
|
|
|
use core::time::Duration;
|
|
|
|
|
|
|
|
#[cfg(feature = "std")]
|
2021-01-11 18:03:32 -05:00
|
|
|
use std::sync::Arc;
|
2023-01-17 00:16:48 +00:00
|
|
|
#[cfg(feature = "std")]
|
|
|
|
use core::sync::atomic::{AtomicBool, Ordering};
|
|
|
|
#[cfg(feature = "std")]
|
|
|
|
use std::thread::{self, JoinHandle};
|
|
|
|
#[cfg(feature = "std")]
|
|
|
|
use std::time::Instant;
|
2021-01-11 18:03:32 -05:00
|
|
|
|
2023-02-03 11:25:20 -05:00
|
|
|
#[cfg(not(feature = "std"))]
|
|
|
|
use alloc::vec::Vec;
|
2022-08-09 06:01:10 +00:00
|
|
|
|
2021-08-19 11:21:42 -05:00
|
|
|
/// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep
/// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
/// responsibilities are:
/// * Processing [`Event`]s with a user-provided [`EventHandler`].
/// * Monitoring whether the [`ChannelManager`] needs to be re-persisted to disk, and if so,
///   writing it to disk/backups by invoking the callback given to it at startup.
///   [`ChannelManager`] persistence should be done in the background.
/// * Calling [`ChannelManager::timer_tick_occurred`] and [`PeerManager::timer_tick_occurred`]
///   at the appropriate intervals.
/// * Calling [`NetworkGraph::remove_stale_channels_and_tracking`] (if a [`GossipSync`] with a
///   [`NetworkGraph`] is provided to [`BackgroundProcessor::start`]).
///
/// It will also call [`PeerManager::process_events`] periodically though this shouldn't be relied
/// upon as doing so may result in high latency.
///
/// # Note
///
/// If [`ChannelManager`] persistence fails and the persisted manager becomes out-of-date, then
/// there is a risk of channels force-closing on startup when the manager realizes it's outdated.
/// However, as long as [`ChannelMonitor`] backups are sound, no funds besides those used for
/// unilateral chain closure fees are at risk.
///
/// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor
/// [`Event`]: lightning::events::Event
#[cfg(feature = "std")]
#[must_use = "BackgroundProcessor will immediately stop on drop. It should be stored until shutdown."]
pub struct BackgroundProcessor {
	// Shared flag flipped to `true` to request the background thread exit its loop.
	stop_thread: Arc<AtomicBool>,
	// Handle for the spawned thread; `Option` so `join`/`stop` can take it by value.
	thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
}
|
|
|
|
|
|
|
|
/// Interval (in seconds) between calls to `ChannelManager::timer_tick_occurred`.
#[cfg(not(test))]
const FRESHNESS_TIMER: u64 = 60;
#[cfg(test)]
const FRESHNESS_TIMER: u64 = 1;

/// Interval (in seconds) between calls to `PeerManager::timer_tick_occurred`.
#[cfg(all(not(test), not(debug_assertions)))]
const PING_TIMER: u64 = 10;
/// Signature operations take a lot longer without compiler optimisations.
/// Increasing the ping timer allows for this but slower devices will be disconnected if the
/// timeout is reached.
#[cfg(all(not(test), debug_assertions))]
const PING_TIMER: u64 = 30;
#[cfg(test)]
const PING_TIMER: u64 = 1;

/// Prune the network graph of stale entries hourly.
const NETWORK_PRUNE_TIMER: u64 = 60 * 60;

/// Interval (in seconds) between scorer persistence attempts.
#[cfg(not(test))]
const SCORER_PERSIST_TIMER: u64 = 30;
#[cfg(test)]
const SCORER_PERSIST_TIMER: u64 = 1;

/// Delay (in seconds) before the first network-graph prune after startup; subsequent prunes
/// follow the regular `NETWORK_PRUNE_TIMER` cadence.
#[cfg(not(test))]
const FIRST_NETWORK_PRUNE_TIMER: u64 = 60;
#[cfg(test)]
const FIRST_NETWORK_PRUNE_TIMER: u64 = 1;
|
|
|
|
|
2022-06-02 14:48:32 -07:00
|
|
|
/// Either [`P2PGossipSync`] or [`RapidGossipSync`].
|
|
|
|
pub enum GossipSync<
|
2023-01-21 03:28:35 +00:00
|
|
|
P: Deref<Target = P2PGossipSync<G, U, L>>,
|
2022-06-02 14:48:32 -07:00
|
|
|
R: Deref<Target = RapidGossipSync<G, L>>,
|
|
|
|
G: Deref<Target = NetworkGraph<L>>,
|
2023-01-21 03:28:35 +00:00
|
|
|
U: Deref,
|
2022-06-02 14:48:32 -07:00
|
|
|
L: Deref,
|
|
|
|
>
|
2023-01-21 03:28:35 +00:00
|
|
|
where U::Target: UtxoLookup, L::Target: Logger {
|
2022-06-02 14:48:32 -07:00
|
|
|
/// Gossip sync via the lightning peer-to-peer network as defined by BOLT 7.
|
|
|
|
P2P(P),
|
|
|
|
/// Rapid gossip sync from a trusted server.
|
|
|
|
Rapid(R),
|
|
|
|
/// No gossip sync.
|
|
|
|
None,
|
|
|
|
}
|
|
|
|
|
|
|
|
impl<
|
2023-01-21 03:28:35 +00:00
|
|
|
P: Deref<Target = P2PGossipSync<G, U, L>>,
|
2022-06-02 14:48:32 -07:00
|
|
|
R: Deref<Target = RapidGossipSync<G, L>>,
|
|
|
|
G: Deref<Target = NetworkGraph<L>>,
|
2023-01-21 03:28:35 +00:00
|
|
|
U: Deref,
|
2022-06-02 14:48:32 -07:00
|
|
|
L: Deref,
|
2023-01-21 03:28:35 +00:00
|
|
|
> GossipSync<P, R, G, U, L>
|
|
|
|
where U::Target: UtxoLookup, L::Target: Logger {
|
2022-06-02 14:48:32 -07:00
|
|
|
fn network_graph(&self) -> Option<&G> {
|
|
|
|
match self {
|
|
|
|
GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
|
|
|
|
GossipSync::Rapid(gossip_sync) => Some(gossip_sync.network_graph()),
|
|
|
|
GossipSync::None => None,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn prunable_network_graph(&self) -> Option<&G> {
|
|
|
|
match self {
|
|
|
|
GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
|
|
|
|
GossipSync::Rapid(gossip_sync) => {
|
|
|
|
if gossip_sync.is_initial_sync_complete() {
|
|
|
|
Some(gossip_sync.network_graph())
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
}
|
|
|
|
},
|
|
|
|
GossipSync::None => None,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2021-04-13 19:38:31 -04:00
|
|
|
|
2023-03-16 20:35:52 +03:00
|
|
|
/// This is not exported to bindings users as the bindings concretize everything and have constructors for us
|
2023-01-21 03:28:35 +00:00
|
|
|
impl<P: Deref<Target = P2PGossipSync<G, U, L>>, G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref>
|
|
|
|
GossipSync<P, &RapidGossipSync<G, L>, G, U, L>
|
2022-07-14 12:32:30 -07:00
|
|
|
where
|
2023-01-21 03:28:35 +00:00
|
|
|
U::Target: UtxoLookup,
|
2022-07-14 12:32:30 -07:00
|
|
|
L::Target: Logger,
|
|
|
|
{
|
|
|
|
/// Initializes a new [`GossipSync::P2P`] variant.
|
|
|
|
pub fn p2p(gossip_sync: P) -> Self {
|
|
|
|
GossipSync::P2P(gossip_sync)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-03-16 20:35:52 +03:00
|
|
|
/// This is not exported to bindings users as the bindings concretize everything and have constructors for us
|
2022-07-14 12:32:30 -07:00
|
|
|
impl<'a, R: Deref<Target = RapidGossipSync<G, L>>, G: Deref<Target = NetworkGraph<L>>, L: Deref>
|
|
|
|
GossipSync<
|
2023-01-21 03:28:35 +00:00
|
|
|
&P2PGossipSync<G, &'a (dyn UtxoLookup + Send + Sync), L>,
|
2022-07-14 12:32:30 -07:00
|
|
|
R,
|
|
|
|
G,
|
2023-01-21 03:28:35 +00:00
|
|
|
&'a (dyn UtxoLookup + Send + Sync),
|
2022-07-14 12:32:30 -07:00
|
|
|
L,
|
|
|
|
>
|
|
|
|
where
|
|
|
|
L::Target: Logger,
|
|
|
|
{
|
|
|
|
/// Initializes a new [`GossipSync::Rapid`] variant.
|
|
|
|
pub fn rapid(gossip_sync: R) -> Self {
|
|
|
|
GossipSync::Rapid(gossip_sync)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-03-16 20:35:52 +03:00
|
|
|
/// This is not exported to bindings users as the bindings concretize everything and have constructors for us
|
2022-07-14 12:32:30 -07:00
|
|
|
impl<'a, L: Deref>
|
|
|
|
GossipSync<
|
2023-01-21 03:28:35 +00:00
|
|
|
&P2PGossipSync<&'a NetworkGraph<L>, &'a (dyn UtxoLookup + Send + Sync), L>,
|
2022-07-14 12:32:30 -07:00
|
|
|
&RapidGossipSync<&'a NetworkGraph<L>, L>,
|
|
|
|
&'a NetworkGraph<L>,
|
2023-01-21 03:28:35 +00:00
|
|
|
&'a (dyn UtxoLookup + Send + Sync),
|
2022-07-14 12:32:30 -07:00
|
|
|
L,
|
|
|
|
>
|
|
|
|
where
|
|
|
|
L::Target: Logger,
|
|
|
|
{
|
|
|
|
/// Initializes a new [`GossipSync::None`] variant.
|
|
|
|
pub fn none() -> Self {
|
|
|
|
GossipSync::None
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-10-31 16:07:41 -07:00
|
|
|
fn handle_network_graph_update<L: Deref>(
|
|
|
|
network_graph: &NetworkGraph<L>, event: &Event
|
|
|
|
) where L::Target: Logger {
|
2023-02-13 17:55:42 -05:00
|
|
|
if let Event::PaymentPathFailed {
|
|
|
|
failure: PathFailure::OnPath { network_update: Some(ref upd) }, .. } = event
|
|
|
|
{
|
|
|
|
network_graph.handle_network_update(upd);
|
2022-10-31 16:07:41 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-02-03 11:25:20 -05:00
|
|
|
fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + WriteableScore<'a>>(
|
|
|
|
scorer: &'a S, event: &Event
|
|
|
|
) {
|
|
|
|
let mut score = scorer.lock();
|
|
|
|
match event {
|
|
|
|
Event::PaymentPathFailed { ref path, short_channel_id: Some(scid), .. } => {
|
|
|
|
let path = path.iter().collect::<Vec<_>>();
|
|
|
|
score.payment_path_failed(&path, *scid);
|
|
|
|
},
|
|
|
|
Event::PaymentPathFailed { ref path, payment_failed_permanently: true, .. } => {
|
|
|
|
// Reached if the destination explicitly failed it back. We treat this as a successful probe
|
|
|
|
// because the payment made it all the way to the destination with sufficient liquidity.
|
|
|
|
let path = path.iter().collect::<Vec<_>>();
|
|
|
|
score.probe_successful(&path);
|
|
|
|
},
|
|
|
|
Event::PaymentPathSuccessful { path, .. } => {
|
|
|
|
let path = path.iter().collect::<Vec<_>>();
|
|
|
|
score.payment_path_successful(&path);
|
|
|
|
},
|
|
|
|
Event::ProbeSuccessful { path, .. } => {
|
|
|
|
let path = path.iter().collect::<Vec<_>>();
|
|
|
|
score.probe_successful(&path);
|
|
|
|
},
|
|
|
|
Event::ProbeFailed { path, short_channel_id: Some(scid), .. } => {
|
|
|
|
let path = path.iter().collect::<Vec<_>>();
|
|
|
|
score.probe_failed(&path, *scid);
|
|
|
|
},
|
|
|
|
_ => {},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-08-09 06:01:10 +00:00
|
|
|
/// Expands to the shared main loop used by both the threaded (`BackgroundProcessor::start`) and
/// async (`process_events_async`) entry points. The caller supplies expressions for event
/// processing, the exit check, the sleep/await primitive, and the timer implementation so the
/// same control flow serves both std-thread and futures execution.
macro_rules! define_run_body {
	($persister: ident, $chain_monitor: ident, $process_chain_monitor_events: expr,
	 $channel_manager: ident, $process_channel_manager_events: expr,
	 $gossip_sync: ident, $peer_manager: ident, $logger: ident, $scorer: ident,
	 $loop_exit_check: expr, $await: expr, $get_timer: expr, $timer_elapsed: expr)
	=> { {
		log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
		$channel_manager.timer_tick_occurred();

		let mut last_freshness_call = $get_timer(FRESHNESS_TIMER);
		let mut last_ping_call = $get_timer(PING_TIMER);
		let mut last_prune_call = $get_timer(FIRST_NETWORK_PRUNE_TIMER);
		let mut last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
		let mut have_pruned = false;

		loop {
			$process_channel_manager_events;
			$process_chain_monitor_events;

			// Note that the PeerManager::process_events may block on ChannelManager's locks,
			// hence it comes last here. When the ChannelManager finishes whatever it's doing,
			// we want to ensure we get into `persist_manager` as quickly as we can, especially
			// without running the normal event processing above and handing events to users.
			//
			// Specifically, on an *extremely* slow machine, we may see ChannelManager start
			// processing a message effectively at any point during this loop. In order to
			// minimize the time between such processing completing and persisting the updated
			// ChannelManager, we want to minimize methods blocking on a ChannelManager
			// generally, and as a fallback place such blocking only immediately before
			// persistence.
			$peer_manager.process_events();

			// We wait up to 100ms, but track how long it takes to detect being put to sleep,
			// see `await_start`'s use below.
			let mut await_start = $get_timer(1);
			let updates_available = $await;
			let await_slow = $timer_elapsed(&mut await_start, 1);

			if updates_available {
				log_trace!($logger, "Persisting ChannelManager...");
				$persister.persist_manager(&*$channel_manager)?;
				log_trace!($logger, "Done persisting ChannelManager.");
			}
			// Exit the loop if the background processor was requested to stop.
			if $loop_exit_check {
				log_trace!($logger, "Terminating background processor.");
				break;
			}
			if $timer_elapsed(&mut last_freshness_call, FRESHNESS_TIMER) {
				log_trace!($logger, "Calling ChannelManager's timer_tick_occurred");
				$channel_manager.timer_tick_occurred();
				last_freshness_call = $get_timer(FRESHNESS_TIMER);
			}
			if await_slow {
				// On various platforms, we may be starved of CPU cycles for several reasons.
				// E.g. on iOS, if we've been in the background, we will be entirely paused.
				// Similarly, if we're on a desktop platform and the device has been asleep, we
				// may not get any cycles.
				// We detect this by checking if our max-100ms-sleep, above, ran longer than a
				// full second, at which point we assume sockets may have been killed (they
				// appear to be at least on some platforms, even if it has only been a second).
				// Note that we have to take care to not get here just because user event
				// processing was slow at the top of the loop. For example, the sample client
				// may call Bitcoin Core RPCs during event handling, which very often takes
				// more than a handful of seconds to complete, and shouldn't disconnect all our
				// peers.
				log_trace!($logger, "100ms sleep took more than a second, disconnecting peers.");
				$peer_manager.disconnect_all_peers();
				last_ping_call = $get_timer(PING_TIMER);
			} else if $timer_elapsed(&mut last_ping_call, PING_TIMER) {
				log_trace!($logger, "Calling PeerManager's timer_tick_occurred");
				$peer_manager.timer_tick_occurred();
				last_ping_call = $get_timer(PING_TIMER);
			}

			// Note that we want to run a graph prune once not long after startup before
			// falling back to our usual hourly prunes. This avoids short-lived clients never
			// pruning their network graph. We run once 60 seconds after startup before
			// continuing our normal cadence.
			if $timer_elapsed(&mut last_prune_call, if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER }) {
				// The network graph must not be pruned while rapid sync completion is pending
				if let Some(network_graph) = $gossip_sync.prunable_network_graph() {
					#[cfg(feature = "std")] {
						log_trace!($logger, "Pruning and persisting network graph.");
						network_graph.remove_stale_channels_and_tracking();
					}
					#[cfg(not(feature = "std"))] {
						log_warn!($logger, "Not pruning network graph, consider enabling `std` or doing so manually with remove_stale_channels_and_tracking_with_time.");
						log_trace!($logger, "Persisting network graph.");
					}

					if let Err(e) = $persister.persist_graph(network_graph) {
						log_error!($logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
					}

					have_pruned = true;
				}
				last_prune_call = $get_timer(NETWORK_PRUNE_TIMER);
			}

			if $timer_elapsed(&mut last_scorer_persist_call, SCORER_PERSIST_TIMER) {
				if let Some(ref scorer) = $scorer {
					log_trace!($logger, "Persisting scorer");
					if let Err(e) = $persister.persist_scorer(&scorer) {
						log_error!($logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
					}
				}
				last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
			}
		}

		// After we exit, ensure we persist the ChannelManager one final time - this avoids
		// some races where users quit while channel updates were in-flight, with
		// ChannelMonitor update(s) persisted without a corresponding ChannelManager update.
		$persister.persist_manager(&*$channel_manager)?;

		// Persist Scorer on exit
		if let Some(ref scorer) = $scorer {
			$persister.persist_scorer(&scorer)?;
		}

		// Persist NetworkGraph on exit
		if let Some(network_graph) = $gossip_sync.network_graph() {
			$persister.persist_graph(network_graph)?;
		}

		Ok(())
	} }
}
|
|
|
|
|
2023-03-30 21:52:03 +00:00
|
|
|
#[cfg(feature = "futures")]
pub(crate) mod futures_util {
	use core::future::Future;
	use core::task::{Poll, Waker, RawWaker, RawWakerVTable};
	use core::pin::Pin;
	use core::marker::Unpin;

	/// A future that resolves as soon as the first of its three inner futures resolves.
	pub(crate) struct Selector<
		A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
	> {
		pub a: A,
		pub b: B,
		pub c: C,
	}
	/// Indicates which of a [`Selector`]'s futures completed first (carrying `C`'s output).
	pub(crate) enum SelectorOutput {
		A, B, C(bool),
	}

	impl<
		A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
	> Future for Selector<A, B, C> {
		type Output = SelectorOutput;
		fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll<SelectorOutput> {
			// Poll in fixed a -> b -> c order, returning on the first ready future.
			match Pin::new(&mut self.a).poll(ctx) {
				Poll::Ready(()) => { return Poll::Ready(SelectorOutput::A); },
				Poll::Pending => {},
			}
			match Pin::new(&mut self.b).poll(ctx) {
				Poll::Ready(()) => { return Poll::Ready(SelectorOutput::B); },
				Poll::Pending => {},
			}
			match Pin::new(&mut self.c).poll(ctx) {
				Poll::Ready(res) => { return Poll::Ready(SelectorOutput::C(res)); },
				Poll::Pending => {},
			}
			Poll::Pending
		}
	}

	// If we want to poll a future without an async context to figure out if it has completed or
	// not without awaiting, we need a Waker, which needs a vtable...we fill it with dummy values
	// but sadly there's a good bit of boilerplate here.
	fn dummy_waker_clone(_: *const ()) -> RawWaker { RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE) }
	fn dummy_waker_action(_: *const ()) { }

	const DUMMY_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
		dummy_waker_clone, dummy_waker_action, dummy_waker_action, dummy_waker_action);
	/// Returns a no-op [`Waker`] usable for one-shot, non-async polling of a future.
	pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } }
}
|
2023-03-30 22:11:22 +00:00
|
|
|
#[cfg(feature = "futures")]
|
|
|
|
use futures_util::{Selector, SelectorOutput, dummy_waker};
|
|
|
|
#[cfg(feature = "futures")]
|
|
|
|
use core::task;
|
2023-03-30 21:52:03 +00:00
|
|
|
|
2022-08-09 06:01:10 +00:00
|
|
|
/// Processes background events in a future.
///
/// `sleeper` should return a future which completes in the given amount of time and returns a
/// boolean indicating whether the background processing should exit. Once `sleeper` returns a
/// future which outputs true, the loop will exit and this function's future will complete.
///
/// See [`BackgroundProcessor::start`] for information on which actions this handles.
///
/// Requires the `futures` feature. Note that while this method is available without the `std`
/// feature, doing so will skip calling [`NetworkGraph::remove_stale_channels_and_tracking`],
/// you should call [`NetworkGraph::remove_stale_channels_and_tracking_with_time`] regularly
/// manually instead.
#[cfg(feature = "futures")]
pub async fn process_events_async<
	'a,
	UL: 'static + Deref + Send + Sync,
	CF: 'static + Deref + Send + Sync,
	CW: 'static + Deref + Send + Sync,
	T: 'static + Deref + Send + Sync,
	ES: 'static + Deref + Send + Sync,
	NS: 'static + Deref + Send + Sync,
	SP: 'static + Deref + Send + Sync,
	F: 'static + Deref + Send + Sync,
	R: 'static + Deref + Send + Sync,
	G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
	L: 'static + Deref + Send + Sync,
	P: 'static + Deref + Send + Sync,
	Descriptor: 'static + SocketDescriptor + Send + Sync,
	CMH: 'static + Deref + Send + Sync,
	RMH: 'static + Deref + Send + Sync,
	OMH: 'static + Deref + Send + Sync,
	EventHandlerFuture: core::future::Future<Output = ()>,
	EventHandler: Fn(Event) -> EventHandlerFuture,
	PS: 'static + Deref + Send,
	M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
	CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
	PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
	RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
	UMH: 'static + Deref + Send + Sync,
	PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, OMH, L, UMH, NS>> + Send + Sync,
	S: 'static + Deref<Target = SC> + Send + Sync,
	SC: for<'b> WriteableScore<'b>,
	SleepFuture: core::future::Future<Output = bool> + core::marker::Unpin,
	Sleeper: Fn(Duration) -> SleepFuture
>(
	persister: PS, event_handler: EventHandler, chain_monitor: M, channel_manager: CM,
	gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
	sleeper: Sleeper,
) -> Result<(), lightning::io::Error>
where
	UL::Target: 'static + UtxoLookup,
	CF::Target: 'static + chain::Filter,
	CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
	T::Target: 'static + BroadcasterInterface,
	ES::Target: 'static + EntropySource,
	NS::Target: 'static + NodeSigner,
	SP::Target: 'static + SignerProvider,
	F::Target: 'static + FeeEstimator,
	R::Target: 'static + Router,
	L::Target: 'static + Logger,
	P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
	CMH::Target: 'static + ChannelMessageHandler,
	OMH::Target: 'static + OnionMessageHandler,
	RMH::Target: 'static + RoutingMessageHandler,
	UMH::Target: 'static + CustomMessageHandler,
	PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
{
	let mut should_break = true;
	// Wrap the user's handler so graph and scorer updates run before each event is delivered.
	let async_event_handler = |event| {
		let network_graph = gossip_sync.network_graph();
		let event_handler = &event_handler;
		let scorer = &scorer;
		async move {
			if let Some(network_graph) = network_graph {
				handle_network_graph_update(network_graph, &event)
			}
			if let Some(ref scorer) = scorer {
				update_scorer(scorer, &event);
			}
			event_handler(event).await;
		}
	};
	define_run_body!(persister,
		chain_monitor, chain_monitor.process_pending_events_async(async_event_handler).await,
		channel_manager, channel_manager.process_pending_events_async(async_event_handler).await,
		gossip_sync, peer_manager, logger, scorer, should_break, {
			// Race the manager's persistence signal, the monitor's update signal, and the
			// caller-supplied sleeper; only a persistence signal (A) requests a re-persist.
			let fut = Selector {
				a: channel_manager.get_persistable_update_future(),
				b: chain_monitor.get_update_future(),
				c: sleeper(Duration::from_millis(100)),
			};
			match fut.await {
				SelectorOutput::A => true,
				SelectorOutput::B => false,
				SelectorOutput::C(exit) => {
					should_break = exit;
					false
				}
			}
		}, |t| sleeper(Duration::from_secs(t)),
		|fut: &mut SleepFuture, _| {
			// Poll once with a dummy waker to check completion without an async context.
			let mut waker = dummy_waker();
			let mut ctx = task::Context::from_waker(&mut waker);
			core::pin::Pin::new(fut).poll(&mut ctx).is_ready()
		})
}
|
|
|
|
|
2023-01-17 00:16:48 +00:00
|
|
|
#[cfg(feature = "std")]
impl BackgroundProcessor {
	/// Start a background thread that takes care of responsibilities enumerated in the [top-level
	/// documentation].
	///
	/// The thread runs indefinitely unless the object is dropped, [`stop`] is called, or
	/// [`Persister::persist_manager`] returns an error. In case of an error, the error is retrieved by calling
	/// either [`join`] or [`stop`].
	///
	/// # Data Persistence
	///
	/// [`Persister::persist_manager`] is responsible for writing out the [`ChannelManager`] to disk, and/or
	/// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
	/// [`ChannelManager`]. See the `lightning-persister` crate for LDK's
	/// provided implementation.
	///
	/// [`Persister::persist_graph`] is responsible for writing out the [`NetworkGraph`] to disk, if
	/// [`GossipSync`] is supplied. See [`NetworkGraph::write`] for writing out a [`NetworkGraph`].
	/// See the `lightning-persister` crate for LDK's provided implementation.
	///
	/// Typically, users should either implement [`Persister::persist_manager`] to never return an
	/// error or call [`join`] and handle any error that may arise. For the latter case,
	/// `BackgroundProcessor` must be restarted by calling `start` again after handling the error.
	///
	/// # Event Handling
	///
	/// `event_handler` is responsible for handling events that users should be notified of (e.g.,
	/// payment failed). [`BackgroundProcessor`] may decorate the given [`EventHandler`] with common
	/// functionality implemented by other handlers.
	/// * [`P2PGossipSync`] if given will update the [`NetworkGraph`] based on payment failures.
	///
	/// # Rapid Gossip Sync
	///
	/// If rapid gossip sync is meant to run at startup, pass [`RapidGossipSync`] via `gossip_sync`
	/// to indicate that the [`BackgroundProcessor`] should not prune the [`NetworkGraph`] instance
	/// until the [`RapidGossipSync`] instance completes its first sync.
	///
	/// [top-level documentation]: BackgroundProcessor
	/// [`join`]: Self::join
	/// [`stop`]: Self::stop
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	/// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
	/// [`Persister::persist_manager`]: lightning::util::persist::Persister::persist_manager
	/// [`Persister::persist_graph`]: lightning::util::persist::Persister::persist_graph
	/// [`NetworkGraph`]: lightning::routing::gossip::NetworkGraph
	/// [`NetworkGraph::write`]: lightning::routing::gossip::NetworkGraph#impl-Writeable
	pub fn start<
		'a,
		UL: 'static + Deref + Send + Sync,
		CF: 'static + Deref + Send + Sync,
		CW: 'static + Deref + Send + Sync,
		T: 'static + Deref + Send + Sync,
		ES: 'static + Deref + Send + Sync,
		NS: 'static + Deref + Send + Sync,
		SP: 'static + Deref + Send + Sync,
		F: 'static + Deref + Send + Sync,
		R: 'static + Deref + Send + Sync,
		G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
		L: 'static + Deref + Send + Sync,
		P: 'static + Deref + Send + Sync,
		Descriptor: 'static + SocketDescriptor + Send + Sync,
		CMH: 'static + Deref + Send + Sync,
		OMH: 'static + Deref + Send + Sync,
		RMH: 'static + Deref + Send + Sync,
		EH: 'static + EventHandler + Send,
		PS: 'static + Deref + Send,
		M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
		CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
		PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
		RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
		UMH: 'static + Deref + Send + Sync,
		PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, OMH, L, UMH, NS>> + Send + Sync,
		S: 'static + Deref<Target = SC> + Send + Sync,
		SC: for <'b> WriteableScore<'b>,
	>(
		persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
		gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
	) -> Self
	where
		UL::Target: 'static + UtxoLookup,
		CF::Target: 'static + chain::Filter,
		CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
		T::Target: 'static + BroadcasterInterface,
		ES::Target: 'static + EntropySource,
		NS::Target: 'static + NodeSigner,
		SP::Target: 'static + SignerProvider,
		F::Target: 'static + FeeEstimator,
		R::Target: 'static + Router,
		L::Target: 'static + Logger,
		P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
		CMH::Target: 'static + ChannelMessageHandler,
		OMH::Target: 'static + OnionMessageHandler,
		RMH::Target: 'static + RoutingMessageHandler,
		UMH::Target: 'static + CustomMessageHandler,
		PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
	{
		// Shared flag used to ask the background thread to exit; `stop_and_join_thread`
		// stores `true` with `Release` which pairs with the `Acquire` load below.
		let stop_thread = Arc::new(AtomicBool::new(false));
		let stop_thread_clone = stop_thread.clone();
		let handle = thread::spawn(move || -> Result<(), std::io::Error> {
			// Decorate the user-provided handler: apply any network graph update and any
			// scorer update implied by the event before handing it to the user.
			let event_handler = |event| {
				let network_graph = gossip_sync.network_graph();
				if let Some(network_graph) = network_graph {
					handle_network_graph_update(network_graph, &event)
				}
				if let Some(ref scorer) = scorer {
					update_scorer(scorer, &event);
				}
				event_handler.handle_event(event);
			};
			// The main processing loop lives in `define_run_body!` (defined earlier in this
			// file), presumably shared with the async processing variant — the last three
			// arguments give it a wakeup/sleep primitive and a wall-clock timer.
			define_run_body!(persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
				channel_manager, channel_manager.process_pending_events(&event_handler),
				gossip_sync, peer_manager, logger, scorer, stop_thread.load(Ordering::Acquire),
				Sleeper::from_two_futures(
					channel_manager.get_persistable_update_future(),
					chain_monitor.get_update_future()
				).wait_timeout(Duration::from_millis(100)),
				|_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur)
		});
		Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
	}

	/// Join `BackgroundProcessor`'s thread, returning any error that occurred while persisting
	/// [`ChannelManager`].
	///
	/// # Panics
	///
	/// This function panics if the background thread has panicked such as while persisting or
	/// handling events.
	///
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	pub fn join(mut self) -> Result<(), std::io::Error> {
		assert!(self.thread_handle.is_some());
		self.join_thread()
	}

	/// Stop `BackgroundProcessor`'s thread, returning any error that occurred while persisting
	/// [`ChannelManager`].
	///
	/// # Panics
	///
	/// This function panics if the background thread has panicked such as while persisting or
	/// handling events.
	///
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	pub fn stop(mut self) -> Result<(), std::io::Error> {
		assert!(self.thread_handle.is_some());
		self.stop_and_join_thread()
	}

	// Signals the background thread to exit, then blocks until it has.
	fn stop_and_join_thread(&mut self) -> Result<(), std::io::Error> {
		self.stop_thread.store(true, Ordering::Release);
		self.join_thread()
	}

	// Joins the thread if it is still running, propagating its `Result`; a thread panic
	// is re-raised here via `unwrap()`.
	fn join_thread(&mut self) -> Result<(), std::io::Error> {
		match self.thread_handle.take() {
			Some(handle) => handle.join().unwrap(),
			// Already joined (e.g. via an earlier `stop`) — nothing to report.
			None => Ok(()),
		}
	}
}
|
|
|
|
|
2023-01-17 00:16:48 +00:00
|
|
|
#[cfg(feature = "std")]
impl Drop for BackgroundProcessor {
	fn drop(&mut self) {
		// Ensure the background thread is asked to exit and fully joined before the
		// processor goes away; a persistence error surfaces as a panic here since
		// `Drop` has no way to return it.
		let join_result = self.stop_and_join_thread();
		join_result.unwrap();
	}
}
|
|
|
|
|
2023-01-17 00:16:48 +00:00
|
|
|
#[cfg(all(feature = "std", test))]
|
2021-01-11 18:03:32 -05:00
|
|
|
mod tests {
|
2021-05-11 08:34:57 -07:00
|
|
|
use bitcoin::blockdata::block::BlockHeader;
|
2021-01-11 18:03:32 -05:00
|
|
|
use bitcoin::blockdata::constants::genesis_block;
|
2022-08-09 17:39:51 +02:00
|
|
|
use bitcoin::blockdata::locktime::PackedLockTime;
|
2021-01-11 18:03:32 -05:00
|
|
|
use bitcoin::blockdata::transaction::{Transaction, TxOut};
|
|
|
|
use bitcoin::network::constants::Network;
|
2023-02-03 11:25:20 -05:00
|
|
|
use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1};
|
2022-04-24 16:03:26 +00:00
|
|
|
use lightning::chain::{BestBlock, Confirm, chainmonitor};
|
2021-05-11 08:34:57 -07:00
|
|
|
use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
|
2023-02-02 20:08:00 -05:00
|
|
|
use lightning::chain::keysinterface::{InMemorySigner, KeysManager};
|
2021-01-11 18:03:32 -05:00
|
|
|
use lightning::chain::transaction::OutPoint;
|
2023-03-07 13:57:01 -08:00
|
|
|
use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
|
2021-01-11 18:03:32 -05:00
|
|
|
use lightning::get_event_msg;
|
2023-02-03 11:25:20 -05:00
|
|
|
use lightning::ln::PaymentHash;
|
2023-02-03 11:14:53 -05:00
|
|
|
use lightning::ln::channelmanager;
|
2023-02-03 11:25:20 -05:00
|
|
|
use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
|
|
|
|
use lightning::ln::features::{ChannelFeatures, NodeFeatures};
|
2021-07-31 09:32:27 -05:00
|
|
|
use lightning::ln::msgs::{ChannelMessageHandler, Init};
|
2021-08-05 14:51:17 +09:00
|
|
|
use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
|
2023-02-03 11:14:53 -05:00
|
|
|
use lightning::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
|
|
|
|
use lightning::routing::router::{DefaultRouter, RouteHop};
|
|
|
|
use lightning::routing::scoring::{ChannelUsage, Score};
|
2021-01-11 18:03:32 -05:00
|
|
|
use lightning::util::config::UserConfig;
|
|
|
|
use lightning::util::ser::Writeable;
|
|
|
|
use lightning::util::test_utils;
|
2022-04-11 13:50:31 -04:00
|
|
|
use lightning::util::persist::KVStorePersister;
|
2021-01-11 18:03:32 -05:00
|
|
|
use lightning_persister::FilesystemPersister;
|
2023-02-03 11:14:53 -05:00
|
|
|
use std::collections::VecDeque;
|
2022-04-24 16:03:26 +00:00
|
|
|
use std::fs;
|
2021-01-11 18:03:32 -05:00
|
|
|
use std::path::PathBuf;
|
|
|
|
use std::sync::{Arc, Mutex};
|
2022-06-01 15:26:07 -07:00
|
|
|
use std::sync::mpsc::SyncSender;
|
2021-01-11 18:03:32 -05:00
|
|
|
use std::time::Duration;
|
2022-08-09 17:39:51 +02:00
|
|
|
use bitcoin::hashes::Hash;
|
|
|
|
use bitcoin::TxMerkleNode;
|
2022-06-01 15:26:07 -07:00
|
|
|
use lightning_rapid_gossip_sync::RapidGossipSync;
|
2022-06-02 14:48:32 -07:00
|
|
|
use super::{BackgroundProcessor, GossipSync, FRESHNESS_TIMER};
|
2021-01-11 18:03:32 -05:00
|
|
|
|
2021-05-11 08:34:57 -07:00
|
|
|
	// Deadline (in seconds, five freshness intervals) used by tests when waiting for an
	// expected event — presumably consumed by the event-receiving helpers below; confirm at
	// call sites outside this view.
	const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;
|
|
|
|
|
2022-10-14 13:24:02 +02:00
|
|
|
#[derive(Clone, Hash, PartialEq, Eq)]
|
2021-04-02 18:40:57 -04:00
|
|
|
struct TestDescriptor{}
|
|
|
|
impl SocketDescriptor for TestDescriptor {
|
|
|
|
fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
|
|
|
|
0
|
|
|
|
}
|
|
|
|
|
|
|
|
fn disconnect_socket(&mut self) {}
|
|
|
|
}
|
|
|
|
|
2023-02-03 11:14:53 -05:00
|
|
|
	// Concrete `ChannelManager` instantiation used throughout these tests: `KeysManager`
	// serves as entropy source, node signer and signer provider, and routing is scored by
	// the in-file `TestScorer`.
	type ChannelManager = channelmanager::ChannelManager<Arc<ChainMonitor>, Arc<test_utils::TestBroadcaster>, Arc<KeysManager>, Arc<KeysManager>, Arc<KeysManager>, Arc<test_utils::TestFeeEstimator>, Arc<DefaultRouter< Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>, Arc<Mutex<TestScorer>>>>, Arc<test_utils::TestLogger>>;

	// Concrete `ChainMonitor` instantiation backed by the filesystem persister.
	type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemPersister>>;

	// Shorthand for the P2P gossip sync type used in `Node` and the `GossipSync` helpers.
	type PGS = Arc<P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>;
	// Shorthand for the rapid gossip sync type used in `Node` and the `GossipSync` helpers.
	type RGS = Arc<RapidGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>>>;
|
|
|
|
|
2021-01-11 18:03:32 -05:00
|
|
|
	/// Bundles everything a test needs to run one Lightning node under the
	/// `BackgroundProcessor`: the manager, both gossip-sync flavors, peer manager,
	/// chain monitor, persistence, and supporting test utilities.
	struct Node {
		/// The node's `ChannelManager`.
		node: Arc<ChannelManager>,
		/// P2P (full) gossip sync, wrapped by `Node::p2p_gossip_sync()`.
		p2p_gossip_sync: PGS,
		/// Rapid gossip sync, wrapped by `Node::rapid_gossip_sync()`.
		rapid_gossip_sync: RGS,
		/// Peer manager wired with test message handlers and `TestDescriptor` sockets.
		peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, IgnoringMessageHandler, Arc<test_utils::TestLogger>, IgnoringMessageHandler, Arc<KeysManager>>>,
		/// Chain monitor fed by `confirm_transaction_depth`.
		chain_monitor: Arc<ChainMonitor>,
		/// On-disk persister; its directory is removed in `Drop`.
		persister: Arc<FilesystemPersister>,
		/// Records transactions the node broadcasts.
		tx_broadcaster: Arc<test_utils::TestBroadcaster>,
		/// Network graph shared by both gossip syncs.
		network_graph: Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
		/// Per-node logger (id includes the node index).
		logger: Arc<test_utils::TestLogger>,
		/// The node's view of the best block, advanced by `confirm_transaction_depth`.
		best_block: BestBlock,
		/// Scorer whose expectations tests can assert on.
		scorer: Arc<Mutex<TestScorer>>,
	}
|
|
|
|
|
|
|
|
impl Node {
|
|
|
|
fn p2p_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
|
|
|
|
GossipSync::P2P(self.p2p_gossip_sync.clone())
|
|
|
|
}
|
|
|
|
|
|
|
|
fn rapid_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
|
|
|
|
GossipSync::Rapid(self.rapid_gossip_sync.clone())
|
|
|
|
}
|
|
|
|
|
|
|
|
fn no_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
|
|
|
|
GossipSync::None
|
|
|
|
}
|
2021-01-11 18:03:32 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
impl Drop for Node {
|
|
|
|
fn drop(&mut self) {
|
|
|
|
let data_dir = self.persister.get_data_dir();
|
|
|
|
match fs::remove_dir_all(data_dir.clone()) {
|
|
|
|
Err(e) => println!("Failed to remove test persister directory {}: {}", data_dir, e),
|
|
|
|
_ => {}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-03-21 20:13:14 -07:00
|
|
|
	/// Test persister wrapping a `FilesystemPersister` which can inject errors for specific
	/// keys and signal whenever the network graph is persisted (see the `KVStorePersister`
	/// impl below for how each field is consumed).
	struct Persister {
		/// If set, persisting the `"network_graph"` key fails with this error.
		graph_error: Option<(std::io::ErrorKind, &'static str)>,
		/// If set, a `()` is sent on every `"network_graph"` persistence attempt.
		graph_persistence_notifier: Option<SyncSender<()>>,
		/// If set, persisting the `"manager"` key fails with this error.
		manager_error: Option<(std::io::ErrorKind, &'static str)>,
		/// If set, persisting the `"scorer"` key fails with this error.
		scorer_error: Option<(std::io::ErrorKind, &'static str)>,
		/// The real persister that performs the actual writes when no error is injected.
		filesystem_persister: FilesystemPersister,
	}
|
|
|
|
|
|
|
|
impl Persister {
|
|
|
|
fn new(data_dir: String) -> Self {
|
2023-02-27 18:24:57 +00:00
|
|
|
let filesystem_persister = FilesystemPersister::new(data_dir);
|
2022-06-01 15:26:07 -07:00
|
|
|
Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, filesystem_persister }
|
2022-03-28 19:36:43 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
|
|
|
|
Self { graph_error: Some((error, message)), ..self }
|
|
|
|
}
|
|
|
|
|
2022-06-01 15:26:07 -07:00
|
|
|
fn with_graph_persistence_notifier(self, sender: SyncSender<()>) -> Self {
|
|
|
|
Self { graph_persistence_notifier: Some(sender), ..self }
|
|
|
|
}
|
|
|
|
|
2022-03-28 19:36:43 -07:00
|
|
|
fn with_manager_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
|
|
|
|
Self { manager_error: Some((error, message)), ..self }
|
|
|
|
}
|
2022-04-27 22:16:38 -07:00
|
|
|
|
|
|
|
fn with_scorer_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
|
|
|
|
Self { scorer_error: Some((error, message)), ..self }
|
|
|
|
}
|
2022-03-21 20:13:14 -07:00
|
|
|
}
|
|
|
|
|
2022-04-11 13:50:31 -04:00
|
|
|
impl KVStorePersister for Persister {
|
|
|
|
fn persist<W: Writeable>(&self, key: &str, object: &W) -> std::io::Result<()> {
|
|
|
|
if key == "manager" {
|
|
|
|
if let Some((error, message)) = self.manager_error {
|
|
|
|
return Err(std::io::Error::new(error, message))
|
|
|
|
}
|
2022-03-28 19:36:43 -07:00
|
|
|
}
|
2022-03-21 20:13:14 -07:00
|
|
|
|
2022-04-11 13:50:31 -04:00
|
|
|
if key == "network_graph" {
|
2022-06-01 15:26:07 -07:00
|
|
|
if let Some(sender) = &self.graph_persistence_notifier {
|
|
|
|
sender.send(()).unwrap();
|
|
|
|
};
|
|
|
|
|
2022-04-11 13:50:31 -04:00
|
|
|
if let Some((error, message)) = self.graph_error {
|
|
|
|
return Err(std::io::Error::new(error, message))
|
|
|
|
}
|
2022-03-28 19:36:43 -07:00
|
|
|
}
|
2022-04-11 13:50:31 -04:00
|
|
|
|
2022-04-27 22:16:38 -07:00
|
|
|
if key == "scorer" {
|
|
|
|
if let Some((error, message)) = self.scorer_error {
|
|
|
|
return Err(std::io::Error::new(error, message))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-04-11 13:50:31 -04:00
|
|
|
self.filesystem_persister.persist(key, object)
|
2022-03-21 20:13:14 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-02-03 11:14:53 -05:00
|
|
|
	/// A scorer that asserts it is fed an exact, ordered sequence of payment/probe results.
	struct TestScorer {
		/// Expected results, consumed front-to-back by the `Score` impl; `None` means the
		/// scorer accepts anything without checking.
		event_expectations: Option<VecDeque<TestResult>>,
	}
|
|
|
|
|
|
|
|
	/// One expected scorer callback, matched against the `Score` method actually invoked.
	#[derive(Debug)]
	enum TestResult {
		/// Expect `payment_path_failed` with this path and short channel id.
		PaymentFailure { path: Vec<RouteHop>, short_channel_id: u64 },
		/// Expect `payment_path_successful` with this path.
		PaymentSuccess { path: Vec<RouteHop> },
		/// Expect `probe_failed` with this path.
		ProbeFailure { path: Vec<RouteHop> },
		/// Expect `probe_successful` with this path.
		ProbeSuccess { path: Vec<RouteHop> },
	}
|
|
|
|
|
|
|
|
impl TestScorer {
|
|
|
|
fn new() -> Self {
|
|
|
|
Self { event_expectations: None }
|
|
|
|
}
|
|
|
|
|
|
|
|
fn expect(&mut self, expectation: TestResult) {
|
2023-02-27 18:24:57 +00:00
|
|
|
self.event_expectations.get_or_insert_with(VecDeque::new).push_back(expectation);
|
2023-02-03 11:14:53 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl lightning::util::ser::Writeable for TestScorer {
|
|
|
|
fn write<W: lightning::util::ser::Writer>(&self, _: &mut W) -> Result<(), lightning::io::Error> { Ok(()) }
|
|
|
|
}
|
|
|
|
|
|
|
|
	impl Score for TestScorer {
		// Never expected to be called in these tests.
		fn channel_penalty_msat(
			&self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId, _usage: ChannelUsage
		) -> u64 { unimplemented!(); }

		// Pops the next expectation (panicking if none remain) and requires it to be a
		// `PaymentFailure` matching the observed path and channel; any other queued
		// expectation means the callbacks arrived out of the expected order.
		fn payment_path_failed(&mut self, actual_path: &[&RouteHop], actual_short_channel_id: u64) {
			if let Some(expectations) = &mut self.event_expectations {
				match expectations.pop_front().unwrap() {
					TestResult::PaymentFailure { path, short_channel_id } => {
						assert_eq!(actual_path, &path.iter().collect::<Vec<_>>()[..]);
						assert_eq!(actual_short_channel_id, short_channel_id);
					},
					TestResult::PaymentSuccess { path } => {
						panic!("Unexpected successful payment path: {:?}", path)
					},
					TestResult::ProbeFailure { path } => {
						panic!("Unexpected probe failure: {:?}", path)
					},
					TestResult::ProbeSuccess { path } => {
						panic!("Unexpected probe success: {:?}", path)
					}
				}
			}
		}

		// Like `payment_path_failed`, but the next expectation must be `PaymentSuccess`.
		fn payment_path_successful(&mut self, actual_path: &[&RouteHop]) {
			if let Some(expectations) = &mut self.event_expectations {
				match expectations.pop_front().unwrap() {
					TestResult::PaymentFailure { path, .. } => {
						panic!("Unexpected payment path failure: {:?}", path)
					},
					TestResult::PaymentSuccess { path } => {
						assert_eq!(actual_path, &path.iter().collect::<Vec<_>>()[..]);
					},
					TestResult::ProbeFailure { path } => {
						panic!("Unexpected probe failure: {:?}", path)
					},
					TestResult::ProbeSuccess { path } => {
						panic!("Unexpected probe success: {:?}", path)
					}
				}
			}
		}

		// The next expectation must be `ProbeFailure` with a matching path.
		fn probe_failed(&mut self, actual_path: &[&RouteHop], _: u64) {
			if let Some(expectations) = &mut self.event_expectations {
				match expectations.pop_front().unwrap() {
					TestResult::PaymentFailure { path, .. } => {
						panic!("Unexpected payment path failure: {:?}", path)
					},
					TestResult::PaymentSuccess { path } => {
						panic!("Unexpected payment path success: {:?}", path)
					},
					TestResult::ProbeFailure { path } => {
						assert_eq!(actual_path, &path.iter().collect::<Vec<_>>()[..]);
					},
					TestResult::ProbeSuccess { path } => {
						panic!("Unexpected probe success: {:?}", path)
					}
				}
			}
		}

		// The next expectation must be `ProbeSuccess` with a matching path.
		fn probe_successful(&mut self, actual_path: &[&RouteHop]) {
			if let Some(expectations) = &mut self.event_expectations {
				match expectations.pop_front().unwrap() {
					TestResult::PaymentFailure { path, .. } => {
						panic!("Unexpected payment path failure: {:?}", path)
					},
					TestResult::PaymentSuccess { path } => {
						panic!("Unexpected payment path success: {:?}", path)
					},
					TestResult::ProbeFailure { path } => {
						panic!("Unexpected probe failure: {:?}", path)
					},
					TestResult::ProbeSuccess { path } => {
						assert_eq!(actual_path, &path.iter().collect::<Vec<_>>()[..]);
					}
				}
			}
		}
	}
|
|
|
|
|
|
|
|
impl Drop for TestScorer {
|
|
|
|
fn drop(&mut self) {
|
|
|
|
if std::thread::panicking() {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if let Some(event_expectations) = &self.event_expectations {
|
|
|
|
if !event_expectations.is_empty() {
|
|
|
|
panic!("Unsatisfied event expectations: {:?}", event_expectations);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-11 18:03:32 -05:00
|
|
|
fn get_full_filepath(filepath: String, filename: String) -> String {
|
|
|
|
let mut path = PathBuf::from(filepath);
|
|
|
|
path.push(filename);
|
|
|
|
path.to_str().unwrap().to_string()
|
|
|
|
}
|
|
|
|
|
|
|
|
	/// Builds `num_nodes` fully-wired test [`Node`]s (persisting under directories derived
	/// from `persist_dir`) and connects every pair of them as peers.
	fn create_nodes(num_nodes: usize, persist_dir: String) -> Vec<Node> {
		let mut nodes = Vec::new();
		for i in 0..num_nodes {
			let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))});
			let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) });
			// Logger id embeds the node index so interleaved logs are attributable.
			let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
			let network = Network::Testnet;
			let genesis_block = genesis_block(network);
			let network_graph = Arc::new(NetworkGraph::new(network, logger.clone()));
			let scorer = Arc::new(Mutex::new(TestScorer::new()));
			// Deterministic per-node seed; reused for both the router and the key manager.
			let seed = [i as u8; 32];
			let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone()));
			let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet));
			// Each node persists into its own directory, removed again in `Node::drop`.
			let persister = Arc::new(FilesystemPersister::new(format!("{}_persister_{}", persist_dir, i)));
			let now = Duration::from_secs(genesis_block.header.time as u64);
			let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
			let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), persister.clone()));
			let best_block = BestBlock::from_network(network);
			let params = ChainParameters { network, best_block };
			// `keys_manager` is passed three times: entropy source, node signer, and
			// signer provider are all served by the same `KeysManager`.
			let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params));
			let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()));
			let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone(), logger.clone()));
			let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new()), onion_message_handler: IgnoringMessageHandler{}};
			let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), IgnoringMessageHandler{}, keys_manager.clone()));
			let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer };
			nodes.push(node);
		}

		// Connect every distinct pair; the boolean marks which side is treated as the
		// outbound connection in `peer_connected`.
		for i in 0..num_nodes {
			for j in (i+1)..num_nodes {
				nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init { features: nodes[j].node.init_features(), remote_network_address: None }, true).unwrap();
				nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init { features: nodes[i].node.init_features(), remote_network_address: None }, false).unwrap();
			}
		}

		nodes
	}
|
|
|
|
|
|
|
|
	// Drives a full channel open between `$node_a` and `$node_b` (open/accept handshake,
	// funding generation, funding_created/funding_signed) and evaluates to the funding
	// transaction. Note this consumes `$node_a`'s pending events.
	macro_rules! open_channel {
		($node_a: expr, $node_b: expr, $channel_value: expr) => {{
			begin_open_channel!($node_a, $node_b, $channel_value);
			let events = $node_a.node.get_and_clear_pending_events();
			assert_eq!(events.len(), 1);
			let (temporary_channel_id, tx) = handle_funding_generation_ready!(events[0], $channel_value);
			end_open_channel!($node_a, $node_b, temporary_channel_id, tx);
			tx
		}}
	}
|
|
|
|
|
|
|
|
	// First half of a channel open: `$node_a` initiates (push_msat 100, user_channel_id 42)
	// and the open_channel/accept_channel messages are exchanged.
	macro_rules! begin_open_channel {
		($node_a: expr, $node_b: expr, $channel_value: expr) => {{
			$node_a.node.create_channel($node_b.node.get_our_node_id(), $channel_value, 100, 42, None).unwrap();
			$node_b.node.handle_open_channel(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendOpenChannel, $node_b.node.get_our_node_id()));
			$node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendAcceptChannel, $node_a.node.get_our_node_id()));
		}}
	}
|
|
|
|
|
|
|
|
	// Destructures a `FundingGenerationReady` event (asserting the value and the
	// user_channel_id set in `begin_open_channel!`), builds a minimal funding transaction
	// paying the requested output script, and evaluates to `(temporary_channel_id, tx)`.
	macro_rules! handle_funding_generation_ready {
		($event: expr, $channel_value: expr) => {{
			match $event {
				Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, ref output_script, user_channel_id, .. } => {
					assert_eq!(channel_value_satoshis, $channel_value);
					assert_eq!(user_channel_id, 42);

					// No inputs are needed: the test chain never validates the funding tx.
					let tx = Transaction { version: 1 as i32, lock_time: PackedLockTime(0), input: Vec::new(), output: vec![TxOut {
						value: channel_value_satoshis, script_pubkey: output_script.clone(),
					}]};
					(temporary_channel_id, tx)
				},
				_ => panic!("Unexpected event"),
			}
		}}
	}
|
2021-01-11 18:03:32 -05:00
|
|
|
|
2021-05-11 08:07:54 -07:00
|
|
|
	// Second half of a channel open: hand the funding transaction to `$node_a` and
	// exchange funding_created/funding_signed with `$node_b`.
	macro_rules! end_open_channel {
		($node_a: expr, $node_b: expr, $temporary_channel_id: expr, $tx: expr) => {{
			$node_a.node.funding_transaction_generated(&$temporary_channel_id, &$node_b.node.get_our_node_id(), $tx.clone()).unwrap();
			$node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
			$node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
		}}
	}
|
|
|
|
|
2021-05-31 20:04:36 +00:00
|
|
|
	/// Mines `depth` fake blocks on `node`, including `tx` in the first one, notifying
	/// both the `ChannelManager` and `ChainMonitor` as appropriate.
	fn confirm_transaction_depth(node: &mut Node, tx: &Transaction, depth: u32) {
		for i in 1..=depth {
			let prev_blockhash = node.best_block.block_hash();
			let height = node.best_block.height() + 1;
			// Header contents other than the chain linkage are arbitrary for these tests.
			let header = BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: TxMerkleNode::all_zeros(), time: height, bits: 42, nonce: 42 };
			let txdata = vec![(0, tx)];
			node.best_block = BestBlock::new(header.block_hash(), height);
			match i {
				// First block: report the transaction as confirmed.
				1 => {
					node.node.transactions_confirmed(&header, &txdata, height);
					node.chain_monitor.transactions_confirmed(&header, &txdata, height);
				},
				// Last block: advance the best-block pointer. Note that when `depth == 1`
				// only the first arm runs — match arms are tried in order.
				x if x == depth => {
					node.node.best_block_updated(&header, height);
					node.chain_monitor.best_block_updated(&header, height);
				},
				// Intermediate blocks need no notification.
				_ => {},
			}
		}
	}
|
2021-05-31 20:04:36 +00:00
|
|
|
// Confirms `tx` and then mines enough additional blocks (`ANTI_REORG_DELAY` in total) that the
// confirmation is treated as irreversible by the chain-watching components.
fn confirm_transaction(node: &mut Node, tx: &Transaction) {
	confirm_transaction_depth(node, tx, ANTI_REORG_DELAY);
}
|
2021-05-11 08:34:57 -07:00
|
|
|
|
2021-01-11 18:03:32 -05:00
|
|
|
#[test]
fn test_background_processor() {
	// Test that when a new channel is created, the ChannelManager needs to be re-persisted with
	// updates. Also test that when new updates are available, the manager signals that it needs
	// re-persistence and is successfully re-persisted.
	let nodes = create_nodes(2, "test_background_processor".to_string());

	// Go through the channel creation process so that each node has something to persist. Since
	// open_channel consumes events, it must complete before starting BackgroundProcessor to
	// avoid a race with processing events.
	let tx = open_channel!(nodes[0], nodes[1], 100000);

	// Initiate the background processors to watch each node.
	let data_dir = nodes[0].persister.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir));
	// Events are irrelevant here; we only care about persistence behavior.
	let event_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	// Busy-waits until the on-disk file at `$filepath` matches the current serialization of
	// `$node`. The serialization is recomputed on every pass because the background processor
	// may persist again concurrently; panics if `$node` itself fails to serialize.
	macro_rules! check_persisted_data {
		($node: expr, $filepath: expr) => {
			let mut expected_bytes = Vec::new();
			loop {
				expected_bytes.clear();
				match $node.write(&mut expected_bytes) {
					Ok(()) => {
						match std::fs::read($filepath) {
							Ok(bytes) => {
								// Keep spinning until the background processor has written the
								// exact bytes we expect (the file may not exist yet, or may
								// hold a stale snapshot).
								if bytes == expected_bytes {
									break
								} else {
									continue
								}
							},
							Err(_) => continue
						}
					},
					Err(e) => panic!("Unexpected error: {}", e)
				}
			}
		}
	}

	// Check that the initial channel manager data is persisted as expected.
	let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "manager".to_string());
	check_persisted_data!(nodes[0].node, filepath.clone());

	// Spin until the manager's persistence-needed flag has been cleared by the processor.
	loop {
		if !nodes[0].node.get_persistence_condvar_value() { break }
	}

	// Force-close the channel.
	nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();

	// Check that the force-close updates are persisted.
	check_persisted_data!(nodes[0].node, filepath.clone());
	loop {
		if !nodes[0].node.get_persistence_condvar_value() { break }
	}

	// Check network graph is persisted
	let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "network_graph".to_string());
	check_persisted_data!(nodes[0].network_graph, filepath.clone());

	// Check scorer is persisted
	let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "scorer".to_string());
	check_persisted_data!(nodes[0].scorer, filepath.clone());

	assert!(bg_processor.stop().is_ok());
}
|
|
|
|
|
|
|
|
#[test]
fn test_timer_tick_called() {
	// Verify the background processor drives both ChannelManager's and PeerManager's
	// `timer_tick_occurred` on the `FRESHNESS_TIMER` cadence, by watching for the log lines
	// each tick emits.
	let nodes = create_nodes(1, "test_timer_tick_called".to_string());
	let data_dir = nodes[0].persister.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir));
	// No events are expected in this test; ignore any that arrive.
	let event_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	// Busy-wait until both expected log lines have been recorded by the test logger.
	loop {
		let recorded = nodes[0].logger.lines.lock().unwrap();
		let chan_mgr_line = "Calling ChannelManager's timer_tick_occurred".to_string();
		let peer_mgr_line = "Calling PeerManager's timer_tick_occurred".to_string();
		let saw_chan_mgr = recorded.get(&("lightning_background_processor".to_string(), chan_mgr_line)).is_some();
		let saw_peer_mgr = recorded.get(&("lightning_background_processor".to_string(), peer_mgr_line)).is_some();
		if saw_chan_mgr && saw_peer_mgr {
			break
		}
	}

	assert!(bg_processor.stop().is_ok());
}
|
|
|
|
|
|
|
|
#[test]
fn test_channel_manager_persist_error() {
	// Test that if we encounter an error during manager persistence, the thread panics.
	let nodes = create_nodes(2, "test_persist_error".to_string());
	open_channel!(nodes[0], nodes[1], 100000);

	// Use a persister rigged to fail every ChannelManager persist attempt.
	let data_dir = nodes[0].persister.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
	let event_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	// The background thread should exit with the injected persistence error.
	let join_result = bg_processor.join();
	if let Err(e) = join_result {
		assert_eq!(e.kind(), std::io::ErrorKind::Other);
		assert_eq!(e.get_ref().unwrap().to_string(), "test");
	} else {
		panic!("Expected error persisting manager");
	}
}
|
2021-05-11 08:07:54 -07:00
|
|
|
|
2022-03-21 20:13:14 -07:00
|
|
|
#[test]
fn test_network_graph_persist_error() {
	// Test that if we encounter an error during network graph persistence, an error gets returned.
	let nodes = create_nodes(2, "test_persist_network_graph_error".to_string());

	// Use a persister rigged to fail every network-graph persist attempt.
	let data_dir = nodes[0].persister.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
	let event_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	// Stopping the processor should surface the injected error.
	let stop_result = bg_processor.stop();
	if let Err(e) = stop_result {
		assert_eq!(e.kind(), std::io::ErrorKind::Other);
		assert_eq!(e.get_ref().unwrap().to_string(), "test");
	} else {
		panic!("Expected error persisting network graph");
	}
}
|
|
|
|
|
2022-04-27 22:16:38 -07:00
|
|
|
#[test]
fn test_scorer_persist_error() {
	// Test that if we encounter an error during scorer persistence, an error gets returned.
	let nodes = create_nodes(2, "test_persist_scorer_error".to_string());

	// Use a persister rigged to fail every scorer persist attempt.
	let data_dir = nodes[0].persister.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
	let event_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	// Stopping the processor should surface the injected error.
	let stop_result = bg_processor.stop();
	if let Err(e) = stop_result {
		assert_eq!(e.kind(), std::io::ErrorKind::Other);
		assert_eq!(e.get_ref().unwrap().to_string(), "test");
	} else {
		panic!("Expected error persisting scorer");
	}
}
|
|
|
|
|
2021-05-11 08:07:54 -07:00
|
|
|
#[test]
fn test_background_event_handling() {
	// End-to-end check that the background processor delivers events to the user-supplied
	// handler: first FundingGenerationReady during channel open, then SpendableOutputs after a
	// force-close confirms to depth.
	let mut nodes = create_nodes(2, "test_background_event_handling".to_string());
	let channel_value = 100000;
	let data_dir = nodes[0].persister.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir.clone()));

	// Set up a background event handler for FundingGenerationReady events.
	let (sender, receiver) = std::sync::mpsc::sync_channel(1);
	let event_handler = move |event: Event| match event {
		// Build the funding tx from the event and forward (temporary_channel_id, tx) to the test.
		Event::FundingGenerationReady { .. } => sender.send(handle_funding_generation_ready!(event, channel_value)).unwrap(),
		Event::ChannelReady { .. } => {},
		_ => panic!("Unexpected event: {:?}", event),
	};

	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	// Open a channel and check that the FundingGenerationReady event was handled.
	begin_open_channel!(nodes[0], nodes[1], channel_value);
	let (temporary_channel_id, funding_tx) = receiver
		.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
		.expect("FundingGenerationReady not handled within deadline");
	end_open_channel!(nodes[0], nodes[1], temporary_channel_id, funding_tx);

	// Confirm the funding transaction.
	confirm_transaction(&mut nodes[0], &funding_tx);
	let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
	confirm_transaction(&mut nodes[1], &funding_tx);
	let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id());
	// Exchange channel_ready messages (and swallow the resulting channel_updates) so the
	// channel becomes fully usable on both sides.
	nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_funding);
	let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_funding);
	let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());

	// Stop the first processor before swapping in a handler for the second phase.
	assert!(bg_processor.stop().is_ok());

	// Set up a background event handler for SpendableOutputs events.
	let (sender, receiver) = std::sync::mpsc::sync_channel(1);
	let event_handler = move |event: Event| match event {
		Event::SpendableOutputs { .. } => sender.send(event).unwrap(),
		Event::ChannelReady { .. } => {},
		Event::ChannelClosed { .. } => {},
		_ => panic!("Unexpected event: {:?}", event),
	};
	let persister = Arc::new(Persister::new(data_dir));
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	// Force close the channel and check that the SpendableOutputs event was handled.
	nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
	let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
	// Confirm the commitment tx to BREAKDOWN_TIMEOUT depth so our to_self output matures.
	confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);

	let event = receiver
		.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
		.expect("Events not handled within deadline");
	match event {
		Event::SpendableOutputs { .. } => {},
		_ => panic!("Unexpected event: {:?}", event),
	}

	assert!(bg_processor.stop().is_ok());
}
|
2021-08-23 23:56:59 -05:00
|
|
|
|
2022-06-01 15:26:07 -07:00
|
|
|
#[test]
fn test_scorer_persistence() {
	// Verify the background processor periodically persists the scorer, by watching for the
	// log line emitted on each scorer-persist pass.
	let nodes = create_nodes(2, "test_scorer_persistence".to_string());
	let data_dir = nodes[0].persister.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir));
	// Events are irrelevant for this test.
	let event_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	// Busy-wait until the scorer-persist log line shows up.
	loop {
		let recorded = nodes[0].logger.lines.lock().unwrap();
		let expected_log = "Persisting scorer".to_string();
		let saw_persist = recorded.get(&("lightning_background_processor".to_string(), expected_log)).is_some();
		if saw_persist {
			break
		}
	}

	assert!(bg_processor.stop().is_ok());
}
|
|
|
|
|
|
|
|
#[test]
fn test_not_pruning_network_graph_until_graph_sync_completion() {
	// Verify that while a rapid-gossip-sync is outstanding the background processor does not
	// prune the network graph, and that once the RGS data is applied the graph is pruned and
	// persisted (signalled via the persistence notifier channel).
	let nodes = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion".to_string());
	let data_dir = nodes[0].persister.get_data_dir();
	let (sender, receiver) = std::sync::mpsc::sync_channel(1);
	// The persister notifies `sender` each time it persists the network graph.
	let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
	let network_graph = nodes[0].network_graph.clone();
	let features = ChannelFeatures::empty();
	// Seed the graph with a stale partial announcement that pruning would remove.
	network_graph.add_channel_from_partial_announcement(42, 53, features, nodes[0].node.get_our_node_id(), nodes[1].node.get_our_node_id())
		.expect("Failed to update channel from partial announcement");
	let original_graph_description = network_graph.to_string();
	assert!(original_graph_description.contains("42: features: 0000, node_one:"));
	assert_eq!(network_graph.read_only().channels().len(), 1);

	let event_handler = |_: _| {};
	let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	// Use the per-tick ChannelManager log line as a loop counter to ensure the processor's main
	// loop has run at least twice without pruning the graph.
	loop {
		let log_entries = nodes[0].logger.lines.lock().unwrap();
		let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
		if *log_entries.get(&("lightning_background_processor".to_string(), loop_counter))
			.unwrap_or(&0) > 1
		{
			// Wait until the loop has gone around at least twice.
			break
		}
	}

	// A small canned rapid-gossip-sync snapshot announcing two channels.
	let initialization_input = vec![
		76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
		79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
		0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
		187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
		157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
		88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
		204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
		181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
		110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
		76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
		226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
		0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
		0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
	];
	nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();

	// this should have added two channels
	assert_eq!(network_graph.read_only().channels().len(), 3);

	// Now that sync has completed, the processor should prune and persist the graph.
	receiver
		.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5))
		.expect("Network graph not pruned within deadline");

	background_processor.stop().unwrap();

	// all channels should now be pruned
	assert_eq!(network_graph.read_only().channels().len(), 0);
}
|
|
|
|
|
2023-02-03 11:25:20 -05:00
|
|
|
#[test]
fn test_payment_path_scoring() {
	// Ensure that we update the scorer when relevant events are processed. In this case, we ensure
	// that we update the scorer upon a payment path succeeding (note that the channel must be
	// public or else we won't score it).
	// Set up a background event handler for FundingGenerationReady events.
	let (sender, receiver) = std::sync::mpsc::sync_channel(1);
	// Forward each path-result event back to the test so it can confirm the handler ran.
	let event_handler = move |event: Event| match event {
		Event::PaymentPathFailed { .. } => sender.send(event).unwrap(),
		Event::PaymentPathSuccessful { .. } => sender.send(event).unwrap(),
		Event::ProbeSuccessful { .. } => sender.send(event).unwrap(),
		Event::ProbeFailed { .. } => sender.send(event).unwrap(),
		_ => panic!("Unexpected event: {:?}", event),
	};

	let nodes = create_nodes(1, "test_payment_path_scoring".to_string());
	let data_dir = nodes[0].persister.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir));
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	// A single-hop path over an arbitrary SCID; the test scorer records calls against it.
	let scored_scid = 4242;
	let secp_ctx = Secp256k1::new();
	let node_1_privkey = SecretKey::from_slice(&[42; 32]).unwrap();
	let node_1_id = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey);

	let path = vec![RouteHop {
		pubkey: node_1_id,
		node_features: NodeFeatures::empty(),
		short_channel_id: scored_scid,
		channel_features: ChannelFeatures::empty(),
		fee_msat: 0,
		cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA as u32,
	}];

	// A non-permanent failure with a known failing SCID should be scored as a payment failure.
	nodes[0].scorer.lock().unwrap().expect(TestResult::PaymentFailure { path: path.clone(), short_channel_id: scored_scid });
	nodes[0].node.push_pending_event(Event::PaymentPathFailed {
		payment_id: None,
		payment_hash: PaymentHash([42; 32]),
		payment_failed_permanently: false,
		failure: PathFailure::OnPath { network_update: None },
		path: path.clone(),
		short_channel_id: Some(scored_scid),
	});
	let event = receiver
		.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
		.expect("PaymentPathFailed not handled within deadline");
	match event {
		Event::PaymentPathFailed { .. } => {},
		_ => panic!("Unexpected event"),
	}

	// Ensure we'll score payments that were explicitly failed back by the destination as
	// ProbeSuccess.
	nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeSuccess { path: path.clone() });
	nodes[0].node.push_pending_event(Event::PaymentPathFailed {
		payment_id: None,
		payment_hash: PaymentHash([42; 32]),
		payment_failed_permanently: true,
		failure: PathFailure::OnPath { network_update: None },
		path: path.clone(),
		short_channel_id: None,
	});
	let event = receiver
		.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
		.expect("PaymentPathFailed not handled within deadline");
	match event {
		Event::PaymentPathFailed { .. } => {},
		_ => panic!("Unexpected event"),
	}

	// A successful payment path should be scored as a payment success.
	nodes[0].scorer.lock().unwrap().expect(TestResult::PaymentSuccess { path: path.clone() });
	nodes[0].node.push_pending_event(Event::PaymentPathSuccessful {
		payment_id: PaymentId([42; 32]),
		payment_hash: None,
		path: path.clone(),
	});
	let event = receiver
		.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
		.expect("PaymentPathSuccessful not handled within deadline");
	match event {
		Event::PaymentPathSuccessful { .. } => {},
		_ => panic!("Unexpected event"),
	}

	// A successful probe should be scored as a probe success.
	nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeSuccess { path: path.clone() });
	nodes[0].node.push_pending_event(Event::ProbeSuccessful {
		payment_id: PaymentId([42; 32]),
		payment_hash: PaymentHash([42; 32]),
		path: path.clone(),
	});
	let event = receiver
		.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
		.expect("ProbeSuccessful not handled within deadline");
	match event {
		Event::ProbeSuccessful { .. } => {},
		_ => panic!("Unexpected event"),
	}

	// A failed probe should be scored as a probe failure against the failing SCID.
	nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeFailure { path: path.clone() });
	nodes[0].node.push_pending_event(Event::ProbeFailed {
		payment_id: PaymentId([42; 32]),
		payment_hash: PaymentHash([42; 32]),
		path,
		short_channel_id: Some(scored_scid),
	});
	let event = receiver
		.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
		.expect("ProbeFailure not handled within deadline");
	match event {
		Event::ProbeFailed { .. } => {},
		_ => panic!("Unexpected event"),
	}

	assert!(bg_processor.stop().is_ok());
}
|
2021-01-11 18:03:32 -05:00
|
|
|
}
|