2021-03-17 14:18:37 -04:00
|
|
|
//! Utilities that take care of tasks that (1) need to happen periodically to keep Rust-Lightning
|
|
|
|
//! running properly, and (2) either can or should be run in the background. See docs for
|
|
|
|
//! [`BackgroundProcessor`] for more details on the nitty-gritty.
|
|
|
|
|
2021-03-17 14:05:09 -04:00
|
|
|
#![deny(broken_intra_doc_links)]
|
2021-03-17 14:18:37 -04:00
|
|
|
#![deny(missing_docs)]
|
2021-03-17 14:05:09 -04:00
|
|
|
#![deny(unsafe_code)]
|
|
|
|
|
2021-01-11 18:03:32 -05:00
|
|
|
#[macro_use] extern crate lightning;
|
|
|
|
|
|
|
|
use lightning::chain;
|
|
|
|
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
|
2021-10-07 23:46:13 +00:00
|
|
|
use lightning::chain::chainmonitor::{ChainMonitor, Persist};
|
2021-02-16 16:30:08 -05:00
|
|
|
use lightning::chain::keysinterface::{Sign, KeysInterface};
|
2021-01-11 18:03:32 -05:00
|
|
|
use lightning::ln::channelmanager::ChannelManager;
|
2021-04-02 18:40:57 -04:00
|
|
|
use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
|
2021-10-26 02:03:02 +00:00
|
|
|
use lightning::ln::peer_handler::{CustomMessageHandler, PeerManager, SocketDescriptor};
|
2021-11-01 13:14:14 -05:00
|
|
|
use lightning::routing::network_graph::{NetworkGraph, NetGraphMsgHandler};
|
2021-09-14 21:38:00 -05:00
|
|
|
use lightning::util::events::{Event, EventHandler, EventsProvider};
|
2021-01-11 18:03:32 -05:00
|
|
|
use lightning::util::logger::Logger;
|
|
|
|
use std::sync::Arc;
|
|
|
|
use std::sync::atomic::{AtomicBool, Ordering};
|
|
|
|
use std::thread;
|
|
|
|
use std::thread::JoinHandle;
|
|
|
|
use std::time::{Duration, Instant};
|
2021-04-13 16:04:17 -04:00
|
|
|
use std::ops::Deref;
|
2021-01-11 18:03:32 -05:00
|
|
|
|
2021-08-19 11:21:42 -05:00
|
|
|
/// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep
/// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
/// responsibilities are:
/// * Processing [`Event`]s with a user-provided [`EventHandler`].
/// * Monitoring whether the [`ChannelManager`] needs to be re-persisted to disk, and if so,
///   writing it to disk/backups by invoking the callback given to it at startup.
///   [`ChannelManager`] persistence should be done in the background.
/// * Calling [`ChannelManager::timer_tick_occurred`] and [`PeerManager::timer_tick_occurred`]
///   at the appropriate intervals.
/// * Calling [`NetworkGraph::remove_stale_channels`] (if a [`NetGraphMsgHandler`] is provided to
///   [`BackgroundProcessor::start`]).
///
/// It will also call [`PeerManager::process_events`] periodically though this shouldn't be relied
/// upon as doing so may result in high latency.
///
/// # Note
///
/// If [`ChannelManager`] persistence fails and the persisted manager becomes out-of-date, then
/// there is a risk of channels force-closing on startup when the manager realizes it's outdated.
/// However, as long as [`ChannelMonitor`] backups are sound, no funds besides those used for
/// unilateral chain closure fees are at risk.
///
/// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor
/// [`Event`]: lightning::util::events::Event
#[must_use = "BackgroundProcessor will immediately stop on drop. It should be stored until shutdown."]
pub struct BackgroundProcessor {
	// Flag shared with the spawned thread; setting it to `true` (Release) asks the event loop
	// to exit at the top of its next iteration (checked with Acquire in `start`'s loop).
	stop_thread: Arc<AtomicBool>,
	// Handle of the spawned event-loop thread, carrying any persistence error it exited with.
	// `None` once the thread has been joined (see `join_thread`).
	thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
}
|
|
|
|
|
|
|
|
// Interval, in seconds, between calls to `ChannelManager::timer_tick_occurred` in the
// background event loop (compared against `Instant::elapsed().as_secs()`).
#[cfg(not(test))]
const FRESHNESS_TIMER: u64 = 60;
// Shortened in tests so timer-driven behavior can be exercised quickly.
#[cfg(test)]
const FRESHNESS_TIMER: u64 = 1;

// Interval, in seconds, between calls to `PeerManager::timer_tick_occurred` in the
// background event loop.
#[cfg(all(not(test), not(debug_assertions)))]
const PING_TIMER: u64 = 10;
/// Signature operations take a lot longer without compiler optimisations.
/// Increasing the ping timer allows for this but slower devices will be disconnected if the
/// timeout is reached.
#[cfg(all(not(test), debug_assertions))]
const PING_TIMER: u64 = 30;
#[cfg(test)]
const PING_TIMER: u64 = 1;

/// Prune the network graph of stale entries hourly.
const NETWORK_PRUNE_TIMER: u64 = 60 * 60;
|
|
|
|
|
2021-04-13 19:38:31 -04:00
|
|
|
/// Trait which handles persisting a [`ChannelManager`] to disk.
///
/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
pub trait ChannelManagerPersister<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
where
	M::Target: 'static + chain::Watch<Signer>,
	T::Target: 'static + BroadcasterInterface,
	K::Target: 'static + KeysInterface<Signer = Signer>,
	F::Target: 'static + FeeEstimator,
	L::Target: 'static + Logger,
{
	/// Persist the given [`ChannelManager`] to disk, returning an error if persistence failed
	/// (which will cause the [`BackgroundProcessor`] which called this method to exit).
	///
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	fn persist_manager(&self, channel_manager: &ChannelManager<Signer, M, T, K, F, L>) -> Result<(), std::io::Error>;
}
|
|
|
|
|
|
|
|
// Blanket implementation: any function or closure with the signature
// `Fn(&ChannelManager<..>) -> Result<(), std::io::Error>` can be passed directly wherever a
// `ChannelManagerPersister` is expected (e.g. `BackgroundProcessor::start`'s `persister`).
impl<Fun, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
ChannelManagerPersister<Signer, M, T, K, F, L> for Fun where
	M::Target: 'static + chain::Watch<Signer>,
	T::Target: 'static + BroadcasterInterface,
	K::Target: 'static + KeysInterface<Signer = Signer>,
	F::Target: 'static + FeeEstimator,
	L::Target: 'static + Logger,
	Fun: Fn(&ChannelManager<Signer, M, T, K, F, L>) -> Result<(), std::io::Error>,
{
	fn persist_manager(&self, channel_manager: &ChannelManager<Signer, M, T, K, F, L>) -> Result<(), std::io::Error> {
		// Simply invoke the wrapped callable with the manager to be persisted.
		self(channel_manager)
	}
}
|
|
|
|
|
2021-09-14 21:38:00 -05:00
|
|
|
/// Decorates an [`EventHandler`] with common functionality provided by standard [`EventHandler`]s.
struct DecoratingEventHandler<
	E: EventHandler,
	N: Deref<Target = NetGraphMsgHandler<G, A, L>>,
	G: Deref<Target = NetworkGraph>,
	A: Deref,
	L: Deref,
>
where A::Target: chain::Access, L::Target: Logger {
	// The user-provided handler; invoked for every event, after any decorating handlers.
	event_handler: E,
	// Optional network-graph handler which also sees every event (it implements
	// `EventHandler` too) before the user-provided handler runs.
	net_graph_msg_handler: Option<N>,
}
|
|
|
|
|
|
|
|
impl<
|
|
|
|
E: EventHandler,
|
2021-11-01 13:14:14 -05:00
|
|
|
N: Deref<Target = NetGraphMsgHandler<G, A, L>>,
|
|
|
|
G: Deref<Target = NetworkGraph>,
|
2021-09-14 21:38:00 -05:00
|
|
|
A: Deref,
|
|
|
|
L: Deref,
|
2021-11-01 13:14:14 -05:00
|
|
|
> EventHandler for DecoratingEventHandler<E, N, G, A, L>
|
2021-09-14 21:38:00 -05:00
|
|
|
where A::Target: chain::Access, L::Target: Logger {
|
|
|
|
fn handle_event(&self, event: &Event) {
|
|
|
|
if let Some(event_handler) = &self.net_graph_msg_handler {
|
|
|
|
event_handler.handle_event(event);
|
|
|
|
}
|
|
|
|
self.event_handler.handle_event(event);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-11 18:03:32 -05:00
|
|
|
impl BackgroundProcessor {
|
2021-07-19 12:50:56 -05:00
|
|
|
/// Start a background thread that takes care of responsibilities enumerated in the [top-level
|
|
|
|
/// documentation].
|
2021-01-11 18:03:32 -05:00
|
|
|
///
|
2021-07-19 12:50:56 -05:00
|
|
|
/// The thread runs indefinitely unless the object is dropped, [`stop`] is called, or
|
|
|
|
/// `persist_manager` returns an error. In case of an error, the error is retrieved by calling
|
|
|
|
/// either [`join`] or [`stop`].
|
|
|
|
///
|
2021-08-19 11:21:42 -05:00
|
|
|
/// # Data Persistence
|
2021-01-11 18:03:32 -05:00
|
|
|
///
|
2021-03-17 15:53:29 -04:00
|
|
|
/// `persist_manager` is responsible for writing out the [`ChannelManager`] to disk, and/or
|
|
|
|
/// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
|
|
|
|
/// [`ChannelManager`]. See [`FilesystemPersister::persist_manager`] for Rust-Lightning's
|
|
|
|
/// provided implementation.
|
2021-01-11 18:03:32 -05:00
|
|
|
///
|
2021-08-19 11:21:42 -05:00
|
|
|
/// Typically, users should either implement [`ChannelManagerPersister`] to never return an
|
|
|
|
/// error or call [`join`] and handle any error that may arise. For the latter case,
|
|
|
|
/// `BackgroundProcessor` must be restarted by calling `start` again after handling the error.
|
|
|
|
///
|
|
|
|
/// # Event Handling
|
|
|
|
///
|
|
|
|
/// `event_handler` is responsible for handling events that users should be notified of (e.g.,
|
2021-09-14 21:38:00 -05:00
|
|
|
/// payment failed). [`BackgroundProcessor`] may decorate the given [`EventHandler`] with common
|
|
|
|
/// functionality implemented by other handlers.
|
|
|
|
/// * [`NetGraphMsgHandler`] if given will update the [`NetworkGraph`] based on payment failures.
|
2021-08-19 11:21:42 -05:00
|
|
|
///
|
2021-09-17 16:00:24 +00:00
|
|
|
/// [top-level documentation]: BackgroundProcessor
|
2021-07-19 12:50:56 -05:00
|
|
|
/// [`join`]: Self::join
|
|
|
|
/// [`stop`]: Self::stop
|
2021-03-17 15:53:29 -04:00
|
|
|
/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
|
|
|
|
/// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
|
|
|
|
/// [`FilesystemPersister::persist_manager`]: lightning_persister::FilesystemPersister::persist_manager
|
2021-09-14 21:38:00 -05:00
|
|
|
/// [`NetworkGraph`]: lightning::routing::network_graph::NetworkGraph
|
2021-04-13 16:04:17 -04:00
|
|
|
pub fn start<
|
2021-04-13 19:38:31 -04:00
|
|
|
Signer: 'static + Sign,
|
2021-09-14 21:38:00 -05:00
|
|
|
CA: 'static + Deref + Send + Sync,
|
2021-05-11 08:34:57 -07:00
|
|
|
CF: 'static + Deref + Send + Sync,
|
|
|
|
CW: 'static + Deref + Send + Sync,
|
2021-04-13 16:04:17 -04:00
|
|
|
T: 'static + Deref + Send + Sync,
|
|
|
|
K: 'static + Deref + Send + Sync,
|
|
|
|
F: 'static + Deref + Send + Sync,
|
2021-11-01 13:14:14 -05:00
|
|
|
G: 'static + Deref<Target = NetworkGraph> + Send + Sync,
|
2021-04-13 16:04:17 -04:00
|
|
|
L: 'static + Deref + Send + Sync,
|
2021-05-11 08:34:57 -07:00
|
|
|
P: 'static + Deref + Send + Sync,
|
2021-04-13 16:04:17 -04:00
|
|
|
Descriptor: 'static + SocketDescriptor + Send + Sync,
|
2021-04-13 19:38:31 -04:00
|
|
|
CMH: 'static + Deref + Send + Sync,
|
|
|
|
RMH: 'static + Deref + Send + Sync,
|
2021-08-23 23:56:59 -05:00
|
|
|
EH: 'static + EventHandler + Send,
|
2021-05-11 08:34:57 -07:00
|
|
|
CMP: 'static + Send + ChannelManagerPersister<Signer, CW, T, K, F, L>,
|
|
|
|
M: 'static + Deref<Target = ChainMonitor<Signer, CF, T, F, L, P>> + Send + Sync,
|
|
|
|
CM: 'static + Deref<Target = ChannelManager<Signer, CW, T, K, F, L>> + Send + Sync,
|
2021-11-01 13:14:14 -05:00
|
|
|
NG: 'static + Deref<Target = NetGraphMsgHandler<G, CA, L>> + Send + Sync,
|
2021-08-05 14:51:17 +09:00
|
|
|
UMH: 'static + Deref + Send + Sync,
|
|
|
|
PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, L, UMH>> + Send + Sync,
|
2021-09-14 21:38:00 -05:00
|
|
|
>(
|
|
|
|
persister: CMP, event_handler: EH, chain_monitor: M, channel_manager: CM,
|
|
|
|
net_graph_msg_handler: Option<NG>, peer_manager: PM, logger: L
|
|
|
|
) -> Self
|
2021-04-13 19:38:31 -04:00
|
|
|
where
|
2021-09-14 21:38:00 -05:00
|
|
|
CA::Target: 'static + chain::Access,
|
2021-05-11 08:34:57 -07:00
|
|
|
CF::Target: 'static + chain::Filter,
|
|
|
|
CW::Target: 'static + chain::Watch<Signer>,
|
2021-04-13 16:04:17 -04:00
|
|
|
T::Target: 'static + BroadcasterInterface,
|
|
|
|
K::Target: 'static + KeysInterface<Signer = Signer>,
|
|
|
|
F::Target: 'static + FeeEstimator,
|
|
|
|
L::Target: 'static + Logger,
|
2021-10-07 23:46:13 +00:00
|
|
|
P::Target: 'static + Persist<Signer>,
|
2021-04-13 19:38:31 -04:00
|
|
|
CMH::Target: 'static + ChannelMessageHandler,
|
|
|
|
RMH::Target: 'static + RoutingMessageHandler,
|
2021-08-05 14:51:17 +09:00
|
|
|
UMH::Target: 'static + CustomMessageHandler,
|
2021-01-11 18:03:32 -05:00
|
|
|
{
|
|
|
|
let stop_thread = Arc::new(AtomicBool::new(false));
|
|
|
|
let stop_thread_clone = stop_thread.clone();
|
|
|
|
let handle = thread::spawn(move || -> Result<(), std::io::Error> {
|
2021-12-15 18:59:15 +00:00
|
|
|
let event_handler = DecoratingEventHandler { event_handler, net_graph_msg_handler: net_graph_msg_handler.as_ref().map(|t| t.deref()) };
|
2021-09-14 21:38:00 -05:00
|
|
|
|
Automatically update fees on outbound channels as fees change
Previously we'd been expecting to implement anchor outputs before
shipping 0.1, thus reworking our channel fee update process
entirely and leaving it as a future task. However, due to the
difficulty of working with on-chain anchor pools, we are now likely
to ship 0.1 without requiring anchor outputs.
In either case, there isn't a lot of reason to require that users
call an explicit "prevailing feerates have changed" function now
that we have a timer method which is called regularly. Further, we
really should be the ones deciding on the channel feerate in terms
of the users' FeeEstimator, instead of requiring users implement a
second fee-providing interface by calling an update_fee method.
Finally, there is no reason for an update_fee method to be
channel-specific, as we should be updating all (outbound) channel
fees at once.
Thus, we move the update_fee handling to the background, calling it
on the regular 1-minute timer. We also update the regular 1-minute
timer to fire on startup as well as every minute to ensure we get
fee updates even on mobile clients that are rarely, if ever, open
for more than one minute.
2021-06-28 03:41:44 +00:00
|
|
|
log_trace!(logger, "Calling ChannelManager's timer_tick_occurred on startup");
|
|
|
|
channel_manager.timer_tick_occurred();
|
|
|
|
|
2021-08-05 17:04:18 +00:00
|
|
|
let mut last_freshness_call = Instant::now();
|
|
|
|
let mut last_ping_call = Instant::now();
|
2021-12-15 18:59:15 +00:00
|
|
|
let mut last_prune_call = Instant::now();
|
|
|
|
let mut have_pruned = false;
|
|
|
|
|
2021-01-11 18:03:32 -05:00
|
|
|
loop {
|
2021-09-26 00:09:17 +00:00
|
|
|
peer_manager.process_events(); // Note that this may block on ChannelManager's locking
|
2021-05-11 08:07:54 -07:00
|
|
|
channel_manager.process_pending_events(&event_handler);
|
2021-05-11 08:34:57 -07:00
|
|
|
chain_monitor.process_pending_events(&event_handler);
|
2021-09-26 00:09:17 +00:00
|
|
|
|
|
|
|
// We wait up to 100ms, but track how long it takes to detect being put to sleep,
|
|
|
|
// see `await_start`'s use below.
|
|
|
|
let await_start = Instant::now();
|
2021-04-02 18:40:57 -04:00
|
|
|
let updates_available =
|
|
|
|
channel_manager.await_persistable_update_timeout(Duration::from_millis(100));
|
2021-09-26 00:09:17 +00:00
|
|
|
let await_time = await_start.elapsed();
|
|
|
|
|
2021-01-11 18:03:32 -05:00
|
|
|
if updates_available {
|
2021-10-04 03:11:36 +00:00
|
|
|
log_trace!(logger, "Persisting ChannelManager...");
|
2021-05-11 08:07:54 -07:00
|
|
|
persister.persist_manager(&*channel_manager)?;
|
2021-10-04 03:11:36 +00:00
|
|
|
log_trace!(logger, "Done persisting ChannelManager.");
|
2021-01-11 18:03:32 -05:00
|
|
|
}
|
|
|
|
// Exit the loop if the background processor was requested to stop.
|
|
|
|
if stop_thread.load(Ordering::Acquire) == true {
|
|
|
|
log_trace!(logger, "Terminating background processor.");
|
2022-01-18 21:48:28 +00:00
|
|
|
break;
|
2021-01-11 18:03:32 -05:00
|
|
|
}
|
2021-08-05 17:04:18 +00:00
|
|
|
if last_freshness_call.elapsed().as_secs() > FRESHNESS_TIMER {
|
|
|
|
log_trace!(logger, "Calling ChannelManager's timer_tick_occurred");
|
2021-04-09 16:55:10 -04:00
|
|
|
channel_manager.timer_tick_occurred();
|
2021-08-05 17:04:18 +00:00
|
|
|
last_freshness_call = Instant::now();
|
|
|
|
}
|
2021-09-26 00:09:17 +00:00
|
|
|
if await_time > Duration::from_secs(1) {
|
2021-08-07 20:45:01 +00:00
|
|
|
// On various platforms, we may be starved of CPU cycles for several reasons.
|
|
|
|
// E.g. on iOS, if we've been in the background, we will be entirely paused.
|
|
|
|
// Similarly, if we're on a desktop platform and the device has been asleep, we
|
|
|
|
// may not get any cycles.
|
2021-09-26 00:09:17 +00:00
|
|
|
// We detect this by checking if our max-100ms-sleep, above, ran longer than a
|
|
|
|
// full second, at which point we assume sockets may have been killed (they
|
|
|
|
// appear to be at least on some platforms, even if it has only been a second).
|
|
|
|
// Note that we have to take care to not get here just because user event
|
|
|
|
// processing was slow at the top of the loop. For example, the sample client
|
|
|
|
// may call Bitcoin Core RPCs during event handling, which very often takes
|
|
|
|
// more than a handful of seconds to complete, and shouldn't disconnect all our
|
|
|
|
// peers.
|
|
|
|
log_trace!(logger, "100ms sleep took more than a second, disconnecting peers.");
|
2021-10-26 02:03:02 +00:00
|
|
|
peer_manager.disconnect_all_peers();
|
2021-08-07 20:45:01 +00:00
|
|
|
last_ping_call = Instant::now();
|
|
|
|
} else if last_ping_call.elapsed().as_secs() > PING_TIMER {
|
2021-08-05 17:04:18 +00:00
|
|
|
log_trace!(logger, "Calling PeerManager's timer_tick_occurred");
|
2021-04-09 16:58:31 -04:00
|
|
|
peer_manager.timer_tick_occurred();
|
2021-08-05 17:04:18 +00:00
|
|
|
last_ping_call = Instant::now();
|
2021-01-11 18:03:32 -05:00
|
|
|
}
|
2021-12-15 18:59:15 +00:00
|
|
|
|
|
|
|
// Note that we want to run a graph prune once not long after startup before
|
|
|
|
// falling back to our usual hourly prunes. This avoids short-lived clients never
|
|
|
|
// pruning their network graph. We run once 60 seconds after startup before
|
|
|
|
// continuing our normal cadence.
|
|
|
|
if last_prune_call.elapsed().as_secs() > if have_pruned { NETWORK_PRUNE_TIMER } else { 60 } {
|
|
|
|
if let Some(ref handler) = net_graph_msg_handler {
|
|
|
|
log_trace!(logger, "Pruning network graph of stale entries");
|
|
|
|
handler.network_graph().remove_stale_channels();
|
|
|
|
last_prune_call = Instant::now();
|
|
|
|
have_pruned = true;
|
|
|
|
}
|
|
|
|
}
|
2021-01-11 18:03:32 -05:00
|
|
|
}
|
2022-01-18 21:48:28 +00:00
|
|
|
// After we exit, ensure we persist the ChannelManager one final time - this avoids
|
|
|
|
// some races where users quit while channel updates were in-flight, with
|
|
|
|
// ChannelMonitor update(s) persisted without a corresponding ChannelManager update.
|
|
|
|
persister.persist_manager(&*channel_manager)
|
2021-01-11 18:03:32 -05:00
|
|
|
});
|
2021-07-18 13:11:01 -05:00
|
|
|
Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
|
2021-01-11 18:03:32 -05:00
|
|
|
}
|
|
|
|
|
2021-07-19 12:50:56 -05:00
|
|
|
/// Join `BackgroundProcessor`'s thread, returning any error that occurred while persisting
|
|
|
|
/// [`ChannelManager`].
|
|
|
|
///
|
|
|
|
/// # Panics
|
|
|
|
///
|
|
|
|
/// This function panics if the background thread has panicked such as while persisting or
|
|
|
|
/// handling events.
|
|
|
|
///
|
|
|
|
/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
|
|
|
|
pub fn join(mut self) -> Result<(), std::io::Error> {
|
|
|
|
assert!(self.thread_handle.is_some());
|
|
|
|
self.join_thread()
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Stop `BackgroundProcessor`'s thread, returning any error that occurred while persisting
|
|
|
|
/// [`ChannelManager`].
|
|
|
|
///
|
|
|
|
/// # Panics
|
|
|
|
///
|
|
|
|
/// This function panics if the background thread has panicked such as while persisting or
|
|
|
|
/// handling events.
|
|
|
|
///
|
|
|
|
/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
|
2021-07-18 13:11:01 -05:00
|
|
|
pub fn stop(mut self) -> Result<(), std::io::Error> {
|
|
|
|
assert!(self.thread_handle.is_some());
|
|
|
|
self.stop_and_join_thread()
|
|
|
|
}
|
|
|
|
|
|
|
|
fn stop_and_join_thread(&mut self) -> Result<(), std::io::Error> {
|
2021-01-11 18:03:32 -05:00
|
|
|
self.stop_thread.store(true, Ordering::Release);
|
2021-07-19 12:50:56 -05:00
|
|
|
self.join_thread()
|
|
|
|
}
|
|
|
|
|
|
|
|
fn join_thread(&mut self) -> Result<(), std::io::Error> {
|
2021-07-18 13:11:01 -05:00
|
|
|
match self.thread_handle.take() {
|
|
|
|
Some(handle) => handle.join().unwrap(),
|
|
|
|
None => Ok(()),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl Drop for BackgroundProcessor {
	// Stops and joins the background thread on drop so the event loop never outlives the
	// processor. Note the `unwrap()`: dropping panics if the thread panicked or if the final
	// ChannelManager persistence returned an error; call `stop()`/`join()` explicitly to
	// handle that error instead.
	fn drop(&mut self) {
		self.stop_and_join_thread().unwrap();
	}
}
|
|
|
|
|
|
|
|
#[cfg(test)]
|
|
|
|
mod tests {
|
2021-05-11 08:34:57 -07:00
|
|
|
use bitcoin::blockdata::block::BlockHeader;
|
2021-01-11 18:03:32 -05:00
|
|
|
use bitcoin::blockdata::constants::genesis_block;
|
|
|
|
use bitcoin::blockdata::transaction::{Transaction, TxOut};
|
|
|
|
use bitcoin::network::constants::Network;
|
2021-07-03 01:58:30 +00:00
|
|
|
use lightning::chain::{BestBlock, Confirm, chainmonitor};
|
2021-05-11 08:34:57 -07:00
|
|
|
use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
|
2021-05-11 08:07:54 -07:00
|
|
|
use lightning::chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager};
|
2021-01-11 18:03:32 -05:00
|
|
|
use lightning::chain::transaction::OutPoint;
|
|
|
|
use lightning::get_event_msg;
|
2021-07-03 01:58:30 +00:00
|
|
|
use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, ChannelManager, SimpleArcChannelManager};
|
2021-01-11 18:03:32 -05:00
|
|
|
use lightning::ln::features::InitFeatures;
|
2021-07-31 09:32:27 -05:00
|
|
|
use lightning::ln::msgs::{ChannelMessageHandler, Init};
|
2021-08-05 14:51:17 +09:00
|
|
|
use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
|
2021-09-14 21:38:00 -05:00
|
|
|
use lightning::routing::network_graph::{NetworkGraph, NetGraphMsgHandler};
|
2021-01-11 18:03:32 -05:00
|
|
|
use lightning::util::config::UserConfig;
|
2021-05-12 00:34:30 -07:00
|
|
|
use lightning::util::events::{Event, MessageSendEventsProvider, MessageSendEvent};
|
2021-01-11 18:03:32 -05:00
|
|
|
use lightning::util::ser::Writeable;
|
|
|
|
use lightning::util::test_utils;
|
2021-08-23 23:56:59 -05:00
|
|
|
use lightning_invoice::payment::{InvoicePayer, RetryAttempts};
|
|
|
|
use lightning_invoice::utils::DefaultRouter;
|
2021-01-11 18:03:32 -05:00
|
|
|
use lightning_persister::FilesystemPersister;
|
|
|
|
use std::fs;
|
|
|
|
use std::path::PathBuf;
|
|
|
|
use std::sync::{Arc, Mutex};
|
|
|
|
use std::time::Duration;
|
2021-05-11 08:07:54 -07:00
|
|
|
use super::{BackgroundProcessor, FRESHNESS_TIMER};
|
2021-01-11 18:03:32 -05:00
|
|
|
|
2021-05-11 08:34:57 -07:00
|
|
|
	// Deadline expressed in units of FRESHNESS_TIMER (seconds) — presumably used by tests as a
	// timeout when waiting on expected events; usage is not visible in this chunk, confirm below.
	const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;
|
|
|
|
|
2021-04-02 18:40:57 -04:00
|
|
|
#[derive(Clone, Eq, Hash, PartialEq)]
|
|
|
|
struct TestDescriptor{}
|
|
|
|
impl SocketDescriptor for TestDescriptor {
|
|
|
|
fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
|
|
|
|
0
|
|
|
|
}
|
|
|
|
|
|
|
|
fn disconnect_socket(&mut self) {}
|
|
|
|
}
|
|
|
|
|
	// Concrete ChainMonitor instantiation used throughout these tests, backed entirely by
	// test_utils doubles and a FilesystemPersister.
	type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemPersister>>;
|
2021-01-11 18:03:32 -05:00
|
|
|
|
|
|
|
	// Bundle of everything a single test node needs; built by `create_nodes`.
	struct Node {
		// The node's ChannelManager.
		node: Arc<SimpleArcChannelManager<ChainMonitor, test_utils::TestBroadcaster, test_utils::TestFeeEstimator, test_utils::TestLogger>>,
		// Optional gossip handler, passed through to BackgroundProcessor::start.
		net_graph_msg_handler: Option<Arc<NetGraphMsgHandler<Arc<NetworkGraph>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>>,
		peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, Arc<test_utils::TestLogger>, IgnoringMessageHandler>>,
		chain_monitor: Arc<ChainMonitor>,
		// On-disk persister; its directory is removed in Node's Drop impl.
		persister: Arc<FilesystemPersister>,
		tx_broadcaster: Arc<test_utils::TestBroadcaster>,
		network_graph: Arc<NetworkGraph>,
		logger: Arc<test_utils::TestLogger>,
		// Chain tip as seen by this node; advanced by `confirm_transaction_depth`.
		best_block: BestBlock,
	}
|
|
|
|
|
|
|
|
impl Drop for Node {
|
|
|
|
fn drop(&mut self) {
|
|
|
|
let data_dir = self.persister.get_data_dir();
|
|
|
|
match fs::remove_dir_all(data_dir.clone()) {
|
|
|
|
Err(e) => println!("Failed to remove test persister directory {}: {}", data_dir, e),
|
|
|
|
_ => {}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn get_full_filepath(filepath: String, filename: String) -> String {
|
|
|
|
let mut path = PathBuf::from(filepath);
|
|
|
|
path.push(filename);
|
|
|
|
path.to_str().unwrap().to_string()
|
|
|
|
}
|
|
|
|
|
|
|
|
	// Builds `num_nodes` fully-wired test nodes (each with its own ChainMonitor,
	// ChannelManager, gossip handler, PeerManager, and a FilesystemPersister rooted at
	// `{persist_dir}_persister_{i}`), then marks every pair of nodes as connected peers.
	fn create_nodes(num_nodes: usize, persist_dir: String) -> Vec<Node> {
		let mut nodes = Vec::new();
		for i in 0..num_nodes {
			let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))});
			let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) });
			let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet));
			let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
			let persister = Arc::new(FilesystemPersister::new(format!("{}_persister_{}", persist_dir, i)));
			// Deterministic per-node key seed so test runs are reproducible.
			let seed = [i as u8; 32];
			let network = Network::Testnet;
			let genesis_block = genesis_block(network);
			let now = Duration::from_secs(genesis_block.header.time as u64);
			let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
			let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), persister.clone()));
			let best_block = BestBlock::from_genesis(network);
			let params = ChainParameters { network, best_block };
			let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), logger.clone(), keys_manager.clone(), UserConfig::default(), params));
			let network_graph = Arc::new(NetworkGraph::new(genesis_block.header.block_hash()));
			let net_graph_msg_handler = Some(Arc::new(NetGraphMsgHandler::new(network_graph.clone(), Some(chain_source.clone()), logger.clone())));
			let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new() )};
			let peer_manager = Arc::new(PeerManager::new(msg_handler, keys_manager.get_node_secret(), &seed, logger.clone(), IgnoringMessageHandler{}));
			let node = Node { node: manager, net_graph_msg_handler, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block };
			nodes.push(node);
		}

		// Register every pair of nodes as connected peers (in both directions).
		for i in 0..num_nodes {
			for j in (i+1)..num_nodes {
				nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init { features: InitFeatures::known() });
				nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init { features: InitFeatures::known() });
			}
		}

		nodes
	}
|
|
|
|
|
|
|
|
	// Drives a full channel open between `$node_a` and `$node_b` for `$channel_value` sats and
	// evaluates to the funding transaction. Composes the begin/handle/end macros below.
	macro_rules! open_channel {
		($node_a: expr, $node_b: expr, $channel_value: expr) => {{
			begin_open_channel!($node_a, $node_b, $channel_value);
			// The open/accept exchange should have produced exactly one FundingGenerationReady.
			let events = $node_a.node.get_and_clear_pending_events();
			assert_eq!(events.len(), 1);
			let (temporary_channel_id, tx) = handle_funding_generation_ready!(&events[0], $channel_value);
			end_open_channel!($node_a, $node_b, temporary_channel_id, tx);
			tx
		}}
	}
|
|
|
|
|
|
|
|
	// First phase of a channel open: node_a creates the channel (push_msat 100,
	// user_channel_id 42) and the open_channel/accept_channel messages are exchanged.
	macro_rules! begin_open_channel {
		($node_a: expr, $node_b: expr, $channel_value: expr) => {{
			$node_a.node.create_channel($node_b.node.get_our_node_id(), $channel_value, 100, 42, None).unwrap();
			$node_b.node.handle_open_channel(&$node_a.node.get_our_node_id(), InitFeatures::known(), &get_event_msg!($node_a, MessageSendEvent::SendOpenChannel, $node_b.node.get_our_node_id()));
			$node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), InitFeatures::known(), &get_event_msg!($node_b, MessageSendEvent::SendAcceptChannel, $node_a.node.get_our_node_id()));
		}}
	}
|
|
|
|
|
|
|
|
	// Matches a FundingGenerationReady event, sanity-checks its value and user_channel_id,
	// builds the corresponding funding transaction, and evaluates to
	// `(temporary_channel_id, funding_tx)`. Panics on any other event.
	macro_rules! handle_funding_generation_ready {
		($event: expr, $channel_value: expr) => {{
			match $event {
				&Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, ref output_script, user_channel_id } => {
					assert_eq!(channel_value_satoshis, $channel_value);
					// 42 is the user_channel_id passed by begin_open_channel!.
					assert_eq!(user_channel_id, 42);

					// Single-output transaction paying the requested amount to the funding script.
					let tx = Transaction { version: 1 as i32, lock_time: 0, input: Vec::new(), output: vec![TxOut {
						value: channel_value_satoshis, script_pubkey: output_script.clone(),
					}]};
					(temporary_channel_id, tx)
				},
				_ => panic!("Unexpected event"),
			}
		}}
	}
|
2021-01-11 18:03:32 -05:00
|
|
|
|
2021-05-11 08:07:54 -07:00
|
|
|
/// Completes channel opening: hands the funding transaction to initiator `$node_a` and
/// exchanges the resulting funding_created/funding_signed messages with `$node_b`.
macro_rules! end_open_channel {
	($node_a: expr, $node_b: expr, $temporary_channel_id: expr, $tx: expr) => {{
		$node_a.node.funding_transaction_generated(&$temporary_channel_id, $tx.clone()).unwrap();
		$node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
		$node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
	}}
}
|
|
|
|
|
2021-05-31 20:04:36 +00:00
|
|
|
fn confirm_transaction_depth(node: &mut Node, tx: &Transaction, depth: u32) {
|
|
|
|
for i in 1..=depth {
|
2021-05-11 08:34:57 -07:00
|
|
|
let prev_blockhash = node.best_block.block_hash();
|
|
|
|
let height = node.best_block.height() + 1;
|
|
|
|
let header = BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: Default::default(), time: height, bits: 42, nonce: 42 };
|
|
|
|
let txdata = vec![(0, tx)];
|
|
|
|
node.best_block = BestBlock::new(header.block_hash(), height);
|
|
|
|
match i {
|
|
|
|
1 => {
|
|
|
|
node.node.transactions_confirmed(&header, &txdata, height);
|
|
|
|
node.chain_monitor.transactions_confirmed(&header, &txdata, height);
|
|
|
|
},
|
2021-05-31 20:04:36 +00:00
|
|
|
x if x == depth => {
|
2021-05-11 08:34:57 -07:00
|
|
|
node.node.best_block_updated(&header, height);
|
|
|
|
node.chain_monitor.best_block_updated(&header, height);
|
|
|
|
},
|
|
|
|
_ => {},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2021-05-31 20:04:36 +00:00
|
|
|
/// Mines `tx` into a block and then enough further blocks for it to reach
/// `ANTI_REORG_DELAY` confirmations.
fn confirm_transaction(node: &mut Node, tx: &Transaction) {
	confirm_transaction_depth(node, tx, ANTI_REORG_DELAY);
}
|
2021-05-11 08:34:57 -07:00
|
|
|
|
2021-01-11 18:03:32 -05:00
|
|
|
#[test]
fn test_background_processor() {
	// Test that when a new channel is created, the ChannelManager needs to be re-persisted with
	// updates. Also test that when new updates are available, the manager signals that it needs
	// re-persistence and is successfully re-persisted.
	let nodes = create_nodes(2, "test_background_processor".to_string());

	// Go through the channel creation process so that each node has something to persist. Since
	// open_channel consumes events, it must complete before starting BackgroundProcessor to
	// avoid a race with processing events.
	let tx = open_channel!(nodes[0], nodes[1], 100000);

	// Initiate the background processors to watch each node.
	let data_dir = nodes[0].persister.get_data_dir();
	let persister = move |node: &ChannelManager<InMemorySigner, Arc<ChainMonitor>, Arc<test_utils::TestBroadcaster>, Arc<KeysManager>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>>| FilesystemPersister::persist_manager(data_dir.clone(), node);
	let event_handler = |_: &_| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());

	// Busy-polls until the bytes on disk at $filepath match a freshly-serialized copy of
	// $node, i.e. until the background processor has caught up with the latest state.
	macro_rules! check_persisted_data {
		($node: expr, $filepath: expr, $expected_bytes: expr) => {
			loop {
				// Re-serialize each iteration: the node's state may change while we poll.
				$expected_bytes.clear();
				match $node.write(&mut $expected_bytes) {
					Ok(()) => {
						match std::fs::read($filepath) {
							Ok(bytes) => {
								if bytes == $expected_bytes {
									break
								} else {
									continue
								}
							},
							// The file may not have been written yet; keep polling.
							Err(_) => continue
						}
					},
					Err(e) => panic!("Unexpected error: {}", e)
				}
			}
		}
	}

	// Check that the initial channel manager data is persisted as expected.
	let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "manager".to_string());
	let mut expected_bytes = Vec::new();
	check_persisted_data!(nodes[0].node, filepath.clone(), expected_bytes);
	// Wait until the manager no longer signals that it needs persistence.
	loop {
		if !nodes[0].node.get_persistence_condvar_value() { break }
	}

	// Force-close the channel.
	nodes[0].node.force_close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).unwrap();

	// Check that the force-close updates are persisted.
	let mut expected_bytes = Vec::new();
	check_persisted_data!(nodes[0].node, filepath.clone(), expected_bytes);
	loop {
		if !nodes[0].node.get_persistence_condvar_value() { break }
	}

	assert!(bg_processor.stop().is_ok());
}
|
|
|
|
|
|
|
|
#[test]
fn test_timer_tick_called() {
	// Test that ChannelManager's and PeerManager's `timer_tick_occurred` is called every
	// `FRESHNESS_TIMER`.
	let nodes = create_nodes(1, "test_timer_tick_called".to_string());
	let data_dir = nodes[0].persister.get_data_dir();
	let manager_persister = move |manager: &ChannelManager<InMemorySigner, Arc<ChainMonitor>, Arc<test_utils::TestBroadcaster>, Arc<KeysManager>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>>| FilesystemPersister::persist_manager(data_dir.clone(), manager);
	let noop_event_handler = |_: &_| {};
	let processor = BackgroundProcessor::start(manager_persister, noop_event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
	// Busy-wait until the background thread has logged both timer-tick calls.
	loop {
		let entries = nodes[0].logger.lines.lock().unwrap();
		let chan_manager_line = "Calling ChannelManager's timer_tick_occurred".to_string();
		let peer_manager_line = "Calling PeerManager's timer_tick_occurred".to_string();
		let saw_chan_tick = entries.get(&("lightning_background_processor".to_string(), chan_manager_line)).is_some();
		let saw_peer_tick = entries.get(&("lightning_background_processor".to_string(), peer_manager_line)).is_some();
		if saw_chan_tick && saw_peer_tick {
			break
		}
	}
	assert!(processor.stop().is_ok());
}
|
|
|
|
|
|
|
|
#[test]
fn test_persist_error() {
	// Test that if we encounter an error during manager persistence, the thread panics.
	let nodes = create_nodes(2, "test_persist_error".to_string());
	open_channel!(nodes[0], nodes[1], 100000);

	// A persister that always fails; the background thread should exit with its error.
	let failing_persister = |_: &_| Err(std::io::Error::new(std::io::ErrorKind::Other, "test"));
	let noop_event_handler = |_: &_| {};
	let processor = BackgroundProcessor::start(failing_persister, noop_event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
	match processor.join() {
		Err(err) => {
			// The persister's error should be surfaced verbatim to the joiner.
			assert_eq!(err.kind(), std::io::ErrorKind::Other);
			assert_eq!(err.get_ref().unwrap().to_string(), "test");
		},
		Ok(_) => panic!("Expected error persisting manager"),
	}
}
|
2021-05-11 08:07:54 -07:00
|
|
|
|
|
|
|
#[test]
fn test_background_event_handling() {
	// Tests that events generated while the BackgroundProcessor runs (FundingGenerationReady,
	// then SpendableOutputs after a force-close) are delivered to the registered handler.
	let mut nodes = create_nodes(2, "test_background_event_handling".to_string());
	let channel_value = 100000;
	let data_dir = nodes[0].persister.get_data_dir();
	let persister = move |node: &_| FilesystemPersister::persist_manager(data_dir.clone(), node);

	// Set up a background event handler for FundingGenerationReady events.
	let (sender, receiver) = std::sync::mpsc::sync_channel(1);
	let event_handler = move |event: &Event| {
		// Forward the (temporary_channel_id, funding_tx) pair to the test thread.
		sender.send(handle_funding_generation_ready!(event, channel_value)).unwrap();
	};
	let bg_processor = BackgroundProcessor::start(persister.clone(), event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());

	// Open a channel and check that the FundingGenerationReady event was handled.
	begin_open_channel!(nodes[0], nodes[1], channel_value);
	let (temporary_channel_id, funding_tx) = receiver
		.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
		.expect("FundingGenerationReady not handled within deadline");
	end_open_channel!(nodes[0], nodes[1], temporary_channel_id, funding_tx);

	// Confirm the funding transaction.
	confirm_transaction(&mut nodes[0], &funding_tx);
	let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id());
	confirm_transaction(&mut nodes[1], &funding_tx);
	let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id());
	// Exchange funding_locked messages; each node responds with a channel_update we discard.
	nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &bs_funding);
	let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &as_funding);
	let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());

	assert!(bg_processor.stop().is_ok());

	// Set up a background event handler for SpendableOutputs events.
	let (sender, receiver) = std::sync::mpsc::sync_channel(1);
	let event_handler = move |event: &Event| sender.send(event.clone()).unwrap();
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());

	// Force close the channel and check that the SpendableOutputs event was handled.
	nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
	let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
	confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);
	let event = receiver
		.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
		.expect("SpendableOutputs not handled within deadline");
	// Either of these events may arrive first from the force-close.
	match event {
		Event::SpendableOutputs { .. } => {},
		Event::ChannelClosed { .. } => {},
		_ => panic!("Unexpected event: {:?}", event),
	}

	assert!(bg_processor.stop().is_ok());
}
|
2021-08-23 23:56:59 -05:00
|
|
|
|
|
|
|
#[test]
fn test_invoice_payer() {
	// Sanity-check that an InvoicePayer can serve as a BackgroundProcessor event handler.
	let nodes = create_nodes(2, "test_invoice_payer".to_string());

	// Initiate the background processors to watch each node.
	let data_dir = nodes[0].persister.get_data_dir();
	let manager_persister = move |manager: &ChannelManager<InMemorySigner, Arc<ChainMonitor>, Arc<test_utils::TestBroadcaster>, Arc<KeysManager>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>>| FilesystemPersister::persist_manager(data_dir.clone(), manager);
	let payment_router = DefaultRouter::new(Arc::clone(&nodes[0].network_graph), Arc::clone(&nodes[0].logger));
	let test_scorer = Arc::new(Mutex::new(test_utils::TestScorer::default()));
	let payer = Arc::new(InvoicePayer::new(Arc::clone(&nodes[0].node), payment_router, test_scorer, Arc::clone(&nodes[0].logger), |_: &_| {}, RetryAttempts(2)));
	let handler = Arc::clone(&payer);
	let processor = BackgroundProcessor::start(manager_persister, handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
	assert!(processor.stop().is_ok());
}
|
2021-01-11 18:03:32 -05:00
|
|
|
}
|