2021-03-17 14:18:37 -04:00
|
|
|
//! Utilities that take care of tasks that (1) need to happen periodically to keep Rust-Lightning
|
|
|
|
//! running properly, and (2) either can or should be run in the background. See docs for
|
|
|
|
//! [`BackgroundProcessor`] for more details on the nitty-gritty.
|
|
|
|
|
2021-03-17 14:05:09 -04:00
|
|
|
#![deny(broken_intra_doc_links)]
|
2021-03-17 14:18:37 -04:00
|
|
|
#![deny(missing_docs)]
|
2021-03-17 14:05:09 -04:00
|
|
|
#![deny(unsafe_code)]
|
|
|
|
|
2021-01-11 18:03:32 -05:00
|
|
|
#[macro_use] extern crate lightning;
|
|
|
|
|
|
|
|
use lightning::chain;
|
|
|
|
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
|
2021-05-11 08:34:57 -07:00
|
|
|
use lightning::chain::chainmonitor::ChainMonitor;
|
|
|
|
use lightning::chain::channelmonitor;
|
2021-02-16 16:30:08 -05:00
|
|
|
use lightning::chain::keysinterface::{Sign, KeysInterface};
|
2021-01-11 18:03:32 -05:00
|
|
|
use lightning::ln::channelmanager::ChannelManager;
|
2021-04-02 18:40:57 -04:00
|
|
|
use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
|
|
|
|
use lightning::ln::peer_handler::{PeerManager, SocketDescriptor};
|
2021-05-11 08:07:54 -07:00
|
|
|
use lightning::util::events::{EventHandler, EventsProvider};
|
2021-01-11 18:03:32 -05:00
|
|
|
use lightning::util::logger::Logger;
|
|
|
|
use std::sync::Arc;
|
|
|
|
use std::sync::atomic::{AtomicBool, Ordering};
|
|
|
|
use std::thread;
|
|
|
|
use std::thread::JoinHandle;
|
|
|
|
use std::time::{Duration, Instant};
|
2021-04-13 16:04:17 -04:00
|
|
|
use std::ops::Deref;
|
2021-01-11 18:03:32 -05:00
|
|
|
|
|
|
|
/// BackgroundProcessor takes care of tasks that (1) need to happen periodically to keep
/// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
/// responsibilities are:
/// * Monitoring whether the ChannelManager needs to be re-persisted to disk, and if so,
///   writing it to disk/backups by invoking the callback given to it at startup.
///   ChannelManager persistence should be done in the background.
/// * Calling `ChannelManager::timer_tick_occurred()` and
///   `PeerManager::timer_tick_occurred()` every minute (can be done in the
///   background).
///
/// Note that if ChannelManager persistence fails and the persisted manager becomes out-of-date,
/// then there is a risk of channels force-closing on startup when the manager realizes it's
/// outdated. However, as long as `ChannelMonitor` backups are sound, no funds besides those used
/// for unilateral chain closure fees are at risk.
pub struct BackgroundProcessor {
	// Shared flag polled by the background thread on each loop iteration; set to `true` by
	// `stop`/`Drop` to request termination.
	stop_thread: Arc<AtomicBool>,
	// Handle of the spawned background thread. `Some` until `join`/`stop` takes it in order to
	// join the thread and surface any persistence error it returned.
	thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
}
|
|
|
|
|
|
|
|
// Interval, in seconds, between calls to the ChannelManager's and PeerManager's
// `timer_tick_occurred`. Shortened to one second under test so timer-driven behavior can be
// exercised quickly.
#[cfg(not(test))]
const FRESHNESS_TIMER: u64 = 60;
#[cfg(test)]
const FRESHNESS_TIMER: u64 = 1;
|
2021-01-11 18:03:32 -05:00
|
|
|
|
2021-04-13 19:38:31 -04:00
|
|
|
/// Trait which handles persisting a [`ChannelManager`] to disk.
///
/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
pub trait ChannelManagerPersister<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
where
	M::Target: 'static + chain::Watch<Signer>,
	T::Target: 'static + BroadcasterInterface,
	K::Target: 'static + KeysInterface<Signer = Signer>,
	F::Target: 'static + FeeEstimator,
	L::Target: 'static + Logger,
{
	/// Persist the given [`ChannelManager`] to disk, returning an error if persistence failed
	/// (which will cause the [`BackgroundProcessor`] which called this method to exit).
	///
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	fn persist_manager(&self, channel_manager: &ChannelManager<Signer, M, T, K, F, L>) -> Result<(), std::io::Error>;
}
|
|
|
|
|
|
|
|
// Blanket implementation: any closure or function with the matching signature can be used
// directly as a `ChannelManagerPersister` (as the tests below do with closures over
// `FilesystemPersister::persist_manager`).
impl<Fun, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
ChannelManagerPersister<Signer, M, T, K, F, L> for Fun where
	M::Target: 'static + chain::Watch<Signer>,
	T::Target: 'static + BroadcasterInterface,
	K::Target: 'static + KeysInterface<Signer = Signer>,
	F::Target: 'static + FeeEstimator,
	L::Target: 'static + Logger,
	Fun: Fn(&ChannelManager<Signer, M, T, K, F, L>) -> Result<(), std::io::Error>,
{
	fn persist_manager(&self, channel_manager: &ChannelManager<Signer, M, T, K, F, L>) -> Result<(), std::io::Error> {
		// Simply delegate to the wrapped callable.
		self(channel_manager)
	}
}
|
|
|
|
|
2021-01-11 18:03:32 -05:00
|
|
|
impl BackgroundProcessor {
|
2021-07-19 12:50:56 -05:00
|
|
|
/// Start a background thread that takes care of responsibilities enumerated in the [top-level
|
|
|
|
/// documentation].
|
2021-01-11 18:03:32 -05:00
|
|
|
///
|
2021-07-19 12:50:56 -05:00
|
|
|
/// The thread runs indefinitely unless the object is dropped, [`stop`] is called, or
|
|
|
|
/// `persist_manager` returns an error. In case of an error, the error is retrieved by calling
|
|
|
|
/// either [`join`] or [`stop`].
|
|
|
|
///
|
|
|
|
/// Typically, users should either implement [`ChannelManagerPersister`] to never return an
|
|
|
|
/// error or call [`join`] and handle any error that may arise. For the latter case, the
|
|
|
|
/// `BackgroundProcessor` must be restarted by calling `start` again after handling the error.
|
2021-01-11 18:03:32 -05:00
|
|
|
///
|
2021-03-17 15:53:29 -04:00
|
|
|
/// `persist_manager` is responsible for writing out the [`ChannelManager`] to disk, and/or
|
|
|
|
/// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
|
|
|
|
/// [`ChannelManager`]. See [`FilesystemPersister::persist_manager`] for Rust-Lightning's
|
|
|
|
/// provided implementation.
|
2021-01-11 18:03:32 -05:00
|
|
|
///
|
2021-07-19 12:50:56 -05:00
|
|
|
/// [top-level documentation]: Self
|
|
|
|
/// [`join`]: Self::join
|
|
|
|
/// [`stop`]: Self::stop
|
2021-03-17 15:53:29 -04:00
|
|
|
/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
|
|
|
|
/// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
|
|
|
|
/// [`FilesystemPersister::persist_manager`]: lightning_persister::FilesystemPersister::persist_manager
|
2021-04-13 16:04:17 -04:00
|
|
|
pub fn start<
|
2021-04-13 19:38:31 -04:00
|
|
|
Signer: 'static + Sign,
|
2021-05-11 08:34:57 -07:00
|
|
|
CF: 'static + Deref + Send + Sync,
|
|
|
|
CW: 'static + Deref + Send + Sync,
|
2021-04-13 16:04:17 -04:00
|
|
|
T: 'static + Deref + Send + Sync,
|
|
|
|
K: 'static + Deref + Send + Sync,
|
|
|
|
F: 'static + Deref + Send + Sync,
|
|
|
|
L: 'static + Deref + Send + Sync,
|
2021-05-11 08:34:57 -07:00
|
|
|
P: 'static + Deref + Send + Sync,
|
2021-04-13 16:04:17 -04:00
|
|
|
Descriptor: 'static + SocketDescriptor + Send + Sync,
|
2021-04-13 19:38:31 -04:00
|
|
|
CMH: 'static + Deref + Send + Sync,
|
|
|
|
RMH: 'static + Deref + Send + Sync,
|
2021-05-11 08:07:54 -07:00
|
|
|
EH: 'static + EventHandler + Send + Sync,
|
2021-05-11 08:34:57 -07:00
|
|
|
CMP: 'static + Send + ChannelManagerPersister<Signer, CW, T, K, F, L>,
|
|
|
|
M: 'static + Deref<Target = ChainMonitor<Signer, CF, T, F, L, P>> + Send + Sync,
|
|
|
|
CM: 'static + Deref<Target = ChannelManager<Signer, CW, T, K, F, L>> + Send + Sync,
|
2021-04-13 19:38:31 -04:00
|
|
|
PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, L>> + Send + Sync,
|
|
|
|
>
|
2021-05-11 08:34:57 -07:00
|
|
|
(persister: CMP, event_handler: EH, chain_monitor: M, channel_manager: CM, peer_manager: PM, logger: L) -> Self
|
2021-04-13 19:38:31 -04:00
|
|
|
where
|
2021-05-11 08:34:57 -07:00
|
|
|
CF::Target: 'static + chain::Filter,
|
|
|
|
CW::Target: 'static + chain::Watch<Signer>,
|
2021-04-13 16:04:17 -04:00
|
|
|
T::Target: 'static + BroadcasterInterface,
|
|
|
|
K::Target: 'static + KeysInterface<Signer = Signer>,
|
|
|
|
F::Target: 'static + FeeEstimator,
|
|
|
|
L::Target: 'static + Logger,
|
2021-05-11 08:34:57 -07:00
|
|
|
P::Target: 'static + channelmonitor::Persist<Signer>,
|
2021-04-13 19:38:31 -04:00
|
|
|
CMH::Target: 'static + ChannelMessageHandler,
|
|
|
|
RMH::Target: 'static + RoutingMessageHandler,
|
2021-01-11 18:03:32 -05:00
|
|
|
{
|
|
|
|
let stop_thread = Arc::new(AtomicBool::new(false));
|
|
|
|
let stop_thread_clone = stop_thread.clone();
|
|
|
|
let handle = thread::spawn(move || -> Result<(), std::io::Error> {
|
|
|
|
let mut current_time = Instant::now();
|
|
|
|
loop {
|
2021-04-02 18:40:57 -04:00
|
|
|
peer_manager.process_events();
|
2021-05-11 08:07:54 -07:00
|
|
|
channel_manager.process_pending_events(&event_handler);
|
2021-05-11 08:34:57 -07:00
|
|
|
chain_monitor.process_pending_events(&event_handler);
|
2021-04-02 18:40:57 -04:00
|
|
|
let updates_available =
|
|
|
|
channel_manager.await_persistable_update_timeout(Duration::from_millis(100));
|
2021-01-11 18:03:32 -05:00
|
|
|
if updates_available {
|
2021-05-11 08:07:54 -07:00
|
|
|
persister.persist_manager(&*channel_manager)?;
|
2021-01-11 18:03:32 -05:00
|
|
|
}
|
|
|
|
// Exit the loop if the background processor was requested to stop.
|
|
|
|
if stop_thread.load(Ordering::Acquire) == true {
|
|
|
|
log_trace!(logger, "Terminating background processor.");
|
2021-04-02 18:40:57 -04:00
|
|
|
return Ok(());
|
2021-01-11 18:03:32 -05:00
|
|
|
}
|
2021-04-09 16:58:31 -04:00
|
|
|
if current_time.elapsed().as_secs() > FRESHNESS_TIMER {
|
|
|
|
log_trace!(logger, "Calling ChannelManager's and PeerManager's timer_tick_occurred");
|
2021-04-09 16:55:10 -04:00
|
|
|
channel_manager.timer_tick_occurred();
|
2021-04-09 16:58:31 -04:00
|
|
|
peer_manager.timer_tick_occurred();
|
2021-01-11 18:03:32 -05:00
|
|
|
current_time = Instant::now();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
});
|
2021-07-18 13:11:01 -05:00
|
|
|
Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
|
2021-01-11 18:03:32 -05:00
|
|
|
}
|
|
|
|
|
2021-07-19 12:50:56 -05:00
|
|
|
/// Join `BackgroundProcessor`'s thread, returning any error that occurred while persisting
|
|
|
|
/// [`ChannelManager`].
|
|
|
|
///
|
|
|
|
/// # Panics
|
|
|
|
///
|
|
|
|
/// This function panics if the background thread has panicked such as while persisting or
|
|
|
|
/// handling events.
|
|
|
|
///
|
|
|
|
/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
|
|
|
|
pub fn join(mut self) -> Result<(), std::io::Error> {
|
|
|
|
assert!(self.thread_handle.is_some());
|
|
|
|
self.join_thread()
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Stop `BackgroundProcessor`'s thread, returning any error that occurred while persisting
|
|
|
|
/// [`ChannelManager`].
|
|
|
|
///
|
|
|
|
/// # Panics
|
|
|
|
///
|
|
|
|
/// This function panics if the background thread has panicked such as while persisting or
|
|
|
|
/// handling events.
|
|
|
|
///
|
|
|
|
/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
|
2021-07-18 13:11:01 -05:00
|
|
|
pub fn stop(mut self) -> Result<(), std::io::Error> {
|
|
|
|
assert!(self.thread_handle.is_some());
|
|
|
|
self.stop_and_join_thread()
|
|
|
|
}
|
|
|
|
|
|
|
|
fn stop_and_join_thread(&mut self) -> Result<(), std::io::Error> {
|
2021-01-11 18:03:32 -05:00
|
|
|
self.stop_thread.store(true, Ordering::Release);
|
2021-07-19 12:50:56 -05:00
|
|
|
self.join_thread()
|
|
|
|
}
|
|
|
|
|
|
|
|
fn join_thread(&mut self) -> Result<(), std::io::Error> {
|
2021-07-18 13:11:01 -05:00
|
|
|
match self.thread_handle.take() {
|
|
|
|
Some(handle) => handle.join().unwrap(),
|
|
|
|
None => Ok(()),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl Drop for BackgroundProcessor {
	fn drop(&mut self) {
		// Signal the background thread to stop and join it. Note this unwrap will panic if the
		// thread itself panicked, or if persistence returned an error that the user never
		// retrieved via `stop`/`join`.
		self.stop_and_join_thread().unwrap();
	}
}
|
|
|
|
|
|
|
|
#[cfg(test)]
|
|
|
|
mod tests {
|
2021-05-11 08:34:57 -07:00
|
|
|
use bitcoin::blockdata::block::BlockHeader;
|
2021-01-11 18:03:32 -05:00
|
|
|
use bitcoin::blockdata::constants::genesis_block;
|
|
|
|
use bitcoin::blockdata::transaction::{Transaction, TxOut};
|
|
|
|
use bitcoin::network::constants::Network;
|
2021-07-03 01:58:30 +00:00
|
|
|
use lightning::chain::{BestBlock, Confirm, chainmonitor};
|
2021-05-11 08:34:57 -07:00
|
|
|
use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
|
2021-05-11 08:07:54 -07:00
|
|
|
use lightning::chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager};
|
2021-01-11 18:03:32 -05:00
|
|
|
use lightning::chain::transaction::OutPoint;
|
|
|
|
use lightning::get_event_msg;
|
2021-07-03 01:58:30 +00:00
|
|
|
use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, ChannelManager, SimpleArcChannelManager};
|
2021-01-11 18:03:32 -05:00
|
|
|
use lightning::ln::features::InitFeatures;
|
|
|
|
use lightning::ln::msgs::ChannelMessageHandler;
|
2021-04-02 18:40:57 -04:00
|
|
|
use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor};
|
2021-01-11 18:03:32 -05:00
|
|
|
use lightning::util::config::UserConfig;
|
2021-05-12 00:34:30 -07:00
|
|
|
use lightning::util::events::{Event, MessageSendEventsProvider, MessageSendEvent};
|
2021-01-11 18:03:32 -05:00
|
|
|
use lightning::util::ser::Writeable;
|
|
|
|
use lightning::util::test_utils;
|
|
|
|
use lightning_persister::FilesystemPersister;
|
|
|
|
use std::fs;
|
|
|
|
use std::path::PathBuf;
|
|
|
|
use std::sync::{Arc, Mutex};
|
|
|
|
use std::time::Duration;
|
2021-05-11 08:07:54 -07:00
|
|
|
use super::{BackgroundProcessor, FRESHNESS_TIMER};
|
2021-01-11 18:03:32 -05:00
|
|
|
|
2021-05-11 08:34:57 -07:00
|
|
|
	// Maximum time (in seconds) the tests will wait for an event to be handled before failing;
	// several freshness-timer periods, to keep the timeout comfortably generous.
	const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;
|
|
|
|
|
2021-04-02 18:40:57 -04:00
|
|
|
	// Minimal no-op socket descriptor so a `PeerManager` can be constructed in tests without any
	// real network I/O.
	#[derive(Clone, Eq, Hash, PartialEq)]
	struct TestDescriptor{}
	impl SocketDescriptor for TestDescriptor {
		fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
			// Report zero bytes written; these tests never exercise actual socket traffic.
			0
		}

		fn disconnect_socket(&mut self) {}
	}
|
|
|
|
|
2021-02-16 16:30:08 -05:00
|
|
|
	// Concrete `ChainMonitor` instantiation used by the test nodes, backed entirely by test
	// utilities plus an on-disk `FilesystemPersister`.
	type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemPersister>>;
|
2021-01-11 18:03:32 -05:00
|
|
|
|
|
|
|
	// Bundle of everything needed to drive one test node; the first three fields mirror the
	// objects handed to `BackgroundProcessor::start`.
	struct Node {
		node: Arc<SimpleArcChannelManager<ChainMonitor, test_utils::TestBroadcaster, test_utils::TestFeeEstimator, test_utils::TestLogger>>,
		peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, Arc<test_utils::TestLogger>>>,
		chain_monitor: Arc<ChainMonitor>,
		// Owns the per-node data directory, which `Drop` removes on teardown.
		persister: Arc<FilesystemPersister>,
		tx_broadcaster: Arc<test_utils::TestBroadcaster>,
		logger: Arc<test_utils::TestLogger>,
		// Tracked locally so tests can build successive consistent block headers when confirming
		// transactions (see `confirm_transaction_depth`).
		best_block: BestBlock,
	}
|
|
|
|
|
|
|
|
impl Drop for Node {
|
|
|
|
fn drop(&mut self) {
|
|
|
|
let data_dir = self.persister.get_data_dir();
|
|
|
|
match fs::remove_dir_all(data_dir.clone()) {
|
|
|
|
Err(e) => println!("Failed to remove test persister directory {}: {}", data_dir, e),
|
|
|
|
_ => {}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn get_full_filepath(filepath: String, filename: String) -> String {
|
|
|
|
let mut path = PathBuf::from(filepath);
|
|
|
|
path.push(filename);
|
|
|
|
path.to_str().unwrap().to_string()
|
|
|
|
}
|
|
|
|
|
|
|
|
	// Builds `num_nodes` fully-wired test nodes, each with its own chain source, fee estimator,
	// logger, filesystem persister (rooted under a per-node directory derived from
	// `persist_dir`), keys manager, chain monitor, channel manager, and peer manager.
	fn create_nodes(num_nodes: usize, persist_dir: String) -> Vec<Node> {
		let mut nodes = Vec::new();
		for i in 0..num_nodes {
			let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))});
			// 253 sat/kw — presumably the feerate floor used across LDK tests; confirm against
			// other test modules if changing.
			let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) });
			let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet));
			let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
			let persister = Arc::new(FilesystemPersister::new(format!("{}_persister_{}", persist_dir, i)));
			// Deterministic per-node seed so runs are reproducible.
			let seed = [i as u8; 32];
			let network = Network::Testnet;
			// Seed key derivation timestamps from the genesis block time.
			let now = Duration::from_secs(genesis_block(network).header.time as u64);
			let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
			let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), persister.clone()));
			let best_block = BestBlock::from_genesis(network);
			let params = ChainParameters { network, best_block };
			let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), logger.clone(), keys_manager.clone(), UserConfig::default(), params));
			let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new() )};
			let peer_manager = Arc::new(PeerManager::new(msg_handler, keys_manager.get_node_secret(), &seed, logger.clone()));
			let node = Node { node: manager, peer_manager, chain_monitor, persister, tx_broadcaster, logger, best_block };
			nodes.push(node);
		}
		nodes
	}
|
|
|
|
|
|
|
|
	// Drives a full channel-open handshake between `$node_a` (initiator) and `$node_b`, returning
	// the funding transaction. Consumes the FundingGenerationReady event, so it must run before a
	// BackgroundProcessor starts handling events for `$node_a`.
	macro_rules! open_channel {
		($node_a: expr, $node_b: expr, $channel_value: expr) => {{
			begin_open_channel!($node_a, $node_b, $channel_value);
			let events = $node_a.node.get_and_clear_pending_events();
			assert_eq!(events.len(), 1);
			let (temporary_channel_id, tx) = handle_funding_generation_ready!(events[0], $channel_value);
			end_open_channel!($node_a, $node_b, temporary_channel_id, tx);
			tx
		}}
	}
|
|
|
|
|
|
|
|
	// First half of the open-channel dance: `create_channel` on `$node_a` (with user_channel_id
	// 42, asserted later by handle_funding_generation_ready!) and exchange of the
	// open_channel/accept_channel messages.
	macro_rules! begin_open_channel {
		($node_a: expr, $node_b: expr, $channel_value: expr) => {{
			$node_a.node.create_channel($node_b.node.get_our_node_id(), $channel_value, 100, 42, None).unwrap();
			$node_b.node.handle_open_channel(&$node_a.node.get_our_node_id(), InitFeatures::known(), &get_event_msg!($node_a, MessageSendEvent::SendOpenChannel, $node_b.node.get_our_node_id()));
			$node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), InitFeatures::known(), &get_event_msg!($node_b, MessageSendEvent::SendAcceptChannel, $node_a.node.get_our_node_id()));
		}}
	}
|
|
|
|
|
|
|
|
	// Unpacks a FundingGenerationReady event, asserts it matches the requested channel value and
	// the user_channel_id (42) set by begin_open_channel!, and builds a one-output funding
	// transaction paying the requested script. Returns (temporary_channel_id, funding_tx).
	macro_rules! handle_funding_generation_ready {
		($event: expr, $channel_value: expr) => {{
			match $event {
				Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, user_channel_id } => {
					assert_eq!(*channel_value_satoshis, $channel_value);
					assert_eq!(user_channel_id, 42);

					let tx = Transaction { version: 1 as i32, lock_time: 0, input: Vec::new(), output: vec![TxOut {
						value: *channel_value_satoshis, script_pubkey: output_script.clone(),
					}]};
					(*temporary_channel_id, tx)
				},
				_ => panic!("Unexpected event"),
			}
		}}
	}
|
2021-01-11 18:03:32 -05:00
|
|
|
|
2021-05-11 08:07:54 -07:00
|
|
|
	// Second half of the open-channel dance: hand `$node_a` the generated funding transaction and
	// exchange the funding_created/funding_signed messages.
	macro_rules! end_open_channel {
		($node_a: expr, $node_b: expr, $temporary_channel_id: expr, $tx: expr) => {{
			$node_a.node.funding_transaction_generated(&$temporary_channel_id, $tx.clone()).unwrap();
			$node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
			$node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
		}}
	}
|
|
|
|
|
2021-05-31 20:04:36 +00:00
|
|
|
	// Mines `depth` synthetic blocks on top of `node`'s current best block, including `tx` in the
	// first one, feeding the connect notifications to both the ChannelManager and ChainMonitor.
	fn confirm_transaction_depth(node: &mut Node, tx: &Transaction, depth: u32) {
		for i in 1..=depth {
			let prev_blockhash = node.best_block.block_hash();
			let height = node.best_block.height() + 1;
			// Header fields only need to be internally consistent for the test; `time` is reused
			// as the height value.
			let header = BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: Default::default(), time: height, bits: 42, nonce: 42 };
			let txdata = vec![(0, tx)];
			// Advance the locally-tracked best block before notifying listeners.
			node.best_block = BestBlock::new(header.block_hash(), height);
			match i {
				// First block: announce the transaction's confirmation.
				1 => {
					node.node.transactions_confirmed(&header, &txdata, height);
					node.chain_monitor.transactions_confirmed(&header, &txdata, height);
				},
				// Final block: update the best-block view so the accumulated depth is observed.
				x if x == depth => {
					node.node.best_block_updated(&header, height);
					node.chain_monitor.best_block_updated(&header, height);
				},
				// Intermediate blocks need no notification.
				_ => {},
			}
		}
	}
|
2021-05-31 20:04:36 +00:00
|
|
|
	// Confirms `tx` and buries it under `ANTI_REORG_DELAY` blocks so it is treated as
	// irreversibly confirmed.
	fn confirm_transaction(node: &mut Node, tx: &Transaction) {
		confirm_transaction_depth(node, tx, ANTI_REORG_DELAY);
	}
|
2021-05-11 08:34:57 -07:00
|
|
|
|
2021-01-11 18:03:32 -05:00
|
|
|
	#[test]
	fn test_background_processor() {
		// Test that when a new channel is created, the ChannelManager needs to be re-persisted with
		// updates. Also test that when new updates are available, the manager signals that it needs
		// re-persistence and is successfully re-persisted.
		let nodes = create_nodes(2, "test_background_processor".to_string());

		// Go through the channel creation process so that each node has something to persist. Since
		// open_channel consumes events, it must complete before starting BackgroundProcessor to
		// avoid a race with processing events.
		let tx = open_channel!(nodes[0], nodes[1], 100000);

		// Initiate the background processors to watch each node.
		let data_dir = nodes[0].persister.get_data_dir();
		let persister = move |node: &ChannelManager<InMemorySigner, Arc<ChainMonitor>, Arc<test_utils::TestBroadcaster>, Arc<KeysManager>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>>| FilesystemPersister::persist_manager(data_dir.clone(), node);
		let event_handler = |_| {};
		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());

		// Busy-waits until the bytes on disk at `$filepath` match a fresh serialization of `$node`
		// (the background thread writes the file asynchronously).
		macro_rules! check_persisted_data {
			($node: expr, $filepath: expr, $expected_bytes: expr) => {
				match $node.write(&mut $expected_bytes) {
					Ok(()) => {
						loop {
							match std::fs::read($filepath) {
								Ok(bytes) => {
									if bytes == $expected_bytes {
										break
									} else {
										continue
									}
								},
								Err(_) => continue
							}
						}
					},
					Err(e) => panic!("Unexpected error: {}", e)
				}
			}
		}

		// Check that the initial channel manager data is persisted as expected.
		let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "manager".to_string());
		let mut expected_bytes = Vec::new();
		check_persisted_data!(nodes[0].node, filepath.clone(), expected_bytes);
		// Spin until the background thread has consumed the pending persistence notification.
		loop {
			if !nodes[0].node.get_persistence_condvar_value() { break }
		}

		// Force-close the channel.
		nodes[0].node.force_close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).unwrap();

		// Check that the force-close updates are persisted.
		let mut expected_bytes = Vec::new();
		check_persisted_data!(nodes[0].node, filepath.clone(), expected_bytes);
		loop {
			if !nodes[0].node.get_persistence_condvar_value() { break }
		}

		assert!(bg_processor.stop().is_ok());
	}
|
|
|
|
|
|
|
|
	#[test]
	fn test_timer_tick_called() {
		// Test that ChannelManager's and PeerManager's `timer_tick_occurred` is called every
		// `FRESHNESS_TIMER`.
		let nodes = create_nodes(1, "test_timer_tick_called".to_string());
		let data_dir = nodes[0].persister.get_data_dir();
		let persister = move |node: &ChannelManager<InMemorySigner, Arc<ChainMonitor>, Arc<test_utils::TestBroadcaster>, Arc<KeysManager>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>>| FilesystemPersister::persist_manager(data_dir.clone(), node);
		let event_handler = |_| {};
		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
		// Spin until the expected trace line appears, proving the timer branch ran at least once
		// (FRESHNESS_TIMER is 1s under test, so this terminates quickly).
		loop {
			let log_entries = nodes[0].logger.lines.lock().unwrap();
			let desired_log = "Calling ChannelManager's and PeerManager's timer_tick_occurred".to_string();
			if log_entries.get(&("lightning_background_processor".to_string(), desired_log)).is_some() {
				break
			}
		}

		assert!(bg_processor.stop().is_ok());
	}
|
|
|
|
|
|
|
|
	#[test]
	fn test_persist_error() {
		// Test that if the persister returns an error, the background thread exits and the error
		// is surfaced to the caller via `join`.
		let nodes = create_nodes(2, "test_persist_error".to_string());
		open_channel!(nodes[0], nodes[1], 100000);

		// A persister that always fails, driving the background thread into its error path.
		let persister = |_: &_| Err(std::io::Error::new(std::io::ErrorKind::Other, "test"));
		let event_handler = |_| {};
		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
		match bg_processor.join() {
			Ok(_) => panic!("Expected error persisting manager"),
			Err(e) => {
				assert_eq!(e.kind(), std::io::ErrorKind::Other);
				assert_eq!(e.get_ref().unwrap().to_string(), "test");
			},
		}
	}
|
2021-05-11 08:07:54 -07:00
|
|
|
|
|
|
|
	#[test]
	fn test_background_event_handling() {
		// Test that events generated while the background processor is running are delivered to
		// the registered event handler: FundingGenerationReady during channel open, then
		// SpendableOutputs after a force-close matures.
		let mut nodes = create_nodes(2, "test_background_event_handling".to_string());
		let channel_value = 100000;
		let data_dir = nodes[0].persister.get_data_dir();
		let persister = move |node: &_| FilesystemPersister::persist_manager(data_dir.clone(), node);

		// Set up a background event handler for FundingGenerationReady events.
		let (sender, receiver) = std::sync::mpsc::sync_channel(1);
		let event_handler = move |event| {
			sender.send(handle_funding_generation_ready!(event, channel_value)).unwrap();
		};
		let bg_processor = BackgroundProcessor::start(persister.clone(), event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());

		// Open a channel and check that the FundingGenerationReady event was handled.
		begin_open_channel!(nodes[0], nodes[1], channel_value);
		let (temporary_channel_id, funding_tx) = receiver
			.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
			.expect("FundingGenerationReady not handled within deadline");
		end_open_channel!(nodes[0], nodes[1], temporary_channel_id, funding_tx);

		// Confirm the funding transaction.
		confirm_transaction(&mut nodes[0], &funding_tx);
		let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id());
		confirm_transaction(&mut nodes[1], &funding_tx);
		let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id());
		// Exchange funding_locked so the channel is fully usable on both ends.
		nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &bs_funding);
		let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
		nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &as_funding);
		let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());

		assert!(bg_processor.stop().is_ok());

		// Set up a background event handler for SpendableOutputs events.
		let (sender, receiver) = std::sync::mpsc::sync_channel(1);
		let event_handler = move |event| sender.send(event).unwrap();
		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());

		// Force close the channel and check that the SpendableOutputs event was handled.
		nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
		let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
		// Bury the commitment transaction deep enough for outputs to become spendable.
		confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);
		let event = receiver
			.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
			.expect("SpendableOutputs not handled within deadline");
		match event {
			Event::SpendableOutputs { .. } => {},
			_ => panic!("Unexpected event: {:?}", event),
		}

		assert!(bg_processor.stop().is_ok());
	}
|
2021-01-11 18:03:32 -05:00
|
|
|
}
|