Automatically prune NetworkGraph of stale channels hourly in BP

This commit is contained in:
Matt Corallo 2021-12-15 18:59:15 +00:00
parent cd43ff4a5e
commit 73e8dc41a6
2 changed files with 30 additions and 1 deletion

View file

@ -34,6 +34,8 @@ use std::ops::Deref;
/// [`ChannelManager`] persistence should be done in the background.
/// * Calling [`ChannelManager::timer_tick_occurred`] and [`PeerManager::timer_tick_occurred`]
///   at the appropriate intervals.
/// * Calling [`NetworkGraph::remove_stale_channels`] (if a [`NetGraphMsgHandler`] is provided to
/// [`BackgroundProcessor::start`]).
///
/// It will also call [`PeerManager::process_events`] periodically though this shouldn't be relied
/// upon as doing so may result in high latency.
@ -68,6 +70,9 @@ const PING_TIMER: u64 = 30;
#[cfg(test)]
const PING_TIMER: u64 = 1;
/// Prune the network graph of stale entries hourly.
const NETWORK_PRUNE_TIMER: u64 = 60 * 60;
/// Trait which handles persisting a [`ChannelManager`] to disk.
///
/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
@ -203,13 +208,16 @@ impl BackgroundProcessor {
let stop_thread = Arc::new(AtomicBool::new(false));
let stop_thread_clone = stop_thread.clone();
let handle = thread::spawn(move || -> Result<(), std::io::Error> {
let event_handler = DecoratingEventHandler { event_handler, net_graph_msg_handler }; let event_handler = DecoratingEventHandler { event_handler, net_graph_msg_handler: net_graph_msg_handler.as_ref().map(|t| t.deref()) };
log_trace!(logger, "Calling ChannelManager's timer_tick_occurred on startup");
channel_manager.timer_tick_occurred();
let mut last_freshness_call = Instant::now();
let mut last_ping_call = Instant::now();
let mut last_prune_call = Instant::now();
let mut have_pruned = false;
loop {
peer_manager.process_events();
channel_manager.process_pending_events(&event_handler);
@ -247,6 +255,19 @@ impl BackgroundProcessor {
peer_manager.timer_tick_occurred();
last_ping_call = Instant::now();
}
// Note that we want to run a graph prune once not long after startup before
// falling back to our usual hourly prunes. This avoids short-lived clients never
// pruning their network graph. We run once 60 seconds after startup before
// continuing our normal cadence.
if last_prune_call.elapsed().as_secs() > if have_pruned { NETWORK_PRUNE_TIMER } else { 60 } {
if let Some(ref handler) = net_graph_msg_handler {
log_trace!(logger, "Pruning network graph of stale entries");
handler.network_graph().remove_stale_channels();
last_prune_call = Instant::now();
have_pruned = true;
}
}
}
});
Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }

View file

@ -249,6 +249,12 @@ where C::Target: chain::Access, L::Target: Logger
self.chain_access = chain_access;
}
/// Gets a reference to the underlying [`NetworkGraph`] which was provided in
/// [`NetGraphMsgHandler::new`].
pub fn network_graph(&self) -> &G {
&self.network_graph
}
/// Returns true when a full routing table sync should be performed with a peer.
fn should_request_full_sync(&self, _node_id: &PublicKey) -> bool {
//TODO: Determine whether to request a full sync based on the network map.
@ -1074,6 +1080,8 @@ impl NetworkGraph {
/// updates every two weeks, the non-normative section of BOLT 7 currently suggests that
/// pruning occur for updates which are at least two weeks old, which we implement here.
///
/// Note that for users of the `lightning-background-processor` crate this method may be
/// automatically called regularly for you.
///
/// This method is only available with the `std` feature. See
/// [`NetworkGraph::remove_stale_channels_with_time`] for `no-std` use.