From 524981ddf4bae0da695bb40c650812dceeb4d408 Mon Sep 17 00:00:00 2001
From: Matt Corallo <git@bluematt.me>
Date: Sat, 29 Apr 2023 17:58:15 +0000
Subject: [PATCH 1/2] Move the `CustomMessageHandler` into the `MessageHandler`
 struct

`PeerManager` takes a `MessageHandler` struct which contains all
the known message handlers for it to pass messages to. It then,
separately, takes a `CustomMessageHandler`. This makes no sense; we
should simply include the `CustomMessageHandler` in the
`MessageHandler` struct for consistency.
---
 fuzz/src/full_stack.rs                    |  3 +-
 lightning-background-processor/src/lib.rs |  8 +++--
 lightning-net-tokio/src/lib.rs            |  9 +++--
 lightning/src/ln/peer_handler.rs          | 44 ++++++++++++++---------
 4 files changed, 41 insertions(+), 23 deletions(-)

diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs
index 876a412da..a3cce05b0 100644
--- a/fuzz/src/full_stack.rs
+++ b/fuzz/src/full_stack.rs
@@ -458,7 +458,8 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
 		chan_handler: channelmanager.clone(),
 		route_handler: gossip_sync.clone(),
 		onion_message_handler: IgnoringMessageHandler {},
-	}, 0, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 0], Arc::clone(&logger), IgnoringMessageHandler{}, keys_manager.clone()));
+		custom_message_handler: IgnoringMessageHandler {},
+	}, 0, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 0], Arc::clone(&logger), keys_manager.clone()));
 
 	let mut should_forward = false;
 	let mut payments_received: Vec<PaymentHash> = Vec::new();
diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs
index bc42c6eb6..f8c392786 100644
--- a/lightning-background-processor/src/lib.rs
+++ b/lightning-background-processor/src/lib.rs
@@ -1140,8 +1140,12 @@ mod tests {
 			let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params));
 			let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()));
 			let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone(), logger.clone()));
-			let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new()), onion_message_handler: IgnoringMessageHandler{}};
-			let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), IgnoringMessageHandler{}, keys_manager.clone()));
+			let msg_handler = MessageHandler {
+				chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()),
+				route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new()),
+				onion_message_handler: IgnoringMessageHandler{}, custom_message_handler: IgnoringMessageHandler{}
+			};
+			let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), keys_manager.clone()));
 			let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer };
 			nodes.push(node);
 		}
diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs
index 48f1736d0..1b3c2563e 100644
--- a/lightning-net-tokio/src/lib.rs
+++ b/lightning-net-tokio/src/lib.rs
@@ -659,7 +659,8 @@ mod tests {
 			chan_handler: Arc::clone(&a_handler),
 			route_handler: Arc::clone(&a_handler),
 			onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
-		}, 0, &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}), Arc::new(TestNodeSigner::new(a_key))));
+			custom_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
+		}, 0, &[1; 32], Arc::new(TestLogger()), Arc::new(TestNodeSigner::new(a_key))));
 
 		let (b_connected_sender, mut b_connected) = mpsc::channel(1);
 		let (b_disconnected_sender, mut b_disconnected) = mpsc::channel(1);
@@ -674,7 +675,8 @@ mod tests {
 			chan_handler: Arc::clone(&b_handler),
 			route_handler: Arc::clone(&b_handler),
 			onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
-		}, 0, &[2; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}), Arc::new(TestNodeSigner::new(b_key))));
+			custom_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
+		}, 0, &[2; 32], Arc::new(TestLogger()), Arc::new(TestNodeSigner::new(b_key))));
 
 		// We bind on localhost, hoping the environment is properly configured with a local
 		// address. This may not always be the case in containers and the like, so if this test is
@@ -727,7 +729,8 @@ mod tests {
 			chan_handler: Arc::new(lightning::ln::peer_handler::ErroringMessageHandler::new()),
 			onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
 			route_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
-		}, 0, &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}), Arc::new(TestNodeSigner::new(a_key))));
+			custom_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
+		}, 0, &[1; 32], Arc::new(TestLogger()), Arc::new(TestNodeSigner::new(a_key))));
 
 		// Make two connections, one for an inbound and one for an outbound connection
 		let conn_a = {
diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index 040ccc655..38850b730 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -259,10 +259,11 @@ impl Deref for ErroringMessageHandler {
 }
 
 /// Provides references to trait impls which handle different types of messages.
-pub struct MessageHandler<CM: Deref, RM: Deref, OM: Deref> where
-		CM::Target: ChannelMessageHandler,
-		RM::Target: RoutingMessageHandler,
-		OM::Target: OnionMessageHandler,
+pub struct MessageHandler<CM: Deref, RM: Deref, OM: Deref, CustomM: Deref> where
+	CM::Target: ChannelMessageHandler,
+	RM::Target: RoutingMessageHandler,
+	OM::Target: OnionMessageHandler,
+	CustomM::Target: CustomMessageHandler,
 {
 	/// A message handler which handles messages specific to channels. Usually this is just a
 	/// [`ChannelManager`] object or an [`ErroringMessageHandler`].
@@ -275,9 +276,15 @@ pub struct MessageHandler<CM: Deref, RM: Deref, OM: Deref> where
 	/// [`P2PGossipSync`]: crate::routing::gossip::P2PGossipSync
 	pub route_handler: RM,
 
-	/// A message handler which handles onion messages. For now, this can only be an
-	/// [`IgnoringMessageHandler`].
+	/// A message handler which handles onion messages. This should generally be an
+	/// [`OnionMessenger`], but can also be an [`IgnoringMessageHandler`].
+	///
+	/// [`OnionMessenger`]: crate::onion_message::OnionMessenger
 	pub onion_message_handler: OM,
+
+	/// A message handler which handles custom messages. The only LDK-provided implementation is
+	/// [`IgnoringMessageHandler`].
+	pub custom_message_handler: CustomM,
 }
 
 /// Provides an object which can be used to send data to and which uniquely identifies a connection
@@ -561,7 +568,7 @@ pub struct PeerManager<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: D
 		L::Target: Logger,
 		CMH::Target: CustomMessageHandler,
 		NS::Target: NodeSigner {
-	message_handler: MessageHandler<CM, RM, OM>,
+	message_handler: MessageHandler<CM, RM, OM, CMH>,
 	/// Connection state for each connected peer - we have an outer read-write lock which is taken
 	/// as read while we're doing processing for a peer and taken write when a peer is being added
 	/// or removed.
@@ -591,7 +598,6 @@ pub struct PeerManager<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: D
 	last_node_announcement_serial: AtomicU32,
 
 	ephemeral_key_midstate: Sha256Engine,
-	custom_message_handler: CMH,
 
 	peer_counter: AtomicCounter,
 
@@ -652,7 +658,8 @@ impl<Descriptor: SocketDescriptor, CM: Deref, OM: Deref, L: Deref, NS: Deref> Pe
 			chan_handler: channel_message_handler,
 			route_handler: IgnoringMessageHandler{},
 			onion_message_handler,
-		}, current_time, ephemeral_random_data, logger, IgnoringMessageHandler{}, node_signer)
+			custom_message_handler: IgnoringMessageHandler{},
+		}, current_time, ephemeral_random_data, logger, node_signer)
 	}
 }
 
@@ -679,7 +686,8 @@ impl<Descriptor: SocketDescriptor, RM: Deref, L: Deref, NS: Deref> PeerManager<D
 			chan_handler: ErroringMessageHandler::new(),
 			route_handler: routing_message_handler,
 			onion_message_handler: IgnoringMessageHandler{},
-		}, current_time, ephemeral_random_data, logger, IgnoringMessageHandler{}, node_signer)
+			custom_message_handler: IgnoringMessageHandler{},
+		}, current_time, ephemeral_random_data, logger, node_signer)
 	}
 }
 
@@ -741,7 +749,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 	/// incremented irregularly internally. In general it is best to simply use the current UNIX
 	/// timestamp, however if it is not available a persistent counter that increases once per
 	/// minute should suffice.
-	pub fn new(message_handler: MessageHandler<CM, RM, OM>, current_time: u32, ephemeral_random_data: &[u8; 32], logger: L, custom_message_handler: CMH, node_signer: NS) -> Self {
+	pub fn new(message_handler: MessageHandler<CM, RM, OM, CMH>, current_time: u32, ephemeral_random_data: &[u8; 32], logger: L, node_signer: NS) -> Self {
 		let mut ephemeral_key_midstate = Sha256::engine();
 		ephemeral_key_midstate.input(ephemeral_random_data);
 
@@ -761,7 +769,6 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 			gossip_processing_backlog_lifted: AtomicBool::new(false),
 			last_node_announcement_serial: AtomicU32::new(current_time),
 			logger,
-			custom_message_handler,
 			node_signer,
 			secp_ctx,
 		}
@@ -1232,7 +1239,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 									peer.pending_read_is_header = true;
 
 									let mut reader = io::Cursor::new(&msg_data[..]);
-									let message_result = wire::read(&mut reader, &*self.custom_message_handler);
+									let message_result = wire::read(&mut reader, &*self.message_handler.custom_message_handler);
 									let message = match message_result {
 										Ok(x) => x,
 										Err(e) => {
@@ -1543,7 +1550,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 				log_trace!(self.logger, "Received unknown odd message of type {}, ignoring", type_id);
 			},
 			wire::Message::Custom(custom) => {
-				self.custom_message_handler.handle_custom_message(custom, &their_node_id)?;
+				self.message_handler.custom_message_handler.handle_custom_message(custom, &their_node_id)?;
 			},
 		};
 		Ok(should_forward)
@@ -1896,7 +1903,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 				}
 			}
 
-			for (node_id, msg) in self.custom_message_handler.get_and_clear_pending_msg() {
+			for (node_id, msg) in self.message_handler.custom_message_handler.get_and_clear_pending_msg() {
 				if peers_to_disconnect.get(&node_id).is_some() { continue; }
 				self.enqueue_message(&mut *get_peer_for_forwarding!(&node_id), &msg);
 			}
@@ -2264,8 +2271,11 @@ mod tests {
 		let mut peers = Vec::new();
 		for i in 0..peer_count {
 			let ephemeral_bytes = [i as u8; 32];
-			let msg_handler = MessageHandler { chan_handler: &cfgs[i].chan_handler, route_handler: &cfgs[i].routing_handler, onion_message_handler: IgnoringMessageHandler {} };
-			let peer = PeerManager::new(msg_handler, 0, &ephemeral_bytes, &cfgs[i].logger, IgnoringMessageHandler {}, &cfgs[i].node_signer);
+			let msg_handler = MessageHandler {
+				chan_handler: &cfgs[i].chan_handler, route_handler: &cfgs[i].routing_handler,
+				onion_message_handler: IgnoringMessageHandler {}, custom_message_handler: IgnoringMessageHandler {}
+			};
+			let peer = PeerManager::new(msg_handler, 0, &ephemeral_bytes, &cfgs[i].logger, &cfgs[i].node_signer);
 			peers.push(peer);
 		}
 

From 14c6810e48ea9a36fc8efc9f2ed788691acb9a02 Mon Sep 17 00:00:00 2001
From: Matt Corallo <git@bluematt.me>
Date: Sat, 29 Apr 2023 18:45:59 +0000
Subject: [PATCH 2/2] Expose a trait impl'd for all `PeerManager` for use as a
 bound

A while back, in tests, we added an `AChannelManager` trait, which
is implemented for all `ChannelManager`s, and can be used as a
bound when we need a `ChannelManager`, rather than having to
duplicate all the bounds of `ChannelManager` everywhere.

Here we do the same thing for `PeerManager`, but make it public and
use it to clean up `lightning-net-tokio` and
`lightning-background-processor`.

We should likely do the same for `AChannelManager`, but that's left
as a followup.
---
 lightning-background-processor/src/lib.rs |  35 ++-----
 lightning-net-tokio/src/lib.rs            | 115 +++++-----------------
 lightning/src/ln/peer_handler.rs          |  48 +++++++++
 3 files changed, 81 insertions(+), 117 deletions(-)

diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs
index f8c392786..a9a69de7a 100644
--- a/lightning-background-processor/src/lib.rs
+++ b/lightning-background-processor/src/lib.rs
@@ -30,8 +30,7 @@ use lightning::events::{Event, PathFailure};
 #[cfg(feature = "std")]
 use lightning::events::{EventHandler, EventsProvider};
 use lightning::ln::channelmanager::ChannelManager;
-use lightning::ln::msgs::{ChannelMessageHandler, OnionMessageHandler, RoutingMessageHandler};
-use lightning::ln::peer_handler::{CustomMessageHandler, PeerManager, SocketDescriptor};
+use lightning::ln::peer_handler::APeerManager;
 use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
 use lightning::routing::utxo::UtxoLookup;
 use lightning::routing::router::Router;
@@ -81,6 +80,8 @@ use alloc::vec::Vec;
 ///
 /// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor
 /// [`Event`]: lightning::events::Event
+/// [`PeerManager::timer_tick_occurred`]: lightning::ln::peer_handler::PeerManager::timer_tick_occurred
+/// [`PeerManager::process_events`]: lightning::ln::peer_handler::PeerManager::process_events
 #[cfg(feature = "std")]
 #[must_use = "BackgroundProcessor will immediately stop on drop. It should be stored until shutdown."]
 pub struct BackgroundProcessor {
@@ -295,7 +296,7 @@ macro_rules! define_run_body {
 			// ChannelManager, we want to minimize methods blocking on a ChannelManager
 			// generally, and as a fallback place such blocking only immediately before
 			// persistence.
-			$peer_manager.process_events();
+			$peer_manager.as_ref().process_events();
 
 			// Exit the loop if the background processor was requested to stop.
 			if $loop_exit_check {
@@ -340,11 +341,11 @@ macro_rules! define_run_body {
 				// more than a handful of seconds to complete, and shouldn't disconnect all our
 				// peers.
 				log_trace!($logger, "100ms sleep took more than a second, disconnecting peers.");
-				$peer_manager.disconnect_all_peers();
+				$peer_manager.as_ref().disconnect_all_peers();
 				last_ping_call = $get_timer(PING_TIMER);
 			} else if $timer_elapsed(&mut last_ping_call, PING_TIMER) {
 				log_trace!($logger, "Calling PeerManager's timer_tick_occurred");
-				$peer_manager.timer_tick_occurred();
+				$peer_manager.as_ref().timer_tick_occurred();
 				last_ping_call = $get_timer(PING_TIMER);
 			}
 
@@ -578,10 +579,6 @@ pub async fn process_events_async<
 	G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
 	L: 'static + Deref + Send + Sync,
 	P: 'static + Deref + Send + Sync,
-	Descriptor: 'static + SocketDescriptor + Send + Sync,
-	CMH: 'static + Deref + Send + Sync,
-	RMH: 'static + Deref + Send + Sync,
-	OMH: 'static + Deref + Send + Sync,
 	EventHandlerFuture: core::future::Future<Output = ()>,
 	EventHandler: Fn(Event) -> EventHandlerFuture,
 	PS: 'static + Deref + Send,
@@ -589,8 +586,8 @@ pub async fn process_events_async<
 	CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
 	PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
 	RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
-	UMH: 'static + Deref + Send + Sync,
-	PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, OMH, L, UMH, NS>> + Send + Sync,
+	APM: APeerManager + Send + Sync,
+	PM: 'static + Deref<Target = APM> + Send + Sync,
 	S: 'static + Deref<Target = SC> + Send + Sync,
 	SC: for<'b> WriteableScore<'b>,
 	SleepFuture: core::future::Future<Output = bool> + core::marker::Unpin,
@@ -612,10 +609,6 @@ where
 	R::Target: 'static + Router,
 	L::Target: 'static + Logger,
 	P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
-	CMH::Target: 'static + ChannelMessageHandler,
-	OMH::Target: 'static + OnionMessageHandler,
-	RMH::Target: 'static + RoutingMessageHandler,
-	UMH::Target: 'static + CustomMessageHandler,
 	PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
 {
 	let mut should_break = false;
@@ -721,18 +714,14 @@ impl BackgroundProcessor {
 		G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
 		L: 'static + Deref + Send + Sync,
 		P: 'static + Deref + Send + Sync,
-		Descriptor: 'static + SocketDescriptor + Send + Sync,
-		CMH: 'static + Deref + Send + Sync,
-		OMH: 'static + Deref + Send + Sync,
-		RMH: 'static + Deref + Send + Sync,
 		EH: 'static + EventHandler + Send,
 		PS: 'static + Deref + Send,
 		M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
 		CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
 		PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
 		RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
-		UMH: 'static + Deref + Send + Sync,
-		PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, OMH, L, UMH, NS>> + Send + Sync,
+		APM: APeerManager + Send + Sync,
+		PM: 'static + Deref<Target = APM> + Send + Sync,
 		S: 'static + Deref<Target = SC> + Send + Sync,
 		SC: for <'b> WriteableScore<'b>,
 	>(
@@ -751,10 +740,6 @@ impl BackgroundProcessor {
 		R::Target: 'static + Router,
 		L::Target: 'static + Logger,
 		P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
-		CMH::Target: 'static + ChannelMessageHandler,
-		OMH::Target: 'static + OnionMessageHandler,
-		RMH::Target: 'static + RoutingMessageHandler,
-		UMH::Target: 'static + CustomMessageHandler,
 		PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
 	{
 		let stop_thread = Arc::new(AtomicBool::new(false));
diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs
index 1b3c2563e..2a93ca433 100644
--- a/lightning-net-tokio/src/lib.rs
+++ b/lightning-net-tokio/src/lib.rs
@@ -36,12 +36,10 @@ use tokio::{io, time};
 use tokio::sync::mpsc;
 use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt};
 
-use lightning::chain::keysinterface::NodeSigner;
 use lightning::ln::peer_handler;
 use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
-use lightning::ln::peer_handler::CustomMessageHandler;
-use lightning::ln::msgs::{ChannelMessageHandler, NetAddress, OnionMessageHandler, RoutingMessageHandler};
-use lightning::util::logger::Logger;
+use lightning::ln::peer_handler::APeerManager;
+use lightning::ln::msgs::NetAddress;
 
 use std::ops::Deref;
 use std::task;
@@ -80,53 +78,25 @@ struct Connection {
 	id: u64,
 }
 impl Connection {
-	async fn poll_event_process<PM, CMH, RMH, OMH, L, UMH, NS>(
+	async fn poll_event_process<PM: Deref + 'static + Send + Sync>(
 		peer_manager: PM,
 		mut event_receiver: mpsc::Receiver<()>,
-	) where
-			PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH, NS>> + 'static + Send + Sync,
-			CMH: Deref + 'static + Send + Sync,
-			RMH: Deref + 'static + Send + Sync,
-			OMH: Deref + 'static + Send + Sync,
-			L: Deref + 'static + Send + Sync,
-			UMH: Deref + 'static + Send + Sync,
-			NS: Deref + 'static + Send + Sync,
-			CMH::Target: ChannelMessageHandler + Send + Sync,
-			RMH::Target: RoutingMessageHandler + Send + Sync,
-			OMH::Target: OnionMessageHandler + Send + Sync,
-			L::Target: Logger + Send + Sync,
-			UMH::Target: CustomMessageHandler + Send + Sync,
-			NS::Target: NodeSigner + Send + Sync,
-	{
+	) where PM::Target: APeerManager<Descriptor = SocketDescriptor> {
 		loop {
 			if event_receiver.recv().await.is_none() {
 				return;
 			}
-			peer_manager.process_events();
+			peer_manager.as_ref().process_events();
 		}
 	}
 
-	async fn schedule_read<PM, CMH, RMH, OMH, L, UMH, NS>(
+	async fn schedule_read<PM: Deref + 'static + Send + Sync + Clone>(
 		peer_manager: PM,
 		us: Arc<Mutex<Self>>,
 		mut reader: io::ReadHalf<TcpStream>,
 		mut read_wake_receiver: mpsc::Receiver<()>,
 		mut write_avail_receiver: mpsc::Receiver<()>,
-	) where
-			PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH, NS>> + 'static + Send + Sync + Clone,
-			CMH: Deref + 'static + Send + Sync,
-			RMH: Deref + 'static + Send + Sync,
-			OMH: Deref + 'static + Send + Sync,
-			L: Deref + 'static + Send + Sync,
-			UMH: Deref + 'static + Send + Sync,
-			NS: Deref + 'static + Send + Sync,
-			CMH::Target: ChannelMessageHandler + 'static + Send + Sync,
-			RMH::Target: RoutingMessageHandler + 'static + Send + Sync,
-			OMH::Target: OnionMessageHandler + 'static + Send + Sync,
-			L::Target: Logger + 'static + Send + Sync,
-			UMH::Target: CustomMessageHandler + 'static + Send + Sync,
-			NS::Target: NodeSigner + 'static + Send + Sync,
-		{
+	) where PM::Target: APeerManager<Descriptor = SocketDescriptor> {
 		// Create a waker to wake up poll_event_process, above
 		let (event_waker, event_receiver) = mpsc::channel(1);
 		tokio::spawn(Self::poll_event_process(peer_manager.clone(), event_receiver));
@@ -160,7 +130,7 @@ impl Connection {
 			tokio::select! {
 				v = write_avail_receiver.recv() => {
 					assert!(v.is_some()); // We can't have dropped the sending end, its in the us Arc!
-					if peer_manager.write_buffer_space_avail(&mut our_descriptor).is_err() {
+					if peer_manager.as_ref().write_buffer_space_avail(&mut our_descriptor).is_err() {
 						break Disconnect::CloseConnection;
 					}
 				},
@@ -168,7 +138,7 @@ impl Connection {
 				read = reader.read(&mut buf), if !read_paused => match read {
 					Ok(0) => break Disconnect::PeerDisconnected,
 					Ok(len) => {
-						let read_res = peer_manager.read_event(&mut our_descriptor, &buf[0..len]);
+						let read_res = peer_manager.as_ref().read_event(&mut our_descriptor, &buf[0..len]);
 						let mut us_lock = us.lock().unwrap();
 						match read_res {
 							Ok(pause_read) => {
@@ -197,8 +167,8 @@ impl Connection {
 			let _ = writer.shutdown().await;
 		}
 		if let Disconnect::PeerDisconnected = disconnect_type {
-			peer_manager.socket_disconnected(&our_descriptor);
-			peer_manager.process_events();
+			peer_manager.as_ref().socket_disconnected(&our_descriptor);
+			peer_manager.as_ref().process_events();
 		}
 	}
 
@@ -245,30 +215,17 @@ fn get_addr_from_stream(stream: &StdTcpStream) -> Option<NetAddress> {
 /// The returned future will complete when the peer is disconnected and associated handling
 /// futures are freed, though, because all processing futures are spawned with tokio::spawn, you do
 /// not need to poll the provided future in order to make progress.
-pub fn setup_inbound<PM, CMH, RMH, OMH, L, UMH, NS>(
+pub fn setup_inbound<PM: Deref + 'static + Send + Sync + Clone>(
 	peer_manager: PM,
 	stream: StdTcpStream,
-) -> impl std::future::Future<Output=()> where
-		PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH, NS>> + 'static + Send + Sync + Clone,
-		CMH: Deref + 'static + Send + Sync,
-		RMH: Deref + 'static + Send + Sync,
-		OMH: Deref + 'static + Send + Sync,
-		L: Deref + 'static + Send + Sync,
-		UMH: Deref + 'static + Send + Sync,
-		NS: Deref + 'static + Send + Sync,
-		CMH::Target: ChannelMessageHandler + Send + Sync,
-		RMH::Target: RoutingMessageHandler + Send + Sync,
-		OMH::Target: OnionMessageHandler + Send + Sync,
-		L::Target: Logger + Send + Sync,
-		UMH::Target: CustomMessageHandler + Send + Sync,
-		NS::Target: NodeSigner + Send + Sync,
-{
+) -> impl std::future::Future<Output=()>
+where PM::Target: APeerManager<Descriptor = SocketDescriptor> {
 	let remote_addr = get_addr_from_stream(&stream);
 	let (reader, write_receiver, read_receiver, us) = Connection::new(stream);
 	#[cfg(test)]
 	let last_us = Arc::clone(&us);
 
-	let handle_opt = if peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), remote_addr).is_ok() {
+	let handle_opt = if peer_manager.as_ref().new_inbound_connection(SocketDescriptor::new(us.clone()), remote_addr).is_ok() {
 		Some(tokio::spawn(Connection::schedule_read(peer_manager, us, reader, read_receiver, write_receiver)))
 	} else {
 		// Note that we will skip socket_disconnected here, in accordance with the PeerManager
@@ -300,30 +257,17 @@ pub fn setup_inbound<PM, CMH, RMH, OMH, L, UMH, NS>(
 /// The returned future will complete when the peer is disconnected and associated handling
 /// futures are freed, though, because all processing futures are spawned with tokio::spawn, you do
 /// not need to poll the provided future in order to make progress.
-pub fn setup_outbound<PM, CMH, RMH, OMH, L, UMH, NS>(
+pub fn setup_outbound<PM: Deref + 'static + Send + Sync + Clone>(
 	peer_manager: PM,
 	their_node_id: PublicKey,
 	stream: StdTcpStream,
-) -> impl std::future::Future<Output=()> where
-		PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH, NS>> + 'static + Send + Sync + Clone,
-		CMH: Deref + 'static + Send + Sync,
-		RMH: Deref + 'static + Send + Sync,
-		OMH: Deref + 'static + Send + Sync,
-		L: Deref + 'static + Send + Sync,
-		UMH: Deref + 'static + Send + Sync,
-		NS: Deref + 'static + Send + Sync,
-		CMH::Target: ChannelMessageHandler + Send + Sync,
-		RMH::Target: RoutingMessageHandler + Send + Sync,
-		OMH::Target: OnionMessageHandler + Send + Sync,
-		L::Target: Logger + Send + Sync,
-		UMH::Target: CustomMessageHandler + Send + Sync,
-		NS::Target: NodeSigner + Send + Sync,
-{
+) -> impl std::future::Future<Output=()>
+where PM::Target: APeerManager<Descriptor = SocketDescriptor> {
 	let remote_addr = get_addr_from_stream(&stream);
 	let (reader, mut write_receiver, read_receiver, us) = Connection::new(stream);
 	#[cfg(test)]
 	let last_us = Arc::clone(&us);
-	let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), remote_addr) {
+	let handle_opt = if let Ok(initial_send) = peer_manager.as_ref().new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), remote_addr) {
 		Some(tokio::spawn(async move {
 			// We should essentially always have enough room in a TCP socket buffer to send the
 			// initial 10s of bytes. However, tokio running in single-threaded mode will always
@@ -342,7 +286,7 @@ pub fn setup_outbound<PM, CMH, RMH, OMH, L, UMH, NS>(
 						},
 						_ => {
 							eprintln!("Failed to write first full message to socket!");
-							peer_manager.socket_disconnected(&SocketDescriptor::new(Arc::clone(&us)));
+							peer_manager.as_ref().socket_disconnected(&SocketDescriptor::new(Arc::clone(&us)));
 							break Err(());
 						}
 					}
@@ -385,25 +329,12 @@ pub fn setup_outbound<PM, CMH, RMH, OMH, L, UMH, NS>(
 /// disconnected and associated handling futures are freed, though, because all processing in said
 /// futures are spawned with tokio::spawn, you do not need to poll the second future in order to
 /// make progress.
-pub async fn connect_outbound<PM, CMH, RMH, OMH, L, UMH, NS>(
+pub async fn connect_outbound<PM: Deref + 'static + Send + Sync + Clone>(
 	peer_manager: PM,
 	their_node_id: PublicKey,
 	addr: SocketAddr,
-) -> Option<impl std::future::Future<Output=()>> where
-		PM: Deref<Target = peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH, NS>> + 'static + Send + Sync + Clone,
-		CMH: Deref + 'static + Send + Sync,
-		RMH: Deref + 'static + Send + Sync,
-		OMH: Deref + 'static + Send + Sync,
-		L: Deref + 'static + Send + Sync,
-		UMH: Deref + 'static + Send + Sync,
-		NS: Deref + 'static + Send + Sync,
-		CMH::Target: ChannelMessageHandler + Send + Sync,
-		RMH::Target: RoutingMessageHandler + Send + Sync,
-		OMH::Target: OnionMessageHandler + Send + Sync,
-		L::Target: Logger + Send + Sync,
-		UMH::Target: CustomMessageHandler + Send + Sync,
-		NS::Target: NodeSigner + Send + Sync,
-{
+) -> Option<impl std::future::Future<Output=()>>
+where PM::Target: APeerManager<Descriptor = SocketDescriptor> {
 	if let Ok(Ok(stream)) = time::timeout(Duration::from_secs(10), async { TcpStream::connect(&addr).await.map(|s| s.into_std().unwrap()) }).await {
 		Some(setup_outbound(peer_manager, their_node_id, stream))
 	} else { None }
diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index 38850b730..25ac234a8 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -542,6 +542,54 @@ pub type SimpleArcPeerManager<SD, M, T, F, C, L> = PeerManager<SD, Arc<SimpleArc
 /// This is not exported to bindings users as general type aliases don't make sense in bindings.
 pub type SimpleRefPeerManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, 'i, 'j, 'k, 'l, 'm, SD, M, T, F, C, L> = PeerManager<SD, SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'm, M, T, F, L>, &'f P2PGossipSync<&'g NetworkGraph<&'f L>, &'h C, &'f L>, &'i SimpleRefOnionMessenger<'j, 'k, L>, &'f L, IgnoringMessageHandler, &'c KeysManager>;
 
+
+/// A generic trait which is implemented for all [`PeerManager`]s. This makes bounding functions or
+/// structs on any [`PeerManager`] much simpler as only this trait is needed as a bound, rather
+/// than the full set of bounds on [`PeerManager`] itself.
+#[allow(missing_docs)]
+pub trait APeerManager {
+	type Descriptor: SocketDescriptor;
+	type CMT: ChannelMessageHandler + ?Sized;
+	type CM: Deref<Target=Self::CMT>;
+	type RMT: RoutingMessageHandler + ?Sized;
+	type RM: Deref<Target=Self::RMT>;
+	type OMT: OnionMessageHandler + ?Sized;
+	type OM: Deref<Target=Self::OMT>;
+	type LT: Logger + ?Sized;
+	type L: Deref<Target=Self::LT>;
+	type CMHT: CustomMessageHandler + ?Sized;
+	type CMH: Deref<Target=Self::CMHT>;
+	type NST: NodeSigner + ?Sized;
+	type NS: Deref<Target=Self::NST>;
+	/// Gets a reference to the underlying [`PeerManager`].
+	fn as_ref(&self) -> &PeerManager<Self::Descriptor, Self::CM, Self::RM, Self::OM, Self::L, Self::CMH, Self::NS>;
+}
+
+impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref, NS: Deref>
+APeerManager for PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> where
+	CM::Target: ChannelMessageHandler,
+	RM::Target: RoutingMessageHandler,
+	OM::Target: OnionMessageHandler,
+	L::Target: Logger,
+	CMH::Target: CustomMessageHandler,
+	NS::Target: NodeSigner,
+{
+	type Descriptor = Descriptor;
+	type CMT = <CM as Deref>::Target;
+	type CM = CM;
+	type RMT = <RM as Deref>::Target;
+	type RM = RM;
+	type OMT = <OM as Deref>::Target;
+	type OM = OM;
+	type LT = <L as Deref>::Target;
+	type L = L;
+	type CMHT = <CMH as Deref>::Target;
+	type CMH = CMH;
+	type NST = <NS as Deref>::Target;
+	type NS = NS;
+	fn as_ref(&self) -> &PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> { self }
+}
+
 /// A PeerManager manages a set of peers, described by their [`SocketDescriptor`] and marshalls
 /// socket events into messages which it passes on to its [`MessageHandler`].
 ///