From be6f263825e0c75d32d6d48fd5dff9986ca6b011 Mon Sep 17 00:00:00 2001
From: Matt Corallo <git@bluematt.me>
Date: Tue, 21 Feb 2023 19:10:43 +0000
Subject: [PATCH] Remove the `peer_disconnected` `no_connection_possible` flag

Long ago, we used the `no_connection_possible` flag to signal that a
peer has some unknown feature set or some other condition prevents
us from ever connecting to the given peer. In that case we'd
automatically force-close all channels with the given peer. This
was somewhat surprising to users so we removed the automatic
force-close, leaving the flag serving no LDK-internal purpose.

Distilling the concept of "can we connect to this peer again in the
future" to a simple flag turns out to be ripe with edge cases, so
users actually using the flag to force-close channels would likely
cause surprising behavior.

Thus, there's really not a lot of reason to keep the flag,
especially given that it's untested and likely to be broken in subtle
ways anyway.
---
 fuzz/src/chanmon_consistency.rs               | 16 ++--
 lightning-invoice/src/utils.rs                |  4 +-
 lightning-net-tokio/src/lib.rs                |  2 +-
 lightning/src/ln/chanmon_update_fail_tests.rs | 52 ++++++-------
 lightning/src/ln/channelmanager.rs            | 12 +--
 lightning/src/ln/functional_tests.rs          | 58 +++++++-------
 lightning/src/ln/msgs.rs                      | 15 +---
 lightning/src/ln/onion_route_tests.rs         |  4 +-
 lightning/src/ln/payment_tests.rs             | 22 +++---
 lightning/src/ln/peer_handler.rs              | 77 ++++++++-----------
 lightning/src/ln/priv_short_conf_tests.rs     |  4 +-
 lightning/src/ln/reload_tests.rs              | 38 ++++-----
 lightning/src/ln/shutdown_tests.rs            |  8 +-
 lightning/src/onion_message/messenger.rs      |  2 +-
 lightning/src/util/test_utils.rs              |  2 +-
 15 files changed, 145 insertions(+), 171 deletions(-)

diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index a088f64f4..457e1e45b 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -978,16 +978,16 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
 
 			0x0c => {
 				if !chan_a_disconnected {
-					nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
-					nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
+					nodes[0].peer_disconnected(&nodes[1].get_our_node_id());
+					nodes[1].peer_disconnected(&nodes[0].get_our_node_id());
 					chan_a_disconnected = true;
 					drain_msg_events_on_disconnect!(0);
 				}
 			},
 			0x0d => {
 				if !chan_b_disconnected {
-					nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
-					nodes[2].peer_disconnected(&nodes[1].get_our_node_id(), false);
+					nodes[1].peer_disconnected(&nodes[2].get_our_node_id());
+					nodes[2].peer_disconnected(&nodes[1].get_our_node_id());
 					chan_b_disconnected = true;
 					drain_msg_events_on_disconnect!(2);
 				}
@@ -1039,7 +1039,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
 
 			0x2c => {
 				if !chan_a_disconnected {
-					nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
+					nodes[1].peer_disconnected(&nodes[0].get_our_node_id());
 					chan_a_disconnected = true;
 					drain_msg_events_on_disconnect!(0);
 				}
@@ -1053,14 +1053,14 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
 			},
 			0x2d => {
 				if !chan_a_disconnected {
-					nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
+					nodes[0].peer_disconnected(&nodes[1].get_our_node_id());
 					chan_a_disconnected = true;
 					nodes[0].get_and_clear_pending_msg_events();
 					ab_events.clear();
 					ba_events.clear();
 				}
 				if !chan_b_disconnected {
-					nodes[2].peer_disconnected(&nodes[1].get_our_node_id(), false);
+					nodes[2].peer_disconnected(&nodes[1].get_our_node_id());
 					chan_b_disconnected = true;
 					nodes[2].get_and_clear_pending_msg_events();
 					bc_events.clear();
@@ -1072,7 +1072,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
 			},
 			0x2e => {
 				if !chan_b_disconnected {
-					nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
+					nodes[1].peer_disconnected(&nodes[2].get_our_node_id());
 					chan_b_disconnected = true;
 					drain_msg_events_on_disconnect!(2);
 				}
diff --git a/lightning-invoice/src/utils.rs b/lightning-invoice/src/utils.rs
index f6cc87fa7..868f0b297 100644
--- a/lightning-invoice/src/utils.rs
+++ b/lightning-invoice/src/utils.rs
@@ -842,13 +842,13 @@ mod test {
 
 		// With only one sufficient-value peer connected we should only get its hint
 		scid_aliases.remove(&chan_b.0.short_channel_id_alias.unwrap());
-		nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
+		nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());
 		match_invoice_routes(Some(1_000_000_000), &nodes[0], scid_aliases.clone());
 
 		// If we don't have any sufficient-value peers connected we should get all hints with
 		// sufficient value, even though there is a connected insufficient-value peer.
 		scid_aliases.insert(chan_b.0.short_channel_id_alias.unwrap());
-		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 		match_invoice_routes(Some(1_000_000_000), &nodes[0], scid_aliases);
 	}
 
diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs
index b259f77ef..58d3d0a8a 100644
--- a/lightning-net-tokio/src/lib.rs
+++ b/lightning-net-tokio/src/lib.rs
@@ -643,7 +643,7 @@ mod tests {
 		fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &UpdateFee) {}
 		fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &AnnouncementSignatures) {}
 		fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &ChannelUpdate) {}
-		fn peer_disconnected(&self, their_node_id: &PublicKey, _no_connection_possible: bool) {
+		fn peer_disconnected(&self, their_node_id: &PublicKey) {
 			if *their_node_id == self.expected_pubkey {
 				self.disconnected_flag.store(true, Ordering::SeqCst);
 				self.pubkey_disconnected.clone().try_send(()).unwrap();
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index e1ea256a6..4385fb718 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -181,8 +181,8 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 	assert_eq!(nodes[0].node.list_channels().len(), 1);
 
 	if disconnect {
-		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 		reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 	}
 
@@ -234,8 +234,8 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 	assert_eq!(nodes[0].node.list_channels().len(), 1);
 
 	if disconnect {
-		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 	}
 
@@ -337,8 +337,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 	};
 
 	if disconnect_count & !disconnect_flags > 0 {
-		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	}
 
 	// Now fix monitor updating...
@@ -348,8 +348,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 	check_added_monitors!(nodes[0], 0);
 
 	macro_rules! disconnect_reconnect_peers { () => { {
-		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 		nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
 		let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
@@ -1110,8 +1110,8 @@ fn test_monitor_update_fail_reestablish() {
 
 	let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
 
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
 	nodes[2].node.claim_funds(payment_preimage);
 	check_added_monitors!(nodes[2], 1);
@@ -1146,8 +1146,8 @@ fn test_monitor_update_fail_reestablish() {
 	nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell
 	check_added_monitors!(nodes[1], 1);
 
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
 	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
 	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
@@ -1315,8 +1315,8 @@ fn claim_while_disconnected_monitor_update_fail() {
 	// Forward a payment for B to claim
 	let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	nodes[1].node.claim_funds(payment_preimage_1);
 	check_added_monitors!(nodes[1], 1);
@@ -1451,8 +1451,8 @@ fn monitor_failed_no_reestablish_response() {
 
 	// Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
 	// is still failing to update monitors.
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
 	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
@@ -1873,8 +1873,8 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
 	}
 
 	// Make sure nodes[1] isn't stupid enough to re-send the ChannelReady on reconnect
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -2041,8 +2041,8 @@ fn test_pending_update_fee_ack_on_reconnect() {
 	let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 	// bs_first_raa is not delivered until it is re-generated after reconnect
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
 	let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
@@ -2169,8 +2169,8 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) {
 		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 	}
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
 	let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
@@ -2303,9 +2303,9 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
 			let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
 			reload_node!(nodes[0], &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
 		} else {
-			nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+			nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 		}
-		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 		// Now reconnect the two
 		nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
@@ -2493,8 +2493,8 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
 		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 	}
 
-	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
-	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
 	if second_fails {
 		reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 1831cf057..097312dd1 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -6270,13 +6270,13 @@ where
 		let _ = handle_error!(self, self.internal_channel_reestablish(counterparty_node_id, msg), *counterparty_node_id);
 	}
 
-	fn peer_disconnected(&self, counterparty_node_id: &PublicKey, no_connection_possible: bool) {
+	fn peer_disconnected(&self, counterparty_node_id: &PublicKey) {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 		let mut failed_channels = Vec::new();
 		let mut per_peer_state = self.per_peer_state.write().unwrap();
 		let remove_peer = {
-			log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
-				log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
+			log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates.",
+				log_pubkey!(counterparty_node_id));
 			if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
 				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 				let peer_state = &mut *peer_state_lock;
@@ -6332,7 +6332,7 @@ where
 
 	fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init) -> Result<(), ()> {
 		if !init_msg.features.supports_static_remote_key() {
-			log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting with no_connection_possible", log_pubkey!(counterparty_node_id));
+			log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
 			return Err(());
 		}
 
@@ -8210,8 +8210,8 @@ mod tests {
 
 		let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 
-		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 		nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
 		check_closed_broadcast!(nodes[0], true);
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 9055d4e76..7e2532f1f 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -3509,8 +3509,8 @@ fn test_dup_events_on_peer_disconnect() {
 	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]);
 	expect_payment_sent_without_paths!(nodes[0], payment_preimage);
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
 	expect_payment_path_successful!(nodes[0]);
@@ -3550,8 +3550,8 @@ fn test_peer_disconnected_before_funding_broadcasted() {
 
 	// Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` when the peers are
 	// disconnected before the funding transaction was broadcasted.
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer);
 	check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
@@ -3567,8 +3567,8 @@ fn test_simple_peer_disconnect() {
 	create_announced_chan_between_nodes(&nodes, 0, 1);
 	create_announced_chan_between_nodes(&nodes, 1, 2);
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
 	let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
@@ -3576,8 +3576,8 @@ fn test_simple_peer_disconnect() {
 	fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2);
 	claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1);
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
 	let (payment_preimage_3, payment_hash_3, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000);
@@ -3585,8 +3585,8 @@ fn test_simple_peer_disconnect() {
 	let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
 	let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_preimage_3);
 	fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_hash_5);
@@ -3680,8 +3680,8 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
 		}
 	}
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	if messages_delivered < 3 {
 		if simulate_broken_lnd {
 			// lnd has a long-standing bug where they send a channel_ready prior to a
@@ -3730,8 +3730,8 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
 		};
 	}
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
 	nodes[1].node.process_pending_htlc_forwards();
@@ -3813,8 +3813,8 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
 		}
 	}
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	if messages_delivered < 2 {
 		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
 		if messages_delivered < 1 {
@@ -3840,8 +3840,8 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
 		expect_payment_path_successful!(nodes[0]);
 	}
 	if messages_delivered <= 5 {
-		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	}
 	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
@@ -3955,8 +3955,8 @@ fn test_drop_messages_peer_disconnect_dual_htlc() {
 		_ => panic!("Unexpected event"),
 	}
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
 	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
@@ -6257,8 +6257,8 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
 
 	//Disconnect and Reconnect
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
 	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 	assert_eq!(reestablish_1.len(), 1);
@@ -6990,8 +6990,8 @@ fn test_announce_disable_channels() {
 	create_announced_chan_between_nodes(&nodes, 0, 1);
 
 	// Disconnect peers
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	nodes[0].node.timer_tick_occurred(); // Enabled -> DisabledStaged
 	nodes[0].node.timer_tick_occurred(); // DisabledStaged -> Disabled
@@ -8790,13 +8790,11 @@ fn test_error_chans_closed() {
 		_ => panic!("Unexpected event"),
 	}
 	// Note that at this point users of a standard PeerHandler will end up calling
-	// peer_disconnected with no_connection_possible set to false, duplicating the
-	// close-all-channels logic. That's OK, we don't want to end up not force-closing channels for
-	// users with their own peer handling logic. We duplicate the call here, however.
+	// peer_disconnected.
 	assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
 	assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
 	assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
 }
@@ -8912,8 +8910,8 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 	create_announced_chan_between_nodes(&nodes, 0, 1);
 	let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
 	let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
-	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
-	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
 	nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
 	check_closed_broadcast!(nodes[1], true);
diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs
index e8b40c71d..d43dff6e1 100644
--- a/lightning/src/ln/msgs.rs
+++ b/lightning/src/ln/msgs.rs
@@ -993,14 +993,8 @@ pub trait ChannelMessageHandler : MessageSendEventsProvider {
 	fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &AnnouncementSignatures);
 
 	// Connection loss/reestablish:
-	/// Indicates a connection to the peer failed/an existing connection was lost. If no connection
-	/// is believed to be possible in the future (eg they're sending us messages we don't
-	/// understand or indicate they require unknown feature bits), `no_connection_possible` is set
-	/// and any outstanding channels should be failed.
-	///
-	/// Note that in some rare cases this may be called without a corresponding
-	/// [`Self::peer_connected`].
-	fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool);
+	/// Indicates a connection to the peer failed/an existing connection was lost.
+	fn peer_disconnected(&self, their_node_id: &PublicKey);
 
 	/// Handle a peer reconnecting, possibly generating `channel_reestablish` message(s).
 	///
@@ -1115,10 +1109,7 @@ pub trait OnionMessageHandler : OnionMessageProvider {
 	fn peer_connected(&self, their_node_id: &PublicKey, init: &Init) -> Result<(), ()>;
 	/// Indicates a connection to the peer failed/an existing connection was lost. Allows handlers to
 	/// drop and refuse to forward onion messages to this peer.
-	///
-	/// Note that in some rare cases this may be called without a corresponding
-	/// [`Self::peer_connected`].
-	fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool);
+	fn peer_disconnected(&self, their_node_id: &PublicKey);
 
 	// Handler information:
 	/// Gets the node feature flags which this handler itself supports. All available handlers are
diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs
index fbc8e1c9e..8f266bc17 100644
--- a/lightning/src/ln/onion_route_tests.rs
+++ b/lightning/src/ln/onion_route_tests.rs
@@ -579,8 +579,8 @@ fn test_onion_failure() {
 	let short_channel_id = channels[1].0.contents.short_channel_id;
 	run_onion_failure_test("channel_disabled", 0, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || {
 		// disconnect event to the channel between nodes[1] ~ nodes[2]
-		nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
-		nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+		nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+		nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	}, true, Some(UPDATE|20), Some(NetworkUpdate::ChannelUpdateMessage{msg: ChannelUpdate::dummy(short_channel_id)}), Some(short_channel_id));
 	reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index 4fce8198d..4f51d2f81 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -350,8 +350,8 @@ fn no_pending_leak_on_initial_send_failure() {
 
 	let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)),
 		true, APIError::ChannelUnavailable { ref err },
@@ -401,8 +401,8 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 	// We relay the payment to nodes[1] while its disconnected from nodes[2], causing the payment
 	// to be returned immediately to nodes[0], without having nodes[2] fail the inbound payment
 	// which would prevent retry.
-	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
-	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
 	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true);
@@ -431,7 +431,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 	assert_eq!(as_broadcasted_txn.len(), 1);
 	assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
 
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
@@ -587,7 +587,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 	let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
 
 	reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized], first_persister, first_new_chain_monitor, first_nodes_0_deserialized);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	// On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
 	// force-close the channel.
@@ -679,7 +679,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 	assert!(!nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
 	reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], second_persister, second_new_chain_monitor, second_nodes_0_deserialized);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
@@ -699,7 +699,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 
 	// Ensure that after reload we cannot retry the payment.
 	reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], third_persister, third_new_chain_monitor, third_nodes_0_deserialized);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
@@ -741,8 +741,8 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
 	check_added_monitors!(nodes[0], 1);
 	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	// Connect blocks until the CLTV timeout is up so that we get an HTLC-Timeout transaction
 	connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
@@ -893,7 +893,7 @@ fn test_fulfill_restart_failure() {
 	// Now reload nodes[1]...
 	reload_node!(nodes[1], &chan_manager_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
 	nodes[1].node.fail_htlc_backwards(&payment_hash);
diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index be778708c..995901a3b 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -89,7 +89,7 @@ impl OnionMessageProvider for IgnoringMessageHandler {
 impl OnionMessageHandler for IgnoringMessageHandler {
 	fn handle_onion_message(&self, _their_node_id: &PublicKey, _msg: &msgs::OnionMessage) {}
 	fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) -> Result<(), ()> { Ok(()) }
-	fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
+	fn peer_disconnected(&self, _their_node_id: &PublicKey) {}
 	fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
 	fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
 		InitFeatures::empty()
@@ -223,7 +223,7 @@ impl ChannelMessageHandler for ErroringMessageHandler {
 	}
 	// msgs::ChannelUpdate does not contain the channel_id field, so we just drop them.
 	fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelUpdate) {}
-	fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
+	fn peer_disconnected(&self, _their_node_id: &PublicKey) {}
 	fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) -> Result<(), ()> { Ok(()) }
 	fn handle_error(&self, _their_node_id: &PublicKey, _msg: &msgs::ErrorMessage) {}
 	fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
@@ -315,16 +315,7 @@ pub trait SocketDescriptor : cmp::Eq + hash::Hash + Clone {
 /// generate no further read_event/write_buffer_space_avail/socket_disconnected calls for the
 /// descriptor.
 #[derive(Clone)]
-pub struct PeerHandleError {
-	/// Used to indicate that we probably can't make any future connections to this peer (e.g.
-	/// because we required features that our peer was missing, or vice versa).
-	///
-	/// While LDK's [`ChannelManager`] will not do it automatically, you likely wish to force-close
-	/// any channels with this peer or check for new versions of LDK.
-	///
-	/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
-	pub no_connection_possible: bool,
-}
+pub struct PeerHandleError { }
 impl fmt::Debug for PeerHandleError {
 	fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
 		formatter.write_str("Peer Sent Invalid Data")
@@ -1009,7 +1000,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 				// This is most likely a simple race condition where the user found that the socket
 				// was writeable, then we told the user to `disconnect_socket()`, then they called
 				// this method. Return an error to make sure we get disconnected.
-				return Err(PeerHandleError { no_connection_possible: false });
+				return Err(PeerHandleError { });
 			},
 			Some(peer_mutex) => {
 				let mut peer = peer_mutex.lock().unwrap();
@@ -1042,7 +1033,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 			Ok(res) => Ok(res),
 			Err(e) => {
 				log_trace!(self.logger, "Peer sent invalid data or we decided to disconnect due to a protocol error");
-				self.disconnect_event_internal(peer_descriptor, e.no_connection_possible);
+				self.disconnect_event_internal(peer_descriptor);
 				Err(e)
 			}
 		}
@@ -1075,7 +1066,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 				// This is most likely a simple race condition where the user read some bytes
 				// from the socket, then we told the user to `disconnect_socket()`, then they
 				// called this method. Return an error to make sure we get disconnected.
-				return Err(PeerHandleError { no_connection_possible: false });
+				return Err(PeerHandleError { });
 			},
 			Some(peer_mutex) => {
 				let mut read_pos = 0;
@@ -1089,7 +1080,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 										msgs::ErrorAction::DisconnectPeer { msg: _ } => {
 											//TODO: Try to push msg
 											log_debug!(self.logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer_node_id), e.err);
-											return Err(PeerHandleError{ no_connection_possible: false });
+											return Err(PeerHandleError { });
 										},
 										msgs::ErrorAction::IgnoreAndLog(level) => {
 											log_given_level!(self.logger, level, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer_node_id), e.err);
@@ -1142,7 +1133,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 									hash_map::Entry::Occupied(_) => {
 										log_trace!(self.logger, "Got second connection with {}, closing", log_pubkey!(peer.their_node_id.unwrap().0));
 										peer.their_node_id = None; // Unset so that we don't generate a peer_disconnected event
-										return Err(PeerHandleError{ no_connection_possible: false })
+										return Err(PeerHandleError { })
 									},
 									hash_map::Entry::Vacant(entry) => {
 										log_debug!(self.logger, "Finished noise handshake for connection with {}", log_pubkey!(peer.their_node_id.unwrap().0));
@@ -1199,7 +1190,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 									if peer.pending_read_buffer.capacity() > 8192 { peer.pending_read_buffer = Vec::new(); }
 									peer.pending_read_buffer.resize(msg_len as usize + 16, 0);
 									if msg_len < 2 { // Need at least the message type tag
-										return Err(PeerHandleError{ no_connection_possible: false });
+										return Err(PeerHandleError { });
 									}
 									peer.pending_read_is_header = false;
 								} else {
@@ -1242,19 +1233,19 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 												(msgs::DecodeError::UnknownRequiredFeature, ty) => {
 													log_gossip!(self.logger, "Received a message with an unknown required feature flag or TLV, you may want to update!");
 													self.enqueue_message(peer, &msgs::WarningMessage { channel_id: [0; 32], data: format!("Received an unknown required feature/TLV in message type {:?}", ty) });
-													return Err(PeerHandleError { no_connection_possible: false });
+													return Err(PeerHandleError { });
 												}
-												(msgs::DecodeError::UnknownVersion, _) => return Err(PeerHandleError { no_connection_possible: false }),
+												(msgs::DecodeError::UnknownVersion, _) => return Err(PeerHandleError { }),
 												(msgs::DecodeError::InvalidValue, _) => {
 													log_debug!(self.logger, "Got an invalid value while deserializing message");
-													return Err(PeerHandleError { no_connection_possible: false });
+													return Err(PeerHandleError { });
 												}
 												(msgs::DecodeError::ShortRead, _) => {
 													log_debug!(self.logger, "Deserialization failed due to shortness of message");
-													return Err(PeerHandleError { no_connection_possible: false });
+													return Err(PeerHandleError { });
 												}
-												(msgs::DecodeError::BadLengthDescriptor, _) => return Err(PeerHandleError { no_connection_possible: false }),
-												(msgs::DecodeError::Io(_), _) => return Err(PeerHandleError { no_connection_possible: false }),
+												(msgs::DecodeError::BadLengthDescriptor, _) => return Err(PeerHandleError { }),
+												(msgs::DecodeError::Io(_), _) => return Err(PeerHandleError { }),
 											}
 										}
 									};
@@ -1306,10 +1297,10 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 		if let wire::Message::Init(msg) = message {
 			if msg.features.requires_unknown_bits() {
 				log_debug!(self.logger, "Peer features required unknown version bits");
-				return Err(PeerHandleError{ no_connection_possible: true }.into());
+				return Err(PeerHandleError { }.into());
 			}
 			if peer_lock.their_features.is_some() {
-				return Err(PeerHandleError{ no_connection_possible: false }.into());
+				return Err(PeerHandleError { }.into());
 			}
 
 			log_info!(self.logger, "Received peer Init message from {}: {}", log_pubkey!(their_node_id), msg.features);
@@ -1321,22 +1312,22 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
 			if let Err(()) = self.message_handler.route_handler.peer_connected(&their_node_id, &msg) {
 				log_debug!(self.logger, "Route Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
-				return Err(PeerHandleError{ no_connection_possible: true }.into());
+				return Err(PeerHandleError { }.into());
 			}
 			if let Err(()) = self.message_handler.chan_handler.peer_connected(&their_node_id, &msg) {
 				log_debug!(self.logger, "Channel Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
-				return Err(PeerHandleError{ no_connection_possible: true }.into());
+				return Err(PeerHandleError { }.into());
 			}
 			if let Err(()) = self.message_handler.onion_message_handler.peer_connected(&their_node_id, &msg) {
 				log_debug!(self.logger, "Onion Message Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
-				return Err(PeerHandleError{ no_connection_possible: true }.into());
+				return Err(PeerHandleError { }.into());
 			}
 
 			peer_lock.their_features = Some(msg.features);
 			return Ok(None);
 		} else if peer_lock.their_features.is_none() {
 			log_debug!(self.logger, "Peer {} sent non-Init first message", log_pubkey!(their_node_id));
-			return Err(PeerHandleError{ no_connection_possible: false }.into());
+			return Err(PeerHandleError { }.into());
 		}
 
 		if let wire::Message::GossipTimestampFilter(_msg) = message {
@@ -1388,7 +1379,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 				}
 				self.message_handler.chan_handler.handle_error(&their_node_id, &msg);
 				if msg.channel_id == [0; 32] {
-					return Err(PeerHandleError{ no_connection_possible: true }.into());
+					return Err(PeerHandleError { }.into());
 				}
 			},
 			wire::Message::Warning(msg) => {
@@ -1518,8 +1509,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 			// Unknown messages:
 			wire::Message::Unknown(type_id) if message.is_even() => {
 				log_debug!(self.logger, "Received unknown even message of type {}, disconnecting peer!", type_id);
-				// Fail the channel if message is an even, unknown type as per BOLT #1.
-				return Err(PeerHandleError{ no_connection_possible: true }.into());
+				return Err(PeerHandleError { }.into());
 			},
 			wire::Message::Unknown(type_id) => {
 				log_trace!(self.logger, "Received unknown odd message of type {}, ignoring", type_id);
@@ -1920,7 +1910,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
 	/// Indicates that the given socket descriptor's connection is now closed.
 	pub fn socket_disconnected(&self, descriptor: &Descriptor) {
-		self.disconnect_event_internal(descriptor, false);
+		self.disconnect_event_internal(descriptor);
 	}
 
 	fn do_disconnect(&self, mut descriptor: Descriptor, peer: &Peer, reason: &'static str) {
@@ -1933,13 +1923,13 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 		debug_assert!(peer.their_node_id.is_some());
 		if let Some((node_id, _)) = peer.their_node_id {
 			log_trace!(self.logger, "Disconnecting peer with id {} due to {}", node_id, reason);
-			self.message_handler.chan_handler.peer_disconnected(&node_id, false);
-			self.message_handler.onion_message_handler.peer_disconnected(&node_id, false);
+			self.message_handler.chan_handler.peer_disconnected(&node_id);
+			self.message_handler.onion_message_handler.peer_disconnected(&node_id);
 		}
 		descriptor.disconnect_socket();
 	}
 
-	fn disconnect_event_internal(&self, descriptor: &Descriptor, no_connection_possible: bool) {
+	fn disconnect_event_internal(&self, descriptor: &Descriptor) {
 		let mut peers = self.peers.write().unwrap();
 		let peer_option = peers.remove(descriptor);
 		match peer_option {
@@ -1953,12 +1943,10 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 				if !peer.handshake_complete() { return; }
 				debug_assert!(peer.their_node_id.is_some());
 				if let Some((node_id, _)) = peer.their_node_id {
-					log_trace!(self.logger,
-						"Handling disconnection of peer {}, with {}future connection to the peer possible.",
-						log_pubkey!(node_id), if no_connection_possible { "no " } else { "" });
+					log_trace!(self.logger, "Handling disconnection of peer {}", log_pubkey!(node_id));
 					self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
-					self.message_handler.chan_handler.peer_disconnected(&node_id, no_connection_possible);
-					self.message_handler.onion_message_handler.peer_disconnected(&node_id, no_connection_possible);
+					self.message_handler.chan_handler.peer_disconnected(&node_id);
+					self.message_handler.onion_message_handler.peer_disconnected(&node_id);
 				}
 			}
 		};
@@ -1966,14 +1954,11 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
 	/// Disconnect a peer given its node id.
 	///
-	/// Set `no_connection_possible` to true to prevent any further connection with this peer,
-	/// force-closing any channels we have with it.
-	///
 	/// If a peer is connected, this will call [`disconnect_socket`] on the descriptor for the
 	/// peer. Thus, be very careful about reentrancy issues.
 	///
 	/// [`disconnect_socket`]: SocketDescriptor::disconnect_socket
-	pub fn disconnect_by_node_id(&self, node_id: PublicKey, _no_connection_possible: bool) {
+	pub fn disconnect_by_node_id(&self, node_id: PublicKey) {
 		let mut peers_lock = self.peers.write().unwrap();
 		if let Some(descriptor) = self.node_id_to_descriptor.lock().unwrap().remove(&node_id) {
 			let peer_opt = peers_lock.remove(&descriptor);
diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs
index adc2b59ab..b47380dfd 100644
--- a/lightning/src/ln/priv_short_conf_tests.rs
+++ b/lightning/src/ln/priv_short_conf_tests.rs
@@ -90,8 +90,8 @@ fn test_priv_forwarding_rejection() {
 	// Now disconnect nodes[1] from its peers and restart with accept_forwards_to_priv_channels set
 	// to true. Sadly there is currently no way to change it at runtime.
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
 	let nodes_1_serialized = nodes[1].node.encode();
 	let monitor_a_serialized = get_monitor!(nodes[1], chan_id_1).encode();
diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs
index 8e5056dc6..6643d34ca 100644
--- a/lightning/src/ln/reload_tests.rs
+++ b/lightning/src/ln/reload_tests.rs
@@ -44,8 +44,8 @@ fn test_funding_peer_disconnect() {
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 	let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001);
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	confirm_transaction(&nodes[0], &tx);
 	let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
@@ -53,8 +53,8 @@ fn test_funding_peer_disconnect() {
 
 	reconnect_nodes(&nodes[0], &nodes[1], (false, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	confirm_transaction(&nodes[1], &tx);
 	let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
@@ -169,7 +169,7 @@ fn test_funding_peer_disconnect() {
 
 	// Check that after deserialization and reconnection we can still generate an identical
 	// channel_announcement from the cached signatures.
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
 
@@ -190,7 +190,7 @@ fn test_no_txn_manager_serialize_deserialize() {
 
 	let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001);
 
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	let chan_0_monitor_serialized =
 		get_monitor!(nodes[0], OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).encode();
@@ -267,7 +267,7 @@ fn test_manager_serialize_deserialize_events() {
 	let chan_0_monitor_serialized = get_monitor!(nodes[0], bs_funding_signed.channel_id).encode();
 	reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
 
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	// After deserializing, make sure the funding_transaction is still held by the channel manager
 	let events_4 = nodes[0].node.get_and_clear_pending_events();
@@ -313,7 +313,7 @@ fn test_simple_manager_serialize_deserialize() {
 	let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 	let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
 	reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
@@ -353,9 +353,9 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 	let nodes_0_serialized = nodes[0].node.encode();
 
 	route_payment(&nodes[0], &[&nodes[3]], 1000000);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-	nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-	nodes[3].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+	nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+	nodes[3].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	// Now the ChannelMonitor (which is now out-of-sync with ChannelManager for channel w/
 	// nodes[3])
@@ -488,8 +488,8 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) {
 	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
 	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	reload_node!(nodes[0], previous_node_state, &[&previous_chain_monitor_state], persister, new_chain_monitor, nodes_0_deserialized);
 
@@ -627,8 +627,8 @@ fn test_forwardable_regen() {
 	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
 	// Now restart nodes[1] and make sure it regenerates a single PendingHTLCsForwardable
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
 	let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
 	let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
@@ -753,8 +753,8 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
 	assert!(get_monitor!(nodes[3], chan_id_persisted).get_stored_preimages().contains_key(&payment_hash));
 	assert!(get_monitor!(nodes[3], chan_id_not_persisted).get_stored_preimages().contains_key(&payment_hash));
 
-	nodes[1].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
-	nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[3].node.get_our_node_id());
+	nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id());
 
 	// During deserialization, we should have closed one channel and broadcast its latest
 	// commitment transaction. We should also still have the original PaymentClaimable event we
@@ -923,7 +923,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
 	let bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 	assert_eq!(bs_commitment_tx.len(), 1);
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
 	if use_cs_commitment {
@@ -1038,7 +1038,7 @@ fn removed_payment_no_manager_persistence() {
 	// Now that the ChannelManager has force-closed the channel which had the HTLC removed, it is
 	// now forgotten everywhere. The ChannelManager should have, as a side-effect of reload,
 	// learned that the HTLC is gone from the ChannelMonitor and added it to the to-fail-back set.
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
 	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs
index 5a5f05484..59c9911df 100644
--- a/lightning/src/ln/shutdown_tests.rs
+++ b/lightning/src/ln/shutdown_tests.rs
@@ -243,8 +243,8 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
 		}
 	}
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
 	let node_0_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
@@ -305,8 +305,8 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
 		assert!(node_0_2nd_closing_signed.is_some());
 	}
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
 	let node_1_2nd_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
diff --git a/lightning/src/onion_message/messenger.rs b/lightning/src/onion_message/messenger.rs
index 497628607..b174e5229 100644
--- a/lightning/src/onion_message/messenger.rs
+++ b/lightning/src/onion_message/messenger.rs
@@ -426,7 +426,7 @@ impl<ES: Deref, NS: Deref, L: Deref, CMH: Deref> OnionMessageHandler for OnionMe
 		Ok(())
 	}
 
-	fn peer_disconnected(&self, their_node_id: &PublicKey, _no_connection_possible: bool) {
+	fn peer_disconnected(&self, their_node_id: &PublicKey) {
 		let mut pending_msgs = self.pending_messages.lock().unwrap();
 		pending_msgs.remove(their_node_id);
 	}
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index e50576e1a..55abf6818 100644
--- a/lightning/src/util/test_utils.rs
+++ b/lightning/src/util/test_utils.rs
@@ -423,7 +423,7 @@ impl msgs::ChannelMessageHandler for TestChannelMessageHandler {
 	fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
 		self.received_msg(wire::Message::ChannelReestablish(msg.clone()));
 	}
-	fn peer_disconnected(&self, their_node_id: &PublicKey, _no_connection_possible: bool) {
+	fn peer_disconnected(&self, their_node_id: &PublicKey) {
 		assert!(self.connected_peers.lock().unwrap().remove(their_node_id));
 	}
 	fn peer_connected(&self, their_node_id: &PublicKey, _msg: &msgs::Init) -> Result<(), ()> {