From e142f67917c56bee74af1e0f33bbede2803e4b15 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Tue, 15 Nov 2022 23:35:31 +0000
Subject: [PATCH 1/3] Expose the full set of outbound HTLCs from a
 `ChannelMonitor`

This expands the outbound-HTLC-listing support in `ChannelMonitor` to
include not only the set of outbound HTLCs which have not yet been
resolved but also the full set of HTLCs which the `ChannelMonitor` is
currently able to resolve or has already finalized. This will be used
in the next commit to fail back HTLCs which were removed from a channel
in the ChannelMonitor but not in a Channel.

Using the existing `get_pending_outbound_htlcs` for this purpose is
subtly broken - if the channel is already closed, an HTLC fail may have
completed on chain and is no longer "pending" to the monitor, but the
fail event is still in the monitor waiting to be handed back to the
`ChannelManager` when polled.
---
 lightning/src/chain/channelmonitor.rs | 108 +++++++++++++++-----------
 1 file changed, 62 insertions(+), 46 deletions(-)

diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index acca4ca8f..5f08c24b7 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -1828,12 +1828,60 @@ impl ChannelMonitor {
 		res
 	}
 
+	/// Gets the set of outbound HTLCs which can be (or have been) resolved by this
+	/// `ChannelMonitor`. This is used to determine if an HTLC was removed from the channel prior
+	/// to the `ChannelManager` having been persisted.
+	///
+	/// This is similar to [`Self::get_pending_outbound_htlcs`] except it includes HTLCs which were
+	/// resolved by this `ChannelMonitor`.
+	pub(crate) fn get_all_current_outbound_htlcs(&self) -> HashMap<HTLCSource, HTLCOutputInCommitment> {
+		let mut res = HashMap::new();
+		// Just examine the available counterparty commitment transactions. See docs on
+		// `fail_unbroadcast_htlcs`, below, for justification.
+		let us = self.inner.lock().unwrap();
+		macro_rules! walk_counterparty_commitment {
+			($txid: expr) => {
+				if let Some(ref latest_outpoints) = us.counterparty_claimable_outpoints.get($txid) {
+					for &(ref htlc, ref source_option) in latest_outpoints.iter() {
+						if let &Some(ref source) = source_option {
+							res.insert((**source).clone(), htlc.clone());
+						}
+					}
+				}
+			}
+		}
+		if let Some(ref txid) = us.current_counterparty_commitment_txid {
+			walk_counterparty_commitment!(txid);
+		}
+		if let Some(ref txid) = us.prev_counterparty_commitment_txid {
+			walk_counterparty_commitment!(txid);
+		}
+		res
+	}
+
 	/// Gets the set of outbound HTLCs which are pending resolution in this channel.
 	/// This is used to reconstruct pending outbound payments on restart in the ChannelManager.
 	pub(crate) fn get_pending_outbound_htlcs(&self) -> HashMap<HTLCSource, HTLCOutputInCommitment> {
-		let mut res = HashMap::new();
 		let us = self.inner.lock().unwrap();
+		// We're only concerned with the confirmation count of HTLC transactions, and don't
+		// actually care how many confirmations a commitment transaction may or may not have. Thus,
+		// we look for either a FundingSpendConfirmation event or a funding_spend_confirmed.
+		let confirmed_txid = us.funding_spend_confirmed.or_else(|| {
+			us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
+				if let OnchainEvent::FundingSpendConfirmation { .. } = event.event {
+					Some(event.txid)
+				} else { None }
+			})
+		});
+		if confirmed_txid.is_none() {
+			// If we have not seen a commitment transaction on-chain (ie the channel is not yet
+			// closed), just get the full set.
+			mem::drop(us);
+			return self.get_all_current_outbound_htlcs();
+		}
+
+		let mut res = HashMap::new();
 		macro_rules! walk_htlcs {
 			($holder_commitment: expr, $htlc_iter: expr) => {
 				for (htlc, source) in $htlc_iter {
@@ -1869,54 +1917,22 @@ impl ChannelMonitor {
 			}
 		}
 
-		// We're only concerned with the confirmation count of HTLC transactions, and don't
-		// actually care how many confirmations a commitment transaction may or may not have. Thus,
-		// we look for either a FundingSpendConfirmation event or a funding_spend_confirmed.
-		let confirmed_txid = us.funding_spend_confirmed.or_else(|| {
-			us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
-				if let OnchainEvent::FundingSpendConfirmation { .. } = event.event {
-					Some(event.txid)
+		let txid = confirmed_txid.unwrap();
+		if Some(txid) == us.current_counterparty_commitment_txid || Some(txid) == us.prev_counterparty_commitment_txid {
+			walk_htlcs!(false, us.counterparty_claimable_outpoints.get(&txid).unwrap().iter().filter_map(|(a, b)| {
+				if let &Some(ref source) = b {
+					Some((a, &**source))
 				} else { None }
-			})
-		});
-		if let Some(txid) = confirmed_txid {
-			if Some(txid) == us.current_counterparty_commitment_txid || Some(txid) == us.prev_counterparty_commitment_txid {
-				walk_htlcs!(false, us.counterparty_claimable_outpoints.get(&txid).unwrap().iter().filter_map(|(a, b)| {
-					if let &Some(ref source) = b {
-						Some((a, &**source))
-					} else { None }
-				}));
-			} else if txid == us.current_holder_commitment_tx.txid {
-				walk_htlcs!(true, us.current_holder_commitment_tx.htlc_outputs.iter().filter_map(|(a, _, c)| {
+			}));
+		} else if txid == us.current_holder_commitment_tx.txid {
+			walk_htlcs!(true, us.current_holder_commitment_tx.htlc_outputs.iter().filter_map(|(a, _, c)| {
+				if let Some(source) = c { Some((a, source)) } else { None }
+			}));
+		} else if let Some(prev_commitment) = &us.prev_holder_signed_commitment_tx {
+			if txid == prev_commitment.txid {
+				walk_htlcs!(true, prev_commitment.htlc_outputs.iter().filter_map(|(a, _, c)| {
 					if let Some(source) = c { Some((a, source)) } else { None }
 				}));
-			} else if let Some(prev_commitment) = &us.prev_holder_signed_commitment_tx {
-				if txid == prev_commitment.txid {
-					walk_htlcs!(true, prev_commitment.htlc_outputs.iter().filter_map(|(a, _, c)| {
-						if let Some(source) = c { Some((a, source)) } else { None }
-					}));
-				}
-			}
-		} else {
-			// If we have not seen a commitment transaction on-chain (ie the channel is not yet
-			// closed), just examine the available counterparty commitment transactions. See docs
-			// on `fail_unbroadcast_htlcs`, below, for justification.
-			macro_rules! walk_counterparty_commitment {
-				($txid: expr) => {
-					if let Some(ref latest_outpoints) = us.counterparty_claimable_outpoints.get($txid) {
-						for &(ref htlc, ref source_option) in latest_outpoints.iter() {
-							if let &Some(ref source) = source_option {
-								res.insert((**source).clone(), htlc.clone());
-							}
-						}
-					}
-				}
-			}
-			if let Some(ref txid) = us.current_counterparty_commitment_txid {
-				walk_counterparty_commitment!(txid);
-			}
-			if let Some(ref txid) = us.prev_counterparty_commitment_txid {
-				walk_counterparty_commitment!(txid);
-			}
 		}
 	}

From 0bb87ddad71d2e33199ebad79e9f709f869f2130 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Thu, 17 Nov 2022 05:55:45 +0000
Subject: [PATCH 2/3] Avoid attempting to forward to a closed chan on
 stale-data reload

If, after forwarding a payment to our counterparty, we restart with a
ChannelMonitor update having been persisted, but the corresponding
ChannelManager update was not persisted, we'll still have the forwarded
HTLC in the `forward_htlcs` map on start. This will cause us to
generate a (spurious) `PendingHTLCsForwardable` event. However, when we
go to forward said HTLC, we'll notice the channel has been closed and
leave it up to the `ChannelMonitor` to finalize the HTLC.

This is all fine today - we won't lose any funds, we'll just generate
an excess forwardable event and then fail to forward. However, in the
future when we allow for forward-time channel changes this could break.
Thus, it's worth adding tests for this behavior today, and, while we're
at it, removing the spurious forwardable HTLCs event.
---
 lightning/src/ln/channelmanager.rs        |  44 ++++++--
 lightning/src/ln/functional_test_utils.rs |   6 +-
 lightning/src/ln/reload_tests.rs          | 124 +++++++++++++++++++++-
 3 files changed, 160 insertions(+), 14 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 352672647..2d84bb9cd 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -7286,16 +7286,6 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 				None => continue,
 			}
 		}
-		if forward_htlcs_count > 0 {
-			// If we have pending HTLCs to forward, assume we either dropped a
-			// `PendingHTLCsForwardable` or the user received it but never processed it as they
-			// shut down before the timer hit. Either way, set the time_forwardable to a small
-			// constant as enough time has likely passed that we should simply handle the forwards
-			// now, or at least after the user gets a chance to reconnect to our peers.
-			pending_events_read.push(events::Event::PendingHTLCsForwardable {
-				time_forwardable: Duration::from_secs(2),
-			});
-		}

 		let background_event_count: u64 = Readable::read(reader)?;
 		let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
@@ -7404,10 +7394,44 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 							}
 						}
 					}
+					for (htlc_source, htlc) in monitor.get_all_current_outbound_htlcs() {
+						if let HTLCSource::PreviousHopData(prev_hop_data) = htlc_source {
+							// The ChannelMonitor is now responsible for this HTLC's
+							// failure/success and will let us know what its outcome is. If we
+							// still have an entry for this HTLC in `forward_htlcs`, we were
+							// apparently not persisted after the monitor was when forwarding
+							// the payment.
+							forward_htlcs.retain(|_, forwards| {
+								forwards.retain(|forward| {
+									if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
+										if htlc_info.prev_short_channel_id == prev_hop_data.short_channel_id &&
+											htlc_info.prev_htlc_id == prev_hop_data.htlc_id
+										{
+											log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
+												log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
+											false
+										} else { true }
+									} else { true }
+								});
+								!forwards.is_empty()
+							})
+						}
+					}
 				}
 			}
 		}

+		if !forward_htlcs.is_empty() {
+			// If we have pending HTLCs to forward, assume we either dropped a
+			// `PendingHTLCsForwardable` or the user received it but never processed it as they
+			// shut down before the timer hit. Either way, set the time_forwardable to a small
+			// constant as enough time has likely passed that we should simply handle the forwards
+			// now, or at least after the user gets a chance to reconnect to our peers.
+			pending_events_read.push(events::Event::PendingHTLCsForwardable {
+				time_forwardable: Duration::from_secs(2),
+			});
+		}
+
 		let inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material();
 		let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);

diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index 677fdccfd..bc7f62775 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -1091,7 +1091,7 @@ macro_rules! check_closed_event {
 		use $crate::util::events::Event;

 		let events = $node.node.get_and_clear_pending_events();
-		assert_eq!(events.len(), $events);
+		assert_eq!(events.len(), $events, "{:?}", events);
 		let expected_reason = $reason;
 		let mut issues_discard_funding = false;
 		for event in events {
@@ -1350,7 +1350,7 @@ macro_rules! expect_pending_htlcs_forwardable_conditions {
 		let events = $node.node.get_and_clear_pending_events();
 		match events[0] {
 			$crate::util::events::Event::PendingHTLCsForwardable { .. } => { },
-			_ => panic!("Unexpected event"),
+			_ => panic!("Unexpected event {:?}", events),
 		};

 		let count = expected_failures.len() + 1;
@@ -1560,7 +1560,7 @@ macro_rules! expect_payment_forwarded {
 				if !$downstream_force_closed {
 					assert!($node.node.list_channels().iter().any(|x| x.counterparty.node_id == $next_node.node.get_our_node_id() && x.channel_id == next_channel_id.unwrap()));
 				}
-				assert_eq!(claim_from_onchain_tx, $upstream_force_closed);
+				assert_eq!(claim_from_onchain_tx, $downstream_force_closed);
 			},
 			_ => panic!("Unexpected event"),
 		}

diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs
index dc3d53922..7e10f970a 100644
--- a/lightning/src/ln/reload_tests.rs
+++ b/lightning/src/ln/reload_tests.rs
@@ -10,14 +10,16 @@
 //! Functional tests which test for correct behavior across node restarts.

 use crate::chain::{ChannelMonitorUpdateStatus, Watch};
+use crate::chain::chaininterface::LowerBoundedFeeEstimator;
 use crate::chain::channelmonitor::ChannelMonitor;
+use crate::chain::keysinterface::KeysInterface;
 use crate::chain::transaction::OutPoint;
 use crate::ln::channelmanager::{self, ChannelManager, ChannelManagerReadArgs, PaymentId};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
 use crate::util::enforcing_trait_impls::EnforcingSigner;
 use crate::util::test_utils;
-use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
+use crate::util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
 use crate::util::ser::{Writeable, ReadableArgs};
 use crate::util::config::UserConfig;
@@ -811,3 +813,123 @@ fn test_partial_claim_before_restart() {
 	do_test_partial_claim_before_restart(false);
 	do_test_partial_claim_before_restart(true);
 }
+
+fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_htlc: bool) {
+	if !use_cs_commitment { assert!(!claim_htlc); }
+	// If we go to forward a payment, and the ChannelMonitor persistence completes, but the
+	// ChannelManager does not, we shouldn't try to forward the payment again, nor should we fail
+	// it back until the ChannelMonitor decides the fate of the HTLC.
+	// This was never an issue, but it may be easy to regress here going forward.
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+
+	let persister;
+	let new_chain_monitor;
+	let nodes_1_deserialized;
+
+	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+	let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+
+	let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000);
+	let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
+	let htlc_expiry = nodes[0].best_block_info().1 + TEST_FINAL_CLTV;
+	nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), payment_id).unwrap();
+	check_added_monitors!(nodes[0], 1);
+
+	let payment_event = SendEvent::from_node(&nodes[0]);
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+
+	let node_encoded = nodes[1].node.encode();
+
+	expect_pending_htlcs_forwardable!(nodes[1]);
+
+	let payment_event = SendEvent::from_node(&nodes[1]);
+	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
+	nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg);
+	check_added_monitors!(nodes[2], 1);
+
+	if claim_htlc {
+		get_monitor!(nodes[2], chan_id_2).provide_payment_preimage(&payment_hash, &payment_preimage,
+			&nodes[2].tx_broadcaster, &LowerBoundedFeeEstimator(nodes[2].fee_estimator), &nodes[2].logger);
+	}
+	assert!(nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+
+	let _ = nodes[2].node.get_and_clear_pending_msg_events();
+	nodes[2].node.force_close_broadcasting_latest_txn(&chan_id_2, &nodes[1].node.get_our_node_id()).unwrap();
+	let cs_commitment_tx = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+	assert_eq!(cs_commitment_tx.len(), if claim_htlc { 2 } else { 1 });
+
+	check_added_monitors!(nodes[2], 1);
+	check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
+	check_closed_broadcast!(nodes[2], true);
+
+	let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
+	let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
+	reload_node!(nodes[1], node_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
+
+	check_closed_event!(nodes[1], 1, ClosureReason::OutdatedChannelManager);
+
+	let bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+	assert_eq!(bs_commitment_tx.len(), 1);
+
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
+	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+	if use_cs_commitment {
+		// If we confirm a commitment transaction that has the HTLC on-chain, nodes[1] should wait
+		// for an HTLC-spending transaction before it does anything with the HTLC upstream.
+		confirm_transaction(&nodes[1], &cs_commitment_tx[0]);
+		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+		if claim_htlc {
+			confirm_transaction(&nodes[1], &cs_commitment_tx[1]);
+		} else {
+			connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1);
+			let bs_htlc_timeout_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+			assert_eq!(bs_htlc_timeout_tx.len(), 1);
+			confirm_transaction(&nodes[1], &bs_htlc_timeout_tx[0]);
+		}
+	} else {
+		confirm_transaction(&nodes[1], &bs_commitment_tx[0]);
+	}
+
+	if !claim_htlc {
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
+	} else {
+		expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, true);
+	}
+	check_added_monitors!(nodes[1], 1);
+
+	let events = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	match &events[0] {
+		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { update_fulfill_htlcs, update_fail_htlcs, commitment_signed, .. }, .. } => {
+			if claim_htlc {
+				nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
+			} else {
+				nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
+			}
+			commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
+		},
+		_ => panic!("Unexpected event"),
+	}
+
+	if claim_htlc {
+		expect_payment_sent!(nodes[0], payment_preimage);
+	} else {
+		expect_payment_failed!(nodes[0], payment_hash, false);
+	}
+}
+
+#[test]
+fn forwarded_payment_no_manager_persistence() {
+	do_forwarded_payment_no_manager_persistence(true, true);
+	do_forwarded_payment_no_manager_persistence(true, false);
+	do_forwarded_payment_no_manager_persistence(false, false);
+}

From dbe4aadb8958b20aac0225cb17c7faa3fab31386 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Wed, 16 Nov 2022 02:20:03 +0000
Subject: [PATCH 3/3] Fail HTLCs which were removed from a channel but not
 persisted

When a channel is force-closed, if a `ChannelMonitor` update is
completed but a `ChannelManager` persist has not yet happened, HTLCs
which were removed in the latest (persisted) `ChannelMonitor` update
will not be failed even though they do not appear in the commitment
transaction which went on chain. This is because the `ChannelManager`
thinks the `ChannelMonitor` is responsible for them (as it is stale),
but the `ChannelMonitor` has no knowledge of the HTLC at all (as it is
not stale).

The fix for this is relatively simple - we need to check for this
specific case and fail back such HTLCs when deserializing a
`ChannelManager`.
---
 lightning/src/ln/channel.rs        |  9 ++--
 lightning/src/ln/channelmanager.rs | 27 +++++++++++++-
 lightning/src/ln/reload_tests.rs   | 69 ++++++++++++++++++++++++++++++
 3 files changed, 100 insertions(+), 5 deletions(-)

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 95ae69877..561b398b9 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -5951,15 +5951,16 @@ impl Channel {
 		(monitor_update, dropped_outbound_htlcs)
 	}

-	pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=&HTLCSource> {
+	pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
 		self.holding_cell_htlc_updates.iter()
 			.flat_map(|htlc_update| {
 				match htlc_update {
-					HTLCUpdateAwaitingACK::AddHTLC { source, .. } => { Some(source) }
-					_ => None
+					HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
+						=> Some((source, payment_hash)),
+					_ => None,
 				}
 			})
-			.chain(self.pending_outbound_htlcs.iter().map(|htlc| &htlc.source))
+			.chain(self.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
 	}
 }

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 2d84bb9cd..7f65719a5 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -5728,7 +5728,7 @@ impl ChannelManager
+	#[cfg(test)]
+	pub fn pop_pending_event(&self) -> Option<events::Event> {
+		let mut events = self.pending_events.lock().unwrap();
+		if events.is_empty() { None } else { Some(events.remove(0)) }
+	}
+
 	#[cfg(test)]
 	pub fn has_pending_payments(&self) -> bool {
 		!self.pending_outbound_payments.lock().unwrap().is_empty()
@@ -7206,6 +7212,25 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 					user_channel_id: channel.get_user_id(), reason: ClosureReason::OutdatedChannelManager
 				});
+				for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
+					let mut found_htlc = false;
+					for (monitor_htlc_source, _) in monitor.get_all_current_outbound_htlcs() {
+						if *channel_htlc_source == monitor_htlc_source { found_htlc = true; break; }
+					}
+					if !found_htlc {
+						// If we have some HTLCs in the channel which are not present in the newer
+						// ChannelMonitor, they have been removed and should be failed back to
+						// ensure we don't forget them entirely. Note that if the missing HTLC(s)
+						// were actually claimed we'd have generated and ensured the previous-hop
+						// claim update ChannelMonitor updates were persisted prior to persisting
+						// the ChannelMonitor update for the forward leg, so attempting to fail the
+						// backwards leg of the HTLC will simply be rejected.
+						log_info!(args.logger,
+							"Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
+							log_bytes!(payment_hash.0), log_bytes!(channel.channel_id()));
+						failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.get_counterparty_node_id(), channel.channel_id()));
+					}
+				}
 			} else {
 				log_info!(args.logger, "Successfully loaded channel {}", log_bytes!(channel.channel_id()));
 				if let Some(short_channel_id) = channel.get_short_channel_id() {

diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs
index 7e10f970a..881853151 100644
--- a/lightning/src/ln/reload_tests.rs
+++ b/lightning/src/ln/reload_tests.rs
@@ -933,3 +933,72 @@ fn forwarded_payment_no_manager_persistence() {
 	do_forwarded_payment_no_manager_persistence(true, false);
 	do_forwarded_payment_no_manager_persistence(false, false);
 }
+
+#[test]
+fn removed_payment_no_manager_persistence() {
+	// If an HTLC is failed to us on a channel, and the ChannelMonitor persistence completes, but
+	// the corresponding ChannelManager persistence does not, we need to ensure that the HTLC is
+	// still failed back to the previous hop even though the ChannelMonitor now no longer is aware
+	// of the HTLC. This was previously broken as no attempt was made to figure out which HTLCs
+	// were left dangling when a channel was force-closed due to a stale ChannelManager.
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+
+	let persister;
+	let new_chain_monitor;
+	let nodes_1_deserialized;
+
+	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+	let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+
+	let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+
+	let node_encoded = nodes[1].node.encode();
+
+	nodes[2].node.fail_htlc_backwards(&payment_hash);
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCDestination::FailedPayment { payment_hash }]);
+	check_added_monitors!(nodes[2], 1);
+	let events = nodes[2].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	match &events[0] {
+		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. }, .. } => {
+			nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &update_fail_htlcs[0]);
+			commitment_signed_dance!(nodes[1], nodes[2], commitment_signed, false);
+		},
+		_ => panic!("Unexpected event"),
+	}
+
+	let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
+	let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
+	reload_node!(nodes[1], node_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
+
+	match nodes[1].node.pop_pending_event().unwrap() {
+		Event::ChannelClosed { ref reason, .. } => {
+			assert_eq!(*reason, ClosureReason::OutdatedChannelManager);
+		},
+		_ => panic!("Unexpected event"),
+	}
+
+	// Now that the ChannelManager has force-closed the channel which had the HTLC removed, it is
+	// now forgotten everywhere. The ChannelManager should have, as a side-effect of reload,
+	// learned that the HTLC is gone from the ChannelMonitor and added it to the to-fail-back set.
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
+	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
+	check_added_monitors!(nodes[1], 1);
+	let events = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	match &events[0] {
+		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. }, .. } => {
+			nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
+			commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
+		},
+		_ => panic!("Unexpected event"),
+	}
+
+	expect_payment_failed!(nodes[0], payment_hash, false);
+}