Mirror of https://github.com/lightningdevkit/rust-lightning.git, synced 2025-02-24 23:08:36 +01:00
Merge pull request #1857 from TheBlueMatt/2022-11-reload-htlc
Fail HTLCs which were removed from a channel but not persisted
Commit f4ab077a69: 5 changed files with 322 additions and 65 deletions
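The heart of the change, shown here as a minimal standalone sketch rather than LDK's actual API (the types and the helper name are illustrative stand-ins): on reload, any outbound HTLC the stale ChannelManager still considers in flight on a channel, but which the newer ChannelMonitor no longer tracks, must have been removed after the manager was last persisted, and is therefore queued to be failed backwards.

use std::collections::HashSet;

// Illustrative stand-ins for LDK's HTLCSource and PaymentHash; not the real types.
#[derive(Clone, Hash, PartialEq, Eq)]
struct HtlcSource(u64);
#[derive(Clone, Copy)]
struct PaymentHash([u8; 32]);

/// Returns the HTLCs a stale manager still lists as in flight but the newer
/// monitor no longer knows about; these are the ones to fail backwards on reload.
fn htlcs_to_fail_back(
	channel_htlcs: &[(HtlcSource, PaymentHash)],
	monitor_htlcs: &HashSet<HtlcSource>,
) -> Vec<(HtlcSource, PaymentHash)> {
	channel_htlcs.iter()
		.filter(|(source, _)| !monitor_htlcs.contains(source))
		.cloned()
		.collect()
}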
@@ -1837,12 +1837,60 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
		res
	}

	/// Gets the set of outbound HTLCs which can be (or have been) resolved by this
	/// `ChannelMonitor`. This is used to determine if an HTLC was removed from the channel prior
	/// to the `ChannelManager` having been persisted.
	///
	/// This is similar to [`Self::get_pending_outbound_htlcs`] except it includes HTLCs which were
	/// resolved by this `ChannelMonitor`.
	pub(crate) fn get_all_current_outbound_htlcs(&self) -> HashMap<HTLCSource, HTLCOutputInCommitment> {
		let mut res = HashMap::new();
		// Just examine the available counterparty commitment transactions. See docs on
		// `fail_unbroadcast_htlcs`, below, for justification.
		let us = self.inner.lock().unwrap();
		macro_rules! walk_counterparty_commitment {
			($txid: expr) => {
				if let Some(ref latest_outpoints) = us.counterparty_claimable_outpoints.get($txid) {
					for &(ref htlc, ref source_option) in latest_outpoints.iter() {
						if let &Some(ref source) = source_option {
							res.insert((**source).clone(), htlc.clone());
						}
					}
				}
			}
		}
		if let Some(ref txid) = us.current_counterparty_commitment_txid {
			walk_counterparty_commitment!(txid);
		}
		if let Some(ref txid) = us.prev_counterparty_commitment_txid {
			walk_counterparty_commitment!(txid);
		}
		res
	}

	/// Gets the set of outbound HTLCs which are pending resolution in this channel.
	/// This is used to reconstruct pending outbound payments on restart in the ChannelManager.
	pub(crate) fn get_pending_outbound_htlcs(&self) -> HashMap<HTLCSource, HTLCOutputInCommitment> {
		let us = self.inner.lock().unwrap();
		// We're only concerned with the confirmation count of HTLC transactions, and don't
		// actually care how many confirmations a commitment transaction may or may not have. Thus,
		// we look for either a FundingSpendConfirmation event or a funding_spend_confirmed.
		let confirmed_txid = us.funding_spend_confirmed.or_else(|| {
			us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
				if let OnchainEvent::FundingSpendConfirmation { .. } = event.event {
					Some(event.txid)
				} else { None }
			})
		});

		if confirmed_txid.is_none() {
			// If we have not seen a commitment transaction on-chain (ie the channel is not yet
			// closed), just get the full set.
			mem::drop(us);
			return self.get_all_current_outbound_htlcs();
		}

		let mut res = HashMap::new();
		macro_rules! walk_htlcs {
			($holder_commitment: expr, $htlc_iter: expr) => {
				for (htlc, source) in $htlc_iter {
@@ -1878,54 +1926,22 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
			}
		}

		let txid = confirmed_txid.unwrap();
		if Some(txid) == us.current_counterparty_commitment_txid || Some(txid) == us.prev_counterparty_commitment_txid {
			walk_htlcs!(false, us.counterparty_claimable_outpoints.get(&txid).unwrap().iter().filter_map(|(a, b)| {
				if let &Some(ref source) = b {
					Some((a, &**source))
				} else { None }
			}));
		} else if txid == us.current_holder_commitment_tx.txid {
			walk_htlcs!(true, us.current_holder_commitment_tx.htlc_outputs.iter().filter_map(|(a, _, c)| {
				if let Some(source) = c { Some((a, source)) } else { None }
			}));
		} else if let Some(prev_commitment) = &us.prev_holder_signed_commitment_tx {
			if txid == prev_commitment.txid {
				walk_htlcs!(true, prev_commitment.htlc_outputs.iter().filter_map(|(a, _, c)| {
					if let Some(source) = c { Some((a, source)) } else { None }
				}));
			}
		}

		res
	}
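How the two getters above divide the work, as a hedged sketch with simplified types (the function and parameter names are illustrative, not the real signatures): until a commitment transaction confirms on-chain the channel is still open, so the full current set is returned; once one confirms, only the HTLCs attached to that specific commitment remain pending.

// Sketch only: u64 stands in for a txid, `H` for an HTLC entry.
fn pending_outbound_set<H: Clone>(
	confirmed_commitment_txid: Option<u64>,
	commitments: &[(u64, Vec<H>)], // (txid, HTLCs) for counterparty/holder commitments
	all_current: Vec<H>,
) -> Vec<H> {
	match confirmed_commitment_txid {
		// Channel not yet closed on-chain: return the full current set.
		None => all_current,
		// Otherwise, only the HTLCs on the confirmed commitment are still pending.
		Some(txid) => commitments.iter()
			.find(|(candidate, _)| *candidate == txid)
			.map(|(_, htlcs)| htlcs.clone())
			.unwrap_or_default(),
	}
}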
@@ -5922,15 +5922,16 @@ impl<Signer: Sign> Channel<Signer> {
		(monitor_update, dropped_outbound_htlcs)
	}

	pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
		self.holding_cell_htlc_updates.iter()
			.flat_map(|htlc_update| {
				match htlc_update {
					HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
						=> Some((source, payment_hash)),
					_ => None,
				}
			})
			.chain(self.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
	}
}
@@ -5909,7 +5909,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
		let mut inflight_htlcs = InFlightHtlcs::new();

		for chan in self.channel_state.lock().unwrap().by_id.values() {
			for (htlc_source, _) in chan.inflight_htlc_sources() {
				if let HTLCSource::OutboundRoute { path, .. } = htlc_source {
					inflight_htlcs.process_path(path, self.get_our_node_id());
				}
@@ -5927,6 +5927,12 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
		events.into_inner()
	}

	#[cfg(test)]
	pub fn pop_pending_event(&self) -> Option<events::Event> {
		let mut events = self.pending_events.lock().unwrap();
		if events.is_empty() { None } else { Some(events.remove(0)) }
	}

	#[cfg(test)]
	pub fn has_pending_payments(&self) -> bool {
		!self.pending_outbound_payments.lock().unwrap().is_empty()
@@ -7420,6 +7426,25 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
					user_channel_id: channel.get_user_id(),
					reason: ClosureReason::OutdatedChannelManager
				});
				for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
					let mut found_htlc = false;
					for (monitor_htlc_source, _) in monitor.get_all_current_outbound_htlcs() {
						if *channel_htlc_source == monitor_htlc_source { found_htlc = true; break; }
					}
					if !found_htlc {
						// If we have some HTLCs in the channel which are not present in the newer
						// ChannelMonitor, they have been removed and should be failed back to
						// ensure we don't forget them entirely. Note that if the missing HTLC(s)
						// were actually claimed we'd have generated and ensured the previous-hop
						// claim update ChannelMonitor updates were persisted prior to persisting
						// the ChannelMonitor update for the forward leg, so attempting to fail the
						// backwards leg of the HTLC will simply be rejected.
						log_info!(args.logger,
							"Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
							log_bytes!(payment_hash.0), log_bytes!(channel.channel_id()));
						failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.get_counterparty_node_id(), channel.channel_id()));
					}
				}
			} else {
				log_info!(args.logger, "Successfully loaded channel {}", log_bytes!(channel.channel_id()));
				if let Some(short_channel_id) = channel.get_short_channel_id() {
@@ -7500,16 +7525,6 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
				None => continue,
			}
		}

		let background_event_count: u64 = Readable::read(reader)?;
		let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
@@ -7620,10 +7635,44 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
						}
					}
				}
				for (htlc_source, htlc) in monitor.get_all_current_outbound_htlcs() {
					if let HTLCSource::PreviousHopData(prev_hop_data) = htlc_source {
						// The ChannelMonitor is now responsible for this HTLC's
						// failure/success and will let us know what its outcome is. If we
						// still have an entry for this HTLC in `forward_htlcs`, we were
						// apparently not persisted after the monitor was when forwarding
						// the payment.
						forward_htlcs.retain(|_, forwards| {
							forwards.retain(|forward| {
								if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
									if htlc_info.prev_short_channel_id == prev_hop_data.short_channel_id &&
										htlc_info.prev_htlc_id == prev_hop_data.htlc_id
									{
										log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
											log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
										false
									} else { true }
								} else { true }
							});
							!forwards.is_empty()
						})
					}
				}
			}
		}
	}

		if !forward_htlcs.is_empty() {
			// If we have pending HTLCs to forward, assume we either dropped a
			// `PendingHTLCsForwardable` or the user received it but never processed it as they
			// shut down before the timer hit. Either way, set the time_forwardable to a small
			// constant as enough time has likely passed that we should simply handle the forwards
			// now, or at least after the user gets a chance to reconnect to our peers.
			pending_events_read.push(events::Event::PendingHTLCsForwardable {
				time_forwardable: Duration::from_secs(2),
			});
		}

		let inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material();
		let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
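The forward_htlcs pruning above, reduced to a hedged standalone sketch (simplified types; the real code matches on prev_short_channel_id and prev_htlc_id inside HTLCForwardInfo::AddHTLC): once a closed channel's monitor already tracks an HTLC, any still-queued forward for the same previous hop is dropped so the payment is not forwarded a second time after reload.

use std::collections::HashMap;

// Illustrative stand-in for the (prev_short_channel_id, prev_htlc_id) pair LDK matches on.
#[derive(Clone, Copy, PartialEq, Eq)]
struct PrevHop { short_channel_id: u64, htlc_id: u64 }

/// Drops queued forwards whose previous hop matches an HTLC the monitor now owns,
/// then removes any forward buckets left empty.
fn prune_forwards(forward_htlcs: &mut HashMap<u64, Vec<PrevHop>>, monitor_prev_hop: PrevHop) {
	forward_htlcs.retain(|_, forwards| {
		forwards.retain(|fwd| *fwd != monitor_prev_hop);
		!forwards.is_empty()
	});
}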
@@ -1127,7 +1127,7 @@ macro_rules! check_closed_event {
		use $crate::util::events::Event;

		let events = $node.node.get_and_clear_pending_events();
		assert_eq!(events.len(), $events, "{:?}", events);
		let expected_reason = $reason;
		let mut issues_discard_funding = false;
		for event in events {
@@ -1386,7 +1386,7 @@ macro_rules! expect_pending_htlcs_forwardable_conditions {
		let events = $node.node.get_and_clear_pending_events();
		match events[0] {
			$crate::util::events::Event::PendingHTLCsForwardable { .. } => { },
			_ => panic!("Unexpected event {:?}", events),
		};

		let count = expected_failures.len() + 1;
@@ -1596,7 +1596,7 @@ macro_rules! expect_payment_forwarded {
				if !$downstream_force_closed {
					assert!($node.node.list_channels().iter().any(|x| x.counterparty.node_id == $next_node.node.get_our_node_id() && x.channel_id == next_channel_id.unwrap()));
				}
				assert_eq!(claim_from_onchain_tx, $downstream_force_closed);
			},
			_ => panic!("Unexpected event"),
		}
@@ -10,14 +10,16 @@
//! Functional tests which test for correct behavior across node restarts.

use crate::chain::{ChannelMonitorUpdateStatus, Watch};
use crate::chain::chaininterface::LowerBoundedFeeEstimator;
use crate::chain::channelmonitor::ChannelMonitor;
use crate::chain::keysinterface::KeysInterface;
use crate::chain::transaction::OutPoint;
use crate::ln::channelmanager::{self, ChannelManager, ChannelManagerReadArgs, PaymentId};
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
use crate::util::enforcing_trait_impls::EnforcingSigner;
use crate::util::test_utils;
use crate::util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
use crate::util::ser::{Writeable, ReadableArgs};
use crate::util::config::UserConfig;
@@ -811,3 +813,192 @@ fn test_partial_claim_before_restart() {
	do_test_partial_claim_before_restart(false);
	do_test_partial_claim_before_restart(true);
}

fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_htlc: bool) {
	if !use_cs_commitment { assert!(!claim_htlc); }
	// If we go to forward a payment, and the ChannelMonitor persistence completes, but the
	// ChannelManager does not, we shouldn't try to forward the payment again, nor should we fail
	// it back until the ChannelMonitor decides the fate of the HTLC.
	// This was never an issue, but it may be easy to regress here going forward.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);

	let persister;
	let new_chain_monitor;
	let nodes_1_deserialized;

	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
	let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;

	let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000);
	let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
	let htlc_expiry = nodes[0].best_block_info().1 + TEST_FINAL_CLTV;
	nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), payment_id).unwrap();
	check_added_monitors!(nodes[0], 1);

	let payment_event = SendEvent::from_node(&nodes[0]);
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);

	let node_encoded = nodes[1].node.encode();

	expect_pending_htlcs_forwardable!(nodes[1]);

	let payment_event = SendEvent::from_node(&nodes[1]);
	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg);
	check_added_monitors!(nodes[2], 1);

	if claim_htlc {
		get_monitor!(nodes[2], chan_id_2).provide_payment_preimage(&payment_hash, &payment_preimage,
			&nodes[2].tx_broadcaster, &LowerBoundedFeeEstimator(nodes[2].fee_estimator), &nodes[2].logger);
	}
	assert!(nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());

	let _ = nodes[2].node.get_and_clear_pending_msg_events();

	nodes[2].node.force_close_broadcasting_latest_txn(&chan_id_2, &nodes[1].node.get_our_node_id()).unwrap();
	let cs_commitment_tx = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
	assert_eq!(cs_commitment_tx.len(), if claim_htlc { 2 } else { 1 });

	check_added_monitors!(nodes[2], 1);
	check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
	check_closed_broadcast!(nodes[2], true);

	let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
	let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
	reload_node!(nodes[1], node_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);

	check_closed_event!(nodes[1], 1, ClosureReason::OutdatedChannelManager);

	let bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
	assert_eq!(bs_commitment_tx.len(), 1);

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));

	if use_cs_commitment {
		// If we confirm a commitment transaction that has the HTLC on-chain, nodes[1] should wait
		// for an HTLC-spending transaction before it does anything with the HTLC upstream.
		confirm_transaction(&nodes[1], &cs_commitment_tx[0]);
		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

		if claim_htlc {
			confirm_transaction(&nodes[1], &cs_commitment_tx[1]);
		} else {
			connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1);
			let bs_htlc_timeout_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
			assert_eq!(bs_htlc_timeout_tx.len(), 1);
			confirm_transaction(&nodes[1], &bs_htlc_timeout_tx[0]);
		}
	} else {
		confirm_transaction(&nodes[1], &bs_commitment_tx[0]);
	}

	if !claim_htlc {
		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
	} else {
		expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, true);
	}
	check_added_monitors!(nodes[1], 1);

	let events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	match &events[0] {
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { update_fulfill_htlcs, update_fail_htlcs, commitment_signed, .. }, .. } => {
			if claim_htlc {
				nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
			} else {
				nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
			}
			commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
		},
		_ => panic!("Unexpected event"),
	}

	if claim_htlc {
		expect_payment_sent!(nodes[0], payment_preimage);
	} else {
		expect_payment_failed!(nodes[0], payment_hash, false);
	}
}

#[test]
fn forwarded_payment_no_manager_persistence() {
	do_forwarded_payment_no_manager_persistence(true, true);
	do_forwarded_payment_no_manager_persistence(true, false);
	do_forwarded_payment_no_manager_persistence(false, false);
}

#[test]
fn removed_payment_no_manager_persistence() {
	// If an HTLC is failed to us on a channel, and the ChannelMonitor persistence completes, but
	// the corresponding ChannelManager persistence does not, we need to ensure that the HTLC is
	// still failed back to the previous hop even though the ChannelMonitor now no longer is aware
	// of the HTLC. This was previously broken as no attempt was made to figure out which HTLCs
	// were left dangling when a channel was force-closed due to a stale ChannelManager.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);

	let persister;
	let new_chain_monitor;
	let nodes_1_deserialized;

	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
	let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;

	let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);

	let node_encoded = nodes[1].node.encode();

	nodes[2].node.fail_htlc_backwards(&payment_hash);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCDestination::FailedPayment { payment_hash }]);
	check_added_monitors!(nodes[2], 1);
	let events = nodes[2].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	match &events[0] {
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. }, .. } => {
			nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &update_fail_htlcs[0]);
			commitment_signed_dance!(nodes[1], nodes[2], commitment_signed, false);
		},
		_ => panic!("Unexpected event"),
	}

	let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
	let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
	reload_node!(nodes[1], node_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);

	match nodes[1].node.pop_pending_event().unwrap() {
		Event::ChannelClosed { ref reason, .. } => {
			assert_eq!(*reason, ClosureReason::OutdatedChannelManager);
		},
		_ => panic!("Unexpected event"),
	}

	// Now that the ChannelManager has force-closed the channel which had the HTLC removed, it is
	// now forgotten everywhere. The ChannelManager should have, as a side-effect of reload,
	// learned that the HTLC is gone from the ChannelMonitor and added it to the to-fail-back set.
	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));

	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
	check_added_monitors!(nodes[1], 1);
	let events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	match &events[0] {
		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. }, .. } => {
			nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
			commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
		},
		_ => panic!("Unexpected event"),
	}

	expect_payment_failed!(nodes[0], payment_hash, false);
}