Mirror of https://github.com/lightningdevkit/rust-lightning.git (synced 2025-02-24 23:08:36 +01:00)
Avoid attempting to forward to a closed chan on stale-data reload
If, after forwarding a payment to our counterparty, we restart with a ChannelMonitor update having been persisted but the corresponding ChannelManager update not persisted, we'll still have the forwarded HTLC in the `forward_htlcs` map on startup. This will cause us to generate a (spurious) `PendingHTLCsForwardable` event. However, when we go to forward said HTLC, we'll notice the channel has been closed and leave it up to the `ChannelMonitor` to finalize the HTLC. This is all fine today - we won't lose any funds, we'll just generate an excess forwardable event and then fail to forward. However, in the future, when we allow for forward-time channel changes, this could break. Thus, it's worth adding tests for this behavior today and, while we're at it, removing the spurious forwardable HTLCs event.
Parent: e142f67917
Commit: 0bb87ddad7
3 changed files with 160 additions and 14 deletions
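The core of the change is a reload-time cleanup: any queued forward whose previous hop is already owned by a closed channel's `ChannelMonitor` is dropped before the (now conditional) `PendingHTLCsForwardable` event is generated. Below is a minimal, self-contained sketch of that nested-`retain` pattern; `PrevHop` and `PendingForward` are simplified stand-ins for LDK's `HTLCPreviousHopData` and `HTLCForwardInfo`, not the real types.

use std::collections::HashMap;

// Simplified stand-ins for LDK's HTLCPreviousHopData / HTLCForwardInfo.
#[derive(Clone, Copy, PartialEq, Eq)]
struct PrevHop { short_channel_id: u64, htlc_id: u64 }

struct PendingForward { prev_hop: PrevHop }

// Drop any queued forward whose previous hop is already tracked by a closed
// channel's monitor, pruning buckets that become empty (the same nested
// `retain` shape as the second hunk below).
fn prune_stale_forwards(
	forward_htlcs: &mut HashMap<u64, Vec<PendingForward>>,
	monitor_owned: &[PrevHop],
) {
	forward_htlcs.retain(|_scid, forwards| {
		forwards.retain(|fwd| !monitor_owned.contains(&fwd.prev_hop));
		!forwards.is_empty()
	});
}

fn main() {
	let mut forward_htlcs = HashMap::new();
	forward_htlcs.insert(42u64, vec![
		PendingForward { prev_hop: PrevHop { short_channel_id: 7, htlc_id: 0 } },
	]);
	// The monitor of the closed outbound channel already owns this HTLC...
	prune_stale_forwards(&mut forward_htlcs, &[PrevHop { short_channel_id: 7, htlc_id: 0 }]);
	// ...so nothing is left to forward and no forwardable event needs to be queued.
	assert!(forward_htlcs.is_empty());
}

The real code additionally matches on `HTLCSource::PreviousHopData` and logs each removal, as shown in the diff.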
@@ -7286,16 +7286,6 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 				None => continue,
 			}
 		}
-	if forward_htlcs_count > 0 {
-		// If we have pending HTLCs to forward, assume we either dropped a
-		// `PendingHTLCsForwardable` or the user received it but never processed it as they
-		// shut down before the timer hit. Either way, set the time_forwardable to a small
-		// constant as enough time has likely passed that we should simply handle the forwards
-		// now, or at least after the user gets a chance to reconnect to our peers.
-		pending_events_read.push(events::Event::PendingHTLCsForwardable {
-			time_forwardable: Duration::from_secs(2),
-		});
-	}

	let background_event_count: u64 = Readable::read(reader)?;
	let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
@@ -7404,9 +7394,43 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 						}
 					}
 				}
+				for (htlc_source, htlc) in monitor.get_all_current_outbound_htlcs() {
+					if let HTLCSource::PreviousHopData(prev_hop_data) = htlc_source {
+						// The ChannelMonitor is now responsible for this HTLC's
+						// failure/success and will let us know what its outcome is. If we
+						// still have an entry for this HTLC in `forward_htlcs`, we were
+						// apparently not persisted after the monitor was when forwarding
+						// the payment.
+						forward_htlcs.retain(|_, forwards| {
+							forwards.retain(|forward| {
+								if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
+									if htlc_info.prev_short_channel_id == prev_hop_data.short_channel_id &&
+										htlc_info.prev_htlc_id == prev_hop_data.htlc_id
+									{
+										log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
+											log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
+										false
+									} else { true }
+								} else { true }
+							});
+							!forwards.is_empty()
+						})
+					}
+				}
 			}
 		}
 	}
+
+	if !forward_htlcs.is_empty() {
+		// If we have pending HTLCs to forward, assume we either dropped a
+		// `PendingHTLCsForwardable` or the user received it but never processed it as they
+		// shut down before the timer hit. Either way, set the time_forwardable to a small
+		// constant as enough time has likely passed that we should simply handle the forwards
+		// now, or at least after the user gets a chance to reconnect to our peers.
+		pending_events_read.push(events::Event::PendingHTLCsForwardable {
+			time_forwardable: Duration::from_secs(2),
+		});
+	}

	let inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material();
	let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);

@@ -1091,7 +1091,7 @@ macro_rules! check_closed_event {
		use $crate::util::events::Event;

		let events = $node.node.get_and_clear_pending_events();
-		assert_eq!(events.len(), $events);
+		assert_eq!(events.len(), $events, "{:?}", events);
		let expected_reason = $reason;
		let mut issues_discard_funding = false;
		for event in events {

@@ -1350,7 +1350,7 @@ macro_rules! expect_pending_htlcs_forwardable_conditions {
		let events = $node.node.get_and_clear_pending_events();
		match events[0] {
			$crate::util::events::Event::PendingHTLCsForwardable { .. } => { },
-			_ => panic!("Unexpected event"),
+			_ => panic!("Unexpected event {:?}", events),
		};

		let count = expected_failures.len() + 1;

@@ -1560,7 +1560,7 @@ macro_rules! expect_payment_forwarded {
				if !$downstream_force_closed {
					assert!($node.node.list_channels().iter().any(|x| x.counterparty.node_id == $next_node.node.get_our_node_id() && x.channel_id == next_channel_id.unwrap()));
				}
-				assert_eq!(claim_from_onchain_tx, $upstream_force_closed);
+				assert_eq!(claim_from_onchain_tx, $downstream_force_closed);
			},
			_ => panic!("Unexpected event"),
		}

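The assertion tweaks above rely on standard `assert_eq!`/`panic!` behavior: trailing arguments form an extra format string that is appended to the failure message, so a mismatch now dumps the full event list instead of just the lengths. A trivial, standalone illustration:

fn main() {
	let events = vec!["PaymentSent", "PendingHTLCsForwardable"];
	// On failure, the panic message includes both compared values plus the
	// Debug dump of `events`, which is what the test macros now print.
	assert_eq!(events.len(), 2, "{:?}", events);
}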
@@ -10,14 +10,16 @@
 //! Functional tests which test for correct behavior across node restarts.

 use crate::chain::{ChannelMonitorUpdateStatus, Watch};
+use crate::chain::chaininterface::LowerBoundedFeeEstimator;
 use crate::chain::channelmonitor::ChannelMonitor;
 use crate::chain::keysinterface::KeysInterface;
 use crate::chain::transaction::OutPoint;
 use crate::ln::channelmanager::{self, ChannelManager, ChannelManagerReadArgs, PaymentId};
+use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
 use crate::util::enforcing_trait_impls::EnforcingSigner;
 use crate::util::test_utils;
-use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
+use crate::util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
 use crate::util::ser::{Writeable, ReadableArgs};
 use crate::util::config::UserConfig;

@@ -811,3 +813,123 @@ fn test_partial_claim_before_restart() {
 	do_test_partial_claim_before_restart(false);
 	do_test_partial_claim_before_restart(true);
 }
+
+fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_htlc: bool) {
+	if !use_cs_commitment { assert!(!claim_htlc); }
+	// If we go to forward a payment, and the ChannelMonitor persistence completes, but the
+	// ChannelManager does not, we shouldn't try to forward the payment again, nor should we fail
+	// it back until the ChannelMonitor decides the fate of the HTLC.
+	// This was never an issue, but it may be easy to regress here going forward.
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+
+	let persister;
+	let new_chain_monitor;
+	let nodes_1_deserialized;
+
+	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+	let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+
+	let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000);
+	let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
+	let htlc_expiry = nodes[0].best_block_info().1 + TEST_FINAL_CLTV;
+	nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), payment_id).unwrap();
+	check_added_monitors!(nodes[0], 1);
+
+	let payment_event = SendEvent::from_node(&nodes[0]);
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+
+	let node_encoded = nodes[1].node.encode();
+
+	expect_pending_htlcs_forwardable!(nodes[1]);
+
+	let payment_event = SendEvent::from_node(&nodes[1]);
+	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
+	nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg);
+	check_added_monitors!(nodes[2], 1);
+
+	if claim_htlc {
+		get_monitor!(nodes[2], chan_id_2).provide_payment_preimage(&payment_hash, &payment_preimage,
+			&nodes[2].tx_broadcaster, &LowerBoundedFeeEstimator(nodes[2].fee_estimator), &nodes[2].logger);
+	}
+	assert!(nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+
+	let _ = nodes[2].node.get_and_clear_pending_msg_events();
+
+	nodes[2].node.force_close_broadcasting_latest_txn(&chan_id_2, &nodes[1].node.get_our_node_id()).unwrap();
+	let cs_commitment_tx = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+	assert_eq!(cs_commitment_tx.len(), if claim_htlc { 2 } else { 1 });
+
+	check_added_monitors!(nodes[2], 1);
+	check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
+	check_closed_broadcast!(nodes[2], true);
+
+	let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
+	let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
+	reload_node!(nodes[1], node_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
+
+	check_closed_event!(nodes[1], 1, ClosureReason::OutdatedChannelManager);
+
+	let bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+	assert_eq!(bs_commitment_tx.len(), 1);
+
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
+	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+	if use_cs_commitment {
+		// If we confirm a commitment transaction that has the HTLC on-chain, nodes[1] should wait
+		// for an HTLC-spending transaction before it does anything with the HTLC upstream.
+		confirm_transaction(&nodes[1], &cs_commitment_tx[0]);
+		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+		if claim_htlc {
+			confirm_transaction(&nodes[1], &cs_commitment_tx[1]);
+		} else {
+			connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1);
+			let bs_htlc_timeout_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+			assert_eq!(bs_htlc_timeout_tx.len(), 1);
+			confirm_transaction(&nodes[1], &bs_htlc_timeout_tx[0]);
+		}
+	} else {
+		confirm_transaction(&nodes[1], &bs_commitment_tx[0]);
+	}
+
+	if !claim_htlc {
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
+	} else {
+		expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, true);
+	}
+	check_added_monitors!(nodes[1], 1);
+
+	let events = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	match &events[0] {
+		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { update_fulfill_htlcs, update_fail_htlcs, commitment_signed, .. }, .. } => {
+			if claim_htlc {
+				nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
+			} else {
+				nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
+			}
+			commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
+		},
+		_ => panic!("Unexpected event"),
+	}
+
+	if claim_htlc {
+		expect_payment_sent!(nodes[0], payment_preimage);
+	} else {
+		expect_payment_failed!(nodes[0], payment_hash, false);
+	}
+}
+
+#[test]
+fn forwarded_payment_no_manager_persistence() {
+	do_forwarded_payment_no_manager_persistence(true, true);
+	do_forwarded_payment_no_manager_persistence(true, false);
+	do_forwarded_payment_no_manager_persistence(false, false);
+}
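As background on why the spurious event was noisy rather than dangerous: applications react to `Event::PendingHTLCsForwardable` by calling `ChannelManager::process_pending_htlc_forwards` after roughly `time_forwardable`, at which point a forward aimed at an already-closed channel simply fails. A sketch of that application-side pattern, using hypothetical stand-in types (only the event's shape and the `process_pending_htlc_forwards` call correspond to LDK; the rest is illustrative plumbing):

use std::time::Duration;

// Hypothetical, minimal stand-ins; LDK's real Event enum and ChannelManager
// are much richer, but the control flow an application implements is the same.
enum Event { PendingHTLCsForwardable { time_forwardable: Duration } }

struct FakeManager;
impl FakeManager {
	fn process_pending_htlc_forwards(&self) {
		println!("forwarding whatever is queued in forward_htlcs");
	}
}

fn handle_event(manager: &FakeManager, event: Event) {
	match event {
		Event::PendingHTLCsForwardable { time_forwardable } => {
			// Wait roughly `time_forwardable` (batching obscures payment timing),
			// then ask the manager to forward everything queued. Before this patch,
			// a stale reload could emit this event even though the only queued
			// forward pointed at a channel the monitor had already resolved.
			std::thread::sleep(time_forwardable);
			manager.process_pending_htlc_forwards();
		},
	}
}

fn main() {
	handle_event(&FakeManager, Event::PendingHTLCsForwardable { time_forwardable: Duration::from_secs(2) });
}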