Merge pull request #2112 from TheBlueMatt/2023-02-sent-persist-order

Delay RAA-after-next processing until PaymentSent events are handled
This commit is contained in:
Matt Corallo 2023-08-21 18:02:56 +00:00 committed by GitHub
commit cb923c604e
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
13 changed files with 327 additions and 106 deletions

View file

@ -1373,22 +1373,7 @@ mod test {
assert_eq!(other_events.borrow().len(), 1);
check_payment_claimable(&other_events.borrow()[0], payment_hash, payment_secret, payment_amt, payment_preimage_opt, invoice.recover_payee_pub_key());
do_claim_payment_along_route(&nodes[0], &[&vec!(&nodes[fwd_idx])[..]], false, payment_preimage);
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
match events[0] {
Event::PaymentSent { payment_preimage: ref ev_preimage, payment_hash: ref ev_hash, ref fee_paid_msat, .. } => {
assert_eq!(payment_preimage, *ev_preimage);
assert_eq!(payment_hash, *ev_hash);
assert_eq!(fee_paid_msat, &Some(0));
},
_ => panic!("Unexpected event")
}
match events[1] {
Event::PaymentPathSuccessful { payment_hash: hash, .. } => {
assert_eq!(hash, Some(payment_hash));
},
_ => panic!("Unexpected event")
}
expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
}
#[test]

View file

@ -805,7 +805,7 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L
#[cfg(test)]
mod tests {
use crate::{check_added_monitors, check_closed_broadcast, check_closed_event};
use crate::{expect_payment_sent, expect_payment_claimed, expect_payment_sent_without_paths, expect_payment_path_successful, get_event_msg};
use crate::{expect_payment_claimed, expect_payment_path_successful, get_event_msg};
use crate::{get_htlc_update_msgs, get_local_commitment_txn, get_revoke_commit_msgs, get_route_and_payment_hash, unwrap_send_err};
use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Watch};
use crate::chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS;
@ -888,7 +888,7 @@ mod tests {
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
expect_payment_sent_without_paths!(nodes[0], payment_preimage_1);
expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
check_added_monitors!(nodes[0], 1);
let (as_first_raa, as_first_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
@ -901,7 +901,7 @@ mod tests {
let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
expect_payment_sent_without_paths!(nodes[0], payment_preimage_2);
expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
check_added_monitors!(nodes[0], 1);
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
@ -985,7 +985,7 @@ mod tests {
}
}
expect_payment_sent!(nodes[0], payment_preimage);
expect_payment_sent(&nodes[0], payment_preimage, None, true, false);
}
#[test]

View file

@ -507,6 +507,11 @@ pub enum Event {
/// payment is no longer retryable, due either to the [`Retry`] provided or
/// [`ChannelManager::abandon_payment`] having been called for the corresponding payment.
///
/// In exceedingly rare cases, it is possible that an [`Event::PaymentFailed`] is generated for
/// a payment after an [`Event::PaymentSent`] event for this same payment has already been
/// received and processed. In this case, the [`Event::PaymentFailed`] event MUST be ignored,
/// and the payment MUST be treated as having succeeded.
///
/// [`Retry`]: crate::ln::channelmanager::Retry
/// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
PaymentFailed {

View file

@ -1403,6 +1403,7 @@ fn claim_while_disconnected_monitor_update_fail() {
MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
assert_eq!(*node_id, nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
check_added_monitors!(nodes[0], 1);
@ -1440,7 +1441,7 @@ fn claim_while_disconnected_monitor_update_fail() {
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
check_added_monitors!(nodes[0], 1);
expect_payment_sent!(nodes[0], payment_preimage_1);
expect_payment_path_successful!(nodes[0]);
claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
}
@ -2196,7 +2197,7 @@ fn test_fail_htlc_on_broadcast_after_claim() {
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
expect_payment_sent_without_paths!(nodes[0], payment_preimage);
expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, true, true);
expect_payment_path_successful!(nodes[0]);
}
@ -2449,7 +2450,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
assert!(updates.update_fee.is_none());
assert_eq!(updates.update_fulfill_htlcs.len(), 1);
nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
expect_payment_sent_without_paths!(nodes[1], payment_preimage_0);
expect_payment_sent(&nodes[1], payment_preimage_0, None, false, false);
assert_eq!(updates.update_add_htlcs.len(), 1);
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
updates.commitment_signed
@ -2466,7 +2467,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 100000);
check_added_monitors!(nodes[1], 1);
commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false, false);
let events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
@ -2567,7 +2568,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
expect_payment_sent_without_paths!(nodes[0], payment_preimage);
expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
if htlc_status == HTLCStatusAtDupClaim::Cleared {
commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
expect_payment_path_successful!(nodes[0]);
@ -2598,7 +2599,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
expect_payment_sent_without_paths!(nodes[0], payment_preimage);
expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
}
if htlc_status != HTLCStatusAtDupClaim::Cleared {
commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
@ -2797,7 +2798,7 @@ fn double_temp_error() {
assert_eq!(node_id, nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_1);
check_added_monitors!(nodes[0], 0);
expect_payment_sent_without_paths!(nodes[0], payment_preimage_1);
expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_b1);
check_added_monitors!(nodes[0], 1);
nodes[0].node.process_pending_htlc_forwards();
@ -3024,3 +3025,67 @@ fn test_inbound_reload_without_init_mon() {
do_test_inbound_reload_without_init_mon(false, true);
do_test_inbound_reload_without_init_mon(false, false);
}
#[test]
fn test_blocked_chan_preimage_release() {
	// Test that even if a channel's `ChannelMonitorUpdate` flow is blocked waiting on an event to
	// be handled, HTLC preimage `ChannelMonitorUpdate`s will still go out.
	//
	// Topology: nodes[0] <-> nodes[1] <-> nodes[2], with nodes[1] both originating a payment and
	// forwarding one, so that its channel with nodes[2] becomes blocked on a pending
	// `PaymentSent` event.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1).2;
	create_announced_chan_between_nodes(&nodes, 1, 2).2;
	// Send an initial payment end-to-end so both channels have seen HTLC traffic.
	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5_000_000);
	// Tee up two payments in opposite directions across nodes[1], one it sent to generate a
	// PaymentSent event and one it forwards.
	let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[1], &[&nodes[2]], 1_000_000);
	let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[2], &[&nodes[1], &nodes[0]], 1_000_000);
	// Claim the first payment to get a `PaymentSent` event (but don't handle it yet).
	nodes[2].node.claim_funds(payment_preimage_1);
	check_added_monitors(&nodes[2], 1);
	expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000);
	let cs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_htlc_fulfill_updates.update_fulfill_htlcs[0]);
	do_commitment_signed_dance(&nodes[1], &nodes[2], &cs_htlc_fulfill_updates.commitment_signed, false, false);
	// The RAA monitor update is held back pending the unprocessed `PaymentSent` event, so no
	// monitor update should have been added yet.
	check_added_monitors(&nodes[1], 0);
	// Now claim the second payment on nodes[0], which will ultimately result in nodes[1] trying to
	// claim an HTLC on its channel with nodes[2], but that channel is blocked on the above
	// `PaymentSent` event.
	nodes[0].node.claim_funds(payment_preimage_2);
	check_added_monitors(&nodes[0], 1);
	expect_payment_claimed!(nodes[0], payment_hash_2, 1_000_000);
	let as_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.update_fulfill_htlcs[0]);
	check_added_monitors(&nodes[1], 1); // We generate only a preimage monitor update
	// The blocked outbound edge must not yet produce any messages towards nodes[2].
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	// Finish the CS dance between nodes[0] and nodes[1].
	do_commitment_signed_dance(&nodes[1], &nodes[0], &as_htlc_fulfill_updates.commitment_signed, false, false);
	check_added_monitors(&nodes[1], 0);
	// Fetching (and thereby handling) the pending events unblocks the channel. Note the events
	// are asserted out of index order: [0] PaymentSent, [1] PaymentForwarded,
	// [2] PaymentPathSuccessful.
	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 3);
	if let Event::PaymentSent { .. } = events[0] {} else { panic!(); }
	if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); }
	if let Event::PaymentForwarded { .. } = events[1] {} else { panic!(); }
	// The event processing should release the last RAA update.
	check_added_monitors(&nodes[1], 1);
	// When we fetch the next update the message getter will generate the next update for nodes[2],
	// generating a further monitor update.
	let bs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
	check_added_monitors(&nodes[1], 1);
	// Finally, deliver the fulfill to nodes[2] and verify its `PaymentSent` (with a post-event
	// monitor update expected, per the last `expect_payment_sent` argument).
	nodes[2].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_htlc_fulfill_updates.update_fulfill_htlcs[0]);
	do_commitment_signed_dance(&nodes[2], &nodes[1], &bs_htlc_fulfill_updates.commitment_signed, false, false);
	expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true);
}

View file

@ -3201,7 +3201,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
/// generating an appropriate error *after* the channel state has been updated based on the
/// revoke_and_ack message.
pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
where F::Target: FeeEstimator, L::Target: Logger,
{
@ -3382,6 +3382,22 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
}
}
let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
let release_state_str =
if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
macro_rules! return_with_htlcs_to_fail {
($htlcs_to_fail: expr) => {
if !release_monitor {
self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
update: monitor_update,
});
return Ok(($htlcs_to_fail, None));
} else {
return Ok(($htlcs_to_fail, Some(monitor_update)));
}
}
}
if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
// We can't actually generate a new commitment transaction (incl by freeing holding
// cells) while we can't update the monitor, so we just return what we have.
@ -3400,7 +3416,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
self.context.monitor_pending_failures.append(&mut revoked_htlcs);
self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.context.channel_id()));
return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update)));
return_with_htlcs_to_fail!(Vec::new());
}
match self.free_holding_cell_htlcs(fee_estimator, logger) {
@ -3410,8 +3426,11 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
self.context.latest_monitor_update_id = monitor_update.update_id;
monitor_update.updates.append(&mut additional_update.updates);
log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
log_bytes!(self.context.channel_id()), release_state_str);
self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
return_with_htlcs_to_fail!(htlcs_to_fail);
},
(None, htlcs_to_fail) => {
if require_commitment {
@ -3422,14 +3441,19 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
self.context.latest_monitor_update_id = monitor_update.update_id;
monitor_update.updates.append(&mut additional_update.updates);
log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.",
log_bytes!(self.context.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
log_bytes!(self.context.channel_id()),
update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
release_state_str);
self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
return_with_htlcs_to_fail!(htlcs_to_fail);
} else {
log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.context.channel_id()));
log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
log_bytes!(self.context.channel_id()), release_state_str);
self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
return_with_htlcs_to_fail!(htlcs_to_fail);
}
}
}

View file

@ -1144,7 +1144,11 @@ where
/// could be in the middle of being processed without the direct mutex held.
///
/// See `ChannelManager` struct-level documentation for lock order requirements.
#[cfg(not(any(test, feature = "_test_utils")))]
pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
#[cfg(any(test, feature = "_test_utils"))]
pub(crate) pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
/// A simple atomic flag to ensure only one task at a time can be processing events asynchronously.
pending_events_processor: AtomicBool,
@ -5083,12 +5087,18 @@ where
self.pending_outbound_payments.finalize_claims(sources, &self.pending_events);
}
fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_outpoint: OutPoint) {
match source {
HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
debug_assert!(self.background_events_processed_since_startup.load(Ordering::Acquire),
"We don't support claim_htlc claims during startup - monitors may not be available yet");
self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage, session_priv, path, from_onchain, &self.pending_events, &self.logger);
let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
channel_funding_outpoint: next_channel_outpoint,
counterparty_node_id: path.hops[0].pubkey,
};
self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage,
session_priv, path, from_onchain, ev_completion_action, &self.pending_events,
&self.logger);
},
HTLCSource::PreviousHopData(hop_data) => {
let prev_outpoint = hop_data.outpoint;
@ -5104,7 +5114,7 @@ where
fee_earned_msat,
claim_from_onchain_tx: from_onchain,
prev_channel_id: Some(prev_outpoint.to_channel_id()),
next_channel_id: Some(next_channel_id),
next_channel_id: Some(next_channel_outpoint.to_channel_id()),
outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
},
downstream_counterparty_and_funding_outpoint: None,
@ -5879,6 +5889,7 @@ where
}
fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
let funding_txo;
let (htlc_source, forwarded_htlc_value) = {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
@ -5890,12 +5901,14 @@ where
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), chan)
let res = try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), chan);
funding_txo = chan.get().context.get_funding_txo().expect("We won't accept a fulfill until funded");
res
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id);
self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, funding_txo);
Ok(())
}
@ -6092,10 +6105,18 @@ where
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
let funding_txo = chan.get().context.get_funding_txo();
let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), chan);
let funding_txo_opt = chan.get().context.get_funding_txo();
let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
self.raa_monitor_updates_held(
&peer_state.actions_blocking_raa_monitor_updates, funding_txo,
*counterparty_node_id)
} else { false };
let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self,
chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger, mon_update_blocked), chan);
let res = if let Some(monitor_update) = monitor_update_opt {
handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
let funding_txo = funding_txo_opt
.expect("Funding outpoint must have been set for RAA handling to succeed");
handle_new_monitor_update!(self, funding_txo, monitor_update,
peer_state_lock, peer_state, per_peer_state, chan).map(|_| ())
} else { Ok(()) };
(htlcs_to_fail, res)
@ -6270,7 +6291,7 @@ where
MonitorEvent::HTLCEvent(htlc_update) => {
if let Some(preimage) = htlc_update.payment_preimage {
log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint);
} else {
log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
@ -8979,7 +9000,13 @@ where
// generating a `PaymentPathSuccessful` event but regenerating
// it and the `PaymentSent` on every restart until the
// `ChannelMonitor` is removed.
pending_outbounds.claim_htlc(payment_id, preimage, session_priv, path, false, &pending_events, &args.logger);
let compl_action =
EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
channel_funding_outpoint: monitor.get_funding_txo().0,
counterparty_node_id: path.hops[0].pubkey,
};
pending_outbounds.claim_htlc(payment_id, preimage, session_priv,
path, false, compl_action, &pending_events, &args.logger);
pending_events_read = pending_events.into_inner().unwrap();
}
},
@ -9000,7 +9027,7 @@ where
// downstream chan is closed (because we don't have a
// channel_id -> peer map entry).
counterparty_opt.is_none(),
monitor.get_funding_txo().0.to_channel_id()))
monitor.get_funding_txo().0))
} else { None }
} else {
// If it was an outbound payment, we've handled it above - if a preimage
@ -9269,12 +9296,12 @@ where
channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
}
for (source, preimage, downstream_value, downstream_closed, downstream_chan_id) in pending_claims_to_replay {
for (source, preimage, downstream_value, downstream_closed, downstream_funding) in pending_claims_to_replay {
// We use `downstream_closed` in place of `from_onchain` here just as a guess - we
// don't remember in the `ChannelMonitor` where we got a preimage from, but if the
// channel is closed we just assume that it probably came from an on-chain claim.
channel_manager.claim_funds_internal(source, preimage, Some(downstream_value),
downstream_closed, downstream_chan_id);
downstream_closed, downstream_funding);
}
//TODO: Broadcast channel update for closed channels, but only after we've made a
@ -9452,6 +9479,7 @@ mod tests {
let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]);
expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed);
check_added_monitors!(nodes[0], 1);
let (as_first_raa, as_first_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
@ -9479,16 +9507,8 @@ mod tests {
// Note that successful MPP payments will generate a single PaymentSent event upon the first
// path's success and a PaymentPathSuccessful event for each path's success.
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 3);
assert_eq!(events.len(), 2);
match events[0] {
Event::PaymentSent { payment_id: ref id, payment_preimage: ref preimage, payment_hash: ref hash, .. } => {
assert_eq!(Some(payment_id), *id);
assert_eq!(payment_preimage, *preimage);
assert_eq!(our_payment_hash, *hash);
},
_ => panic!("Unexpected event"),
}
match events[1] {
Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => {
assert_eq!(payment_id, *actual_payment_id);
assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
@ -9496,7 +9516,7 @@ mod tests {
},
_ => panic!("Unexpected event"),
}
match events[2] {
match events[1] {
Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => {
assert_eq!(payment_id, *actual_payment_id);
assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());

View file

@ -17,7 +17,7 @@ use crate::chain::transaction::OutPoint;
use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, PaymentFailureReason};
use crate::events::bump_transaction::{BumpTransactionEventHandler, Wallet, WalletSource};
use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
use crate::ln::channelmanager::{AChannelManager, ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, PaymentId, MIN_CLTV_EXPIRY_DELTA};
use crate::ln::channelmanager::{self, AChannelManager, ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, PaymentId, MIN_CLTV_EXPIRY_DELTA};
use crate::routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate};
use crate::routing::router::{self, PaymentParameters, Route};
use crate::ln::features::InitFeatures;
@ -1684,8 +1684,8 @@ macro_rules! commitment_signed_dance {
bs_revoke_and_ack
}
};
($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, false /* no extra message */) => {
assert!($crate::ln::functional_test_utils::commitment_signed_dance_through_cp_raa(&$node_a, &$node_b, $fail_backwards).is_none());
($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, false /* no extra message */, $incl_claim: expr) => {
assert!($crate::ln::functional_test_utils::commitment_signed_dance_through_cp_raa(&$node_a, &$node_b, $fail_backwards, $incl_claim).is_none());
};
($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr) => {
$crate::ln::functional_test_utils::do_commitment_signed_dance(&$node_a, &$node_b, &$commitment_signed, $fail_backwards, false);
@ -1696,11 +1696,16 @@ macro_rules! commitment_signed_dance {
/// the initiator's `revoke_and_ack` response. i.e. [`do_main_commitment_signed_dance`] plus the
/// `revoke_and_ack` response to it.
///
/// An HTLC claim on one channel blocks the RAA channel monitor update for the outbound edge
/// channel until the inbound edge channel preimage monitor update completes. Thus, when checking
/// for channel monitor updates, we need to know if an `update_fulfill_htlc` was included in
/// the commitment we're exchanging. `includes_claim` provides that information.
///
/// Returns any additional message `node_b` generated in addition to the `revoke_and_ack` response.
pub fn commitment_signed_dance_through_cp_raa(node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, fail_backwards: bool) -> Option<MessageSendEvent> {
pub fn commitment_signed_dance_through_cp_raa(node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, fail_backwards: bool, includes_claim: bool) -> Option<MessageSendEvent> {
let (extra_msg_option, bs_revoke_and_ack) = do_main_commitment_signed_dance(node_a, node_b, fail_backwards);
node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &bs_revoke_and_ack);
check_added_monitors(node_a, 1);
check_added_monitors(node_a, if includes_claim { 0 } else { 1 });
extra_msg_option
}
@ -1747,7 +1752,23 @@ pub fn do_commitment_signed_dance(node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '
node_a.node.handle_commitment_signed(&node_b.node.get_our_node_id(), commitment_signed);
check_added_monitors!(node_a, 1);
commitment_signed_dance!(node_a, node_b, (), fail_backwards, true, false);
// If this commitment signed dance was due to a claim, don't check for an RAA monitor update.
let got_claim = node_a.node.pending_events.lock().unwrap().iter().any(|(ev, action)| {
let matching_action = if let Some(channelmanager::EventCompletionAction::ReleaseRAAChannelMonitorUpdate
{ channel_funding_outpoint, counterparty_node_id }) = action
{
if channel_funding_outpoint.to_channel_id() == commitment_signed.channel_id {
assert_eq!(*counterparty_node_id, node_b.node.get_our_node_id());
true
} else { false }
} else { false };
if matching_action {
if let Event::PaymentSent { .. } = ev {} else { panic!(); }
}
matching_action
});
if fail_backwards { assert!(!got_claim); }
commitment_signed_dance!(node_a, node_b, (), fail_backwards, true, false, got_claim);
if skip_last_step { return; }
@ -1892,7 +1913,7 @@ macro_rules! expect_payment_claimed {
pub fn expect_payment_sent<CM: AChannelManager, H: NodeHolder<CM=CM>>(node: &H,
expected_payment_preimage: PaymentPreimage, expected_fee_msat_opt: Option<Option<u64>>,
expect_per_path_claims: bool,
expect_per_path_claims: bool, expect_post_ev_mon_update: bool,
) {
let events = node.node().get_and_clear_pending_events();
let expected_payment_hash = PaymentHash(
@ -1902,6 +1923,9 @@ pub fn expect_payment_sent<CM: AChannelManager, H: NodeHolder<CM=CM>>(node: &H,
} else {
assert_eq!(events.len(), 1);
}
if expect_post_ev_mon_update {
check_added_monitors(node, 1);
}
let expected_payment_id = match events[0] {
Event::PaymentSent { ref payment_id, ref payment_preimage, ref payment_hash, ref fee_paid_msat } => {
assert_eq!(expected_payment_preimage, *payment_preimage);
@ -1928,17 +1952,6 @@ pub fn expect_payment_sent<CM: AChannelManager, H: NodeHolder<CM=CM>>(node: &H,
}
}
#[cfg(test)]
#[macro_export]
macro_rules! expect_payment_sent_without_paths {
($node: expr, $expected_payment_preimage: expr) => {
expect_payment_sent!($node, $expected_payment_preimage, None::<u64>, false);
};
($node: expr, $expected_payment_preimage: expr, $expected_fee_msat_opt: expr) => {
expect_payment_sent!($node, $expected_payment_preimage, $expected_fee_msat_opt, false);
}
}
#[macro_export]
macro_rules! expect_payment_sent {
($node: expr, $expected_payment_preimage: expr) => {
@ -1949,7 +1962,7 @@ macro_rules! expect_payment_sent {
};
($node: expr, $expected_payment_preimage: expr, $expected_fee_msat_opt: expr, $expect_paths: expr) => {
$crate::ln::functional_test_utils::expect_payment_sent(&$node, $expected_payment_preimage,
$expected_fee_msat_opt.map(|o| Some(o)), $expect_paths);
$expected_fee_msat_opt.map(|o| Some(o)), $expect_paths, true);
}
}

View file

@ -2124,7 +2124,7 @@ fn channel_reserve_in_flight_removes() {
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed);
check_added_monitors!(nodes[0], 1);
let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
expect_payment_sent_without_paths!(nodes[0], payment_preimage_1);
expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]);
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg);
@ -2153,7 +2153,7 @@ fn channel_reserve_in_flight_removes() {
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
check_added_monitors!(nodes[0], 1);
let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
expect_payment_sent_without_paths!(nodes[0], payment_preimage_2);
expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false);
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
check_added_monitors!(nodes[1], 1);
@ -3584,7 +3584,7 @@ fn test_dup_events_on_peer_disconnect() {
check_added_monitors!(nodes[1], 1);
let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]);
expect_payment_sent_without_paths!(nodes[0], payment_preimage);
expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
@ -3706,6 +3706,7 @@ fn test_simple_peer_disconnect() {
_ => panic!("Unexpected event"),
}
}
check_added_monitors(&nodes[0], 1);
claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4);
fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6);
@ -4943,7 +4944,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false);
expect_payment_sent(&nodes[0], our_payment_preimage, None, true);
expect_payment_sent(&nodes[0], our_payment_preimage, None, true, true);
}
#[test]
@ -5486,7 +5487,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
expect_payment_sent_without_paths!(nodes[0], payment_preimage);
expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
check_added_monitors!(nodes[0], 1);
@ -9359,7 +9360,7 @@ fn test_inconsistent_mpp_params() {
pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), true, None);
do_claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, our_payment_preimage);
expect_payment_sent(&nodes[0], our_payment_preimage, Some(None), true);
expect_payment_sent(&nodes[0], our_payment_preimage, Some(None), true, true);
}
#[test]
@ -10035,3 +10036,89 @@ fn test_remove_expired_inbound_unfunded_channels() {
}
check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
}
fn do_test_multi_post_event_actions(do_reload: bool) {
// Tests handling multiple post-Event actions at once.
// There is specific code in ChannelManager to handle channels where multiple post-Event
// `ChannelMonitorUpdates` are pending at once. This test exercises that code.
//
// Specifically, we test calling `get_and_clear_pending_events` while there are two
// PaymentSents from different channels and one channel has two pending `ChannelMonitorUpdate`s
// - one from an RAA and one from an inbound commitment_signed.
//
// `do_reload` additionally exercises the same flow across a serialize/reload of
// nodes[0] before the pending events are drained.
let chanmon_cfgs = create_chanmon_cfgs(3);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let (persister, chain_monitor, nodes_0_deserialized);
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
// Two separate channels from nodes[0]: 0 <-> 1 and 0 <-> 2.
let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
let chan_id_2 = create_announced_chan_between_nodes(&nodes, 0, 2).2;
send_payment(&nodes[0], &[&nodes[1]], 1_000_000);
send_payment(&nodes[0], &[&nodes[2]], 1_000_000);
// One in-flight payment over each channel; both will be claimed below so that
// nodes[0] ends up with two pending PaymentSent events at once.
let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[2]], 1_000_000);
nodes[1].node.claim_funds(our_payment_preimage);
check_added_monitors!(nodes[1], 1);
expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000);
nodes[2].node.claim_funds(payment_preimage_2);
check_added_monitors!(nodes[2], 1);
expect_payment_claimed!(nodes[2], payment_hash_2, 1_000_000);
// Deliver both claims to nodes[0]. Note the trailing check of 0 added monitors:
// the monitor updates are held back until the PaymentSent events are processed.
for dest in &[1, 2] {
let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[*dest], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[*dest].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
commitment_signed_dance!(nodes[0], nodes[*dest], htlc_fulfill_updates.commitment_signed, false);
check_added_monitors(&nodes[0], 0);
}
// Send a third payment nodes[1] -> nodes[0] and deliver only the add + CS, leaving
// nodes[0] with a pending RAA/CS response blocked behind the event processing.
let (route, payment_hash_3, _, payment_secret_3) =
get_route_and_payment_hash!(nodes[1], nodes[0], 100_000);
let payment_id = PaymentId(payment_hash_3.0);
nodes[1].node.send_payment_with_route(&route, payment_hash_3,
RecipientOnionFields::secret_only(payment_secret_3), payment_id).unwrap();
check_added_monitors(&nodes[1], 1);
let send_event = SendEvent::from_node(&nodes[1]);
nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event.commitment_msg);
// No messages should be generated yet - the RAA + CS are blocked.
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
if do_reload {
// Serialize and reload nodes[0] with both channel monitors, then reconnect peers.
let nodes_0_serialized = nodes[0].node.encode();
let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_2).encode();
reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, chain_monitor, nodes_0_deserialized);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id());
reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[2]));
}
// Drain all four pending events in one call: two PaymentSent (one per channel,
// order unspecified) followed by two PaymentPathSuccessful.
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 4);
if let Event::PaymentSent { payment_preimage, .. } = events[0] {
assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2);
} else { panic!(); }
if let Event::PaymentSent { payment_preimage, .. } = events[1] {
assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2);
} else { panic!(); }
if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); }
if let Event::PaymentPathSuccessful { .. } = events[3] {} else { panic!(); }
// After the events are processed, the ChannelMonitorUpdates will be released and, upon their
// completion, we'll respond to nodes[1] with an RAA + CS.
get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
check_added_monitors(&nodes[0], 3);
}
#[test]
fn test_multi_post_event_actions() {
	// Exercise the multi-post-event-action handling both across a reload of
	// nodes[0] and without one.
	for &do_reload in &[true, false] {
		do_test_multi_post_event_actions(do_reload);
	}
}

View file

@ -345,7 +345,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
if prev_commitment_tx {
// To build a previous commitment transaction, deliver one round of commitment messages.
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &b_htlc_msgs.update_fulfill_htlcs[0]);
expect_payment_sent_without_paths!(nodes[0], payment_preimage);
expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &b_htlc_msgs.commitment_signed);
check_added_monitors!(nodes[0], 1);
let (as_raa, as_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
@ -454,7 +454,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
if prev_commitment_tx {
expect_payment_path_successful!(nodes[0]);
} else {
expect_payment_sent!(nodes[0], payment_preimage);
expect_payment_sent(&nodes[0], payment_preimage, None, true, false);
}
assert_eq!(sorted_vec(vec![sent_htlc_balance.clone(), sent_htlc_timeout_balance.clone()]),
sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
@ -681,7 +681,7 @@ fn test_balances_on_local_commitment_htlcs() {
// Now confirm nodes[1]'s HTLC claim, giving nodes[0] the preimage. Note that the "maybe
// claimable" balance remains until we see ANTI_REORG_DELAY blocks.
mine_transaction(&nodes[0], &bs_htlc_claim_txn[0]);
expect_payment_sent!(nodes[0], payment_preimage_2);
expect_payment_sent(&nodes[0], payment_preimage_2, None, true, false);
assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations {
amount_satoshis: 1_000_000 - 10_000 - 20_000 - chan_feerate *
(channel::commitment_tx_base_weight(&channel_type_features) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
@ -1538,7 +1538,7 @@ fn test_revoked_counterparty_aggregated_claims() {
// Confirm A's HTLC-Success tranasction which presumably raced B's claim, causing B to create a
// new claim.
mine_transaction(&nodes[1], &as_revoked_txn[1]);
expect_payment_sent!(nodes[1], claimed_payment_preimage);
expect_payment_sent(&nodes[1], claimed_payment_preimage, None, true, false);
let mut claim_txn_2: Vec<_> = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
claim_txn_2.sort_unstable_by_key(|tx| if tx.input.iter().any(|inp| inp.previous_output.txid == as_revoked_txn[0].txid()) { 0 } else { 1 });
// Once B sees the HTLC-Success transaction it splits its claim transaction into two, though in

View file

@ -1177,7 +1177,7 @@ impl OutboundPayments {
pub(super) fn claim_htlc<L: Deref>(
&self, payment_id: PaymentId, payment_preimage: PaymentPreimage, session_priv: SecretKey,
path: Path, from_onchain: bool,
path: Path, from_onchain: bool, ev_completion_action: EventCompletionAction,
pending_events: &Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
logger: &L,
) where L::Target: Logger {
@ -1194,7 +1194,7 @@ impl OutboundPayments {
payment_preimage,
payment_hash,
fee_paid_msat,
}, None));
}, Some(ev_completion_action.clone())));
payment.get_mut().mark_fulfilled();
}
@ -1211,7 +1211,7 @@ impl OutboundPayments {
payment_id,
payment_hash,
path,
}, None));
}, Some(ev_completion_action)));
}
}
} else {

View file

@ -643,7 +643,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
mine_transaction(&nodes[0], &as_commitment_tx);
}
mine_transaction(&nodes[0], &bs_htlc_claim_txn[0]);
expect_payment_sent!(nodes[0], payment_preimage_1);
expect_payment_sent(&nodes[0], payment_preimage_1, None, true, false);
connect_blocks(&nodes[0], TEST_FINAL_CLTV*4 + 20);
let (first_htlc_timeout_tx, second_htlc_timeout_tx) = {
let mut txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
@ -1005,7 +1005,7 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
if payment_timeout {
expect_payment_failed!(nodes[0], payment_hash, false);
} else {
expect_payment_sent!(nodes[0], payment_preimage);
expect_payment_sent(&nodes[0], payment_preimage, None, true, false);
}
// If we persist the ChannelManager after we get the PaymentSent event, we shouldn't get it
@ -1022,7 +1022,7 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
} else if payment_timeout {
expect_payment_failed!(nodes[0], payment_hash, false);
} else {
expect_payment_sent!(nodes[0], payment_preimage);
expect_payment_sent(&nodes[0], payment_preimage, None, true, false);
}
// Note that if we re-connect the block which exposed nodes[0] to the payment preimage (but
@ -1074,7 +1074,7 @@ fn test_fulfill_restart_failure() {
let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
expect_payment_sent_without_paths!(nodes[0], payment_preimage);
expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
// Now reload nodes[1]...
reload_node!(nodes[1], &chan_manager_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
@ -1775,6 +1775,7 @@ fn do_test_intercepted_payment(test: InterceptTest) {
},
_ => panic!("Unexpected event")
}
check_added_monitors(&nodes[0], 1);
} else if test == InterceptTest::Timeout {
let mut block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new());
connect_block(&nodes[0], &block);
@ -1929,7 +1930,7 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) {
payment_preimage);
// The sender doesn't know that the penultimate hop took an extra fee.
expect_payment_sent(&nodes[0], payment_preimage,
Some(Some(total_fee_msat - skimmed_fee_msat * num_mpp_parts as u64)), true);
Some(Some(total_fee_msat - skimmed_fee_msat * num_mpp_parts as u64)), true, true);
}
#[derive(PartialEq)]
@ -2353,6 +2354,7 @@ fn auto_retry_partial_failure() {
assert_eq!(bs_claim_update.update_fulfill_htlcs.len(), 1);
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_claim_update.update_fulfill_htlcs[0]);
expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_claim_update.commitment_signed);
check_added_monitors!(nodes[0], 1);
let (as_third_raa, as_third_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
@ -2367,6 +2369,7 @@ fn auto_retry_partial_failure() {
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
check_added_monitors!(nodes[0], 1);
expect_payment_path_successful!(nodes[0]);
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_claim_update.update_fulfill_htlcs[0]);
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_claim_update.update_fulfill_htlcs[1]);
@ -2383,7 +2386,10 @@ fn auto_retry_partial_failure() {
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
check_added_monitors!(nodes[0], 1);
expect_payment_sent!(nodes[0], payment_preimage);
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
if let Event::PaymentPathSuccessful { .. } = events[0] {} else { panic!(); }
if let Event::PaymentPathSuccessful { .. } = events[1] {} else { panic!(); }
}
#[test]
@ -3172,7 +3178,7 @@ fn test_threaded_payment_retries() {
}
}
fn do_no_missing_sent_on_midpoint_reload(persist_manager_with_payment: bool) {
fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint: bool) {
// Test that if we reload in the middle of an HTLC claim commitment signed dance we'll still
// receive the PaymentSent event even if the ChannelManager had no idea about the payment when
// it was last persisted.
@ -3201,10 +3207,20 @@ fn do_no_missing_sent_on_midpoint_reload(persist_manager_with_payment: bool) {
check_added_monitors!(nodes[1], 1);
expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000);
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
check_added_monitors!(nodes[0], 1);
if at_midpoint {
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
check_added_monitors!(nodes[0], 1);
} else {
let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
commitment_signed_dance!(nodes[0], nodes[1], htlc_fulfill_updates.commitment_signed, false);
// Ignore the PaymentSent event which is now pending on nodes[0] - if we were to handle it we'd
// be expected to ignore the eventual conflicting PaymentFailed, but by not looking at it we
// expect to get the PaymentSent again later.
check_added_monitors(&nodes[0], 0);
}
// The ChannelMonitor should always be the latest version, as we're required to persist it
// during the commitment signed handling.
@ -3250,8 +3266,14 @@ fn do_no_missing_sent_on_midpoint_reload(persist_manager_with_payment: bool) {
#[test]
fn no_missing_sent_on_midpoint_reload() {
	// Run the mid-commitment-dance reload variant (`at_midpoint` = true), both
	// without and with persisting the ChannelManager after the payment claim.
	// Note: the former `do_no_missing_sent_on_midpoint_reload` helper was renamed
	// to `do_no_missing_sent_on_reload` (gaining the `at_midpoint` flag) - the
	// stale calls to the old name have been dropped, as it no longer exists.
	do_no_missing_sent_on_reload(false, true);
	do_no_missing_sent_on_reload(true, true);
}
#[test]
fn no_missing_sent_on_reload() {
	// Run the full-commitment-dance variant (`at_midpoint` = false), both without
	// and with persisting the ChannelManager after the payment claim.
	for &persist_manager_with_payment in &[false, true] {
		do_no_missing_sent_on_reload(persist_manager_with_payment, false);
	}
}
fn do_claim_from_closed_chan(fail_payment: bool) {
@ -3691,7 +3713,7 @@ fn do_test_custom_tlvs_consistency(first_tlvs: Vec<(u64, Vec<u8>)>, second_tlvs:
do_claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]],
false, our_payment_preimage);
expect_payment_sent(&nodes[0], our_payment_preimage, Some(Some(2000)), true);
expect_payment_sent(&nodes[0], our_payment_preimage, Some(Some(2000)), true, true);
} else {
// Expect fail back
let expected_destinations = vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }];

View file

@ -230,7 +230,7 @@ fn test_counterparty_revoked_reorg() {
// Connect the HTLC claim transaction for HTLC 3
mine_transaction(&nodes[1], &unrevoked_local_txn[2]);
expect_payment_sent!(nodes[1], payment_preimage_3);
expect_payment_sent(&nodes[1], payment_preimage_3, None, true, false);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
// Connect blocks to confirm the unrevoked commitment transaction

View file

@ -345,7 +345,7 @@ fn htlc_fail_async_shutdown() {
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed);
check_added_monitors!(nodes[1], 1);
nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false, false);
let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(updates_2.update_add_htlcs.is_empty());