Mirror of https://github.com/lightningdevkit/rust-lightning.git, synced 2025-03-14 07:06:42 +01:00
Consider quiescence state when freeing holding cells upon revoke_and_ack
We previously would avoid freeing our holding cells upon a `revoke_and_ack` if a monitor update was in progress, which we checked explicitly. With quiescence, if we've already sent `stfu`, we're not allowed to make further commitment updates, so we must also avoid freeing our holding cells in such cases. Along the way, we also remove the special handling of in-progress monitor updates now that it behaves the same as the handling of being quiescent.
Parent: 99670ecd0e
Commit: 20877b3e22
2 changed files with 153 additions and 27 deletions
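The change below hinges on a single channel-state predicate: a new commitment update (including one produced by freeing the holding cell) may only be built when no monitor update is in flight and we have not committed to quiescence by sending `stfu`. As a rough illustration of that predicate, here is a minimal, self-contained Rust sketch; the `ChannelStateFlags` type and its plain booleans are stand-ins invented for this example, not LDK's actual `ChannelState` representation.

// Illustrative sketch only: stand-in flags instead of LDK's real ChannelState bitmasks.
struct ChannelStateFlags {
	monitor_update_in_progress: bool, // an async ChannelMonitorUpdate has not yet completed
	local_stfu_sent: bool,            // we sent `stfu` and may no longer propose updates
	quiescent: bool,                  // both sides have exchanged `stfu`
}

impl ChannelStateFlags {
	// New commitment updates (including freeing the holding cell) are only allowed
	// when the monitor is writable and we have not started entering quiescence.
	fn can_generate_new_commitment(&self) -> bool {
		!self.monitor_update_in_progress && !self.local_stfu_sent && !self.quiescent
	}
}

fn main() {
	let state = ChannelStateFlags {
		monitor_update_in_progress: false,
		local_stfu_sent: true, // we already sent `stfu`
		quiescent: false,
	};
	// Having sent `stfu`, we must hold back any new commitment update.
	assert!(!state.can_generate_new_commitment());
}

Folding both conditions into one check is what lets the diff drop the dedicated early return for in-progress monitor updates: the monitor case and the quiescence case now land in the same "hold back the commitment update" branch.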
@@ -6081,29 +6081,7 @@ impl<SP: Deref> FundedChannel<SP> where
 		self.context.monitor_pending_update_adds.append(&mut pending_update_adds);
 
-		if self.context.channel_state.is_monitor_update_in_progress() {
-			// We can't actually generate a new commitment transaction (incl by freeing holding
-			// cells) while we can't update the monitor, so we just return what we have.
-			if require_commitment {
-				self.context.monitor_pending_commitment_signed = true;
-				// When the monitor updating is restored we'll call
-				// get_last_commitment_update_for_send(), which does not update state, but we're
-				// definitely now awaiting a remote revoke before we can step forward any more, so
-				// set it here.
-				let mut additional_update = self.build_commitment_no_status_check(logger);
-				// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
-				// strictly increasing by one, so decrement it here.
-				self.context.latest_monitor_update_id = monitor_update.update_id;
-				monitor_update.updates.append(&mut additional_update.updates);
-			}
-			self.context.monitor_pending_forwards.append(&mut to_forward_infos);
-			self.context.monitor_pending_failures.append(&mut revoked_htlcs);
-			self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
-			log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
-			return_with_htlcs_to_fail!(Vec::new());
-		}
-
-		match self.free_holding_cell_htlcs(fee_estimator, logger) {
+		match self.maybe_free_holding_cell_htlcs(fee_estimator, logger) {
 			(Some(mut additional_update), htlcs_to_fail) => {
 				// free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
 				// strictly increasing by one, so decrement it here.
@@ -6118,6 +6096,11 @@ impl<SP: Deref> FundedChannel<SP> where
 			},
 			(None, htlcs_to_fail) => {
 				if require_commitment {
+					// We can't generate a new commitment transaction yet so we just return what we
+					// have. When the monitor updating is restored we'll call
+					// get_last_commitment_update_for_send(), which does not update state, but we're
+					// definitely now awaiting a remote revoke before we can step forward any more,
+					// so set it here.
 					let mut additional_update = self.build_commitment_no_status_check(logger);
 
 					// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
@@ -6125,10 +6108,24 @@ impl<SP: Deref> FundedChannel<SP> where
 					self.context.latest_monitor_update_id = monitor_update.update_id;
 					monitor_update.updates.append(&mut additional_update.updates);
 
-					log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
-						&self.context.channel_id(),
-						update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
-						release_state_str);
+					log_debug!(logger, "Received a valid revoke_and_ack for channel {}. {} monitor update.",
+						&self.context.channel_id(), release_state_str);
+					if self.context.channel_state.can_generate_new_commitment() {
+						log_debug!(logger, "Responding with a commitment update with {} HTLCs failed for channel {}",
+							update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
+							&self.context.channel_id);
+					} else {
+						debug_assert!(htlcs_to_fail.is_empty());
+						let reason = if self.context.channel_state.is_local_stfu_sent() {
+							"exits quiescence"
+						} else if self.context.channel_state.is_monitor_update_in_progress() {
+							"completes pending monitor update"
+						} else {
+							"can continue progress"
+						};
+						log_debug!(logger, "Holding back commitment update until channel {} {}",
+							&self.context.channel_id, reason);
+					}
 
 					self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
 					return_with_htlcs_to_fail!(htlcs_to_fail);
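The other notable change in this file is the rename of `free_holding_cell_htlcs` to `maybe_free_holding_cell_htlcs`, signaling that flushing the holding cell is now conditional on the same predicate. Below is a hypothetical, self-contained sketch of that gating; the `HoldingCell` type, the `String` stand-ins, and the `maybe_free` signature are invented for illustration, while the real method (per the match arms above) returns an `Option` of a monitor update together with the HTLCs to fail back.

// Hypothetical sketch: the holding cell only drains when a new commitment is allowed.
struct HoldingCell {
	queued_updates: Vec<String>, // stand-in for queued update_add/fail/fulfill HTLCs
}

impl HoldingCell {
	// Returns the queued updates to fold into one commitment_signed, or None if
	// they must stay queued (monitor update in flight, or `stfu` already sent).
	fn maybe_free(&mut self, can_generate_new_commitment: bool) -> Option<Vec<String>> {
		if !can_generate_new_commitment || self.queued_updates.is_empty() {
			return None;
		}
		Some(std::mem::take(&mut self.queued_updates))
	}
}

fn main() {
	let mut cell = HoldingCell { queued_updates: vec!["update_add_htlc".to_owned()] };
	// While quiescing or while a monitor update is pending, nothing is freed.
	assert!(cell.maybe_free(false).is_none());
	// Once commitment updates are allowed again, the queue drains in one go.
	assert_eq!(cell.maybe_free(true).unwrap().len(), 1);
}

Returning `None` in that situation is what makes the `(None, htlcs_to_fail)` arm above the common path for both a pending monitor update and a quiescence handshake in progress.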
@@ -1,4 +1,5 @@
 use crate::chain::ChannelMonitorUpdateStatus;
+use crate::events::Event;
 use crate::events::HTLCDestination;
 use crate::events::MessageSendEvent;
 use crate::events::MessageSendEventsProvider;
@@ -284,3 +285,131 @@ fn test_quiescence_tracks_monitor_update_in_progress_and_waits_for_async_signer(
 	let _ = get_htlc_update_msgs!(&nodes[0], node_id_1);
 	check_added_monitors(&nodes[0], 1);
 }
+
+#[test]
+fn test_quiescence_updates_go_to_holding_cell() {
+	quiescence_updates_go_to_holding_cell(false);
+	quiescence_updates_go_to_holding_cell(true);
+}
+
+fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) {
+	// Test that any updates made to a channel while quiescent go to the holding cell.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+
+	let node_id_0 = nodes[0].node.get_our_node_id();
+	let node_id_1 = nodes[1].node.get_our_node_id();
+
+	// Send enough to be able to pay from both directions.
+	let payment_amount = 1_000_000;
+	send_payment(&nodes[0], &[&nodes[1]], payment_amount * 4);
+
+	// Propose quiescence from nodes[1], and immediately try to send a payment. Since its `stfu` has
+	// already gone out first, the outbound HTLC will go into the holding cell.
+	nodes[1].node.maybe_propose_quiescence(&node_id_0, &chan_id).unwrap();
+	let stfu = get_event_msg!(&nodes[1], MessageSendEvent::SendStfu, node_id_0);
+
+	let (route1, payment_hash1, payment_preimage1, payment_secret1) =
+		get_route_and_payment_hash!(&nodes[1], &nodes[0], payment_amount);
+	let onion1 = RecipientOnionFields::secret_only(payment_secret1);
+	let payment_id1 = PaymentId(payment_hash1.0);
+	nodes[1].node.send_payment_with_route(route1, payment_hash1, onion1, payment_id1).unwrap();
+	check_added_monitors!(&nodes[1], 0);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	// Send a payment in the opposite direction. Since nodes[0] hasn't sent its own `stfu` yet, it's
+	// allowed to make updates.
+	let (route2, payment_hash2, payment_preimage2, payment_secret2) =
+		get_route_and_payment_hash!(&nodes[0], &nodes[1], payment_amount);
+	let onion2 = RecipientOnionFields::secret_only(payment_secret2);
+	let payment_id2 = PaymentId(payment_hash2.0);
+	nodes[0].node.send_payment_with_route(route2, payment_hash2, onion2, payment_id2).unwrap();
+	check_added_monitors!(&nodes[0], 1);
+
+	let update_add = get_htlc_update_msgs!(&nodes[0], node_id_1);
+	nodes[1].node.handle_update_add_htlc(node_id_0, &update_add.update_add_htlcs[0]);
+	commitment_signed_dance!(&nodes[1], &nodes[0], update_add.commitment_signed, false);
+	expect_pending_htlcs_forwardable!(&nodes[1]);
+	expect_payment_claimable!(nodes[1], payment_hash2, payment_secret2, payment_amount);
+
+	// Have nodes[1] attempt to fail/claim nodes[0]'s payment. Since nodes[1] already sent out
+	// `stfu`, the `update_fail/fulfill` will go into the holding cell.
+	if fail_htlc {
+		nodes[1].node.fail_htlc_backwards(&payment_hash2);
+		let failed_payment = HTLCDestination::FailedPayment { payment_hash: payment_hash2 };
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![failed_payment]);
+	} else {
+		nodes[1].node.claim_funds(payment_preimage2);
+		check_added_monitors(&nodes[1], 1);
+	}
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	// Finish the quiescence handshake.
+	nodes[0].node.handle_stfu(node_id_1, &stfu);
+	let stfu = get_event_msg!(&nodes[0], MessageSendEvent::SendStfu, node_id_1);
+	nodes[1].node.handle_stfu(node_id_0, &stfu);
+
+	nodes[0].node.exit_quiescence(&node_id_1, &chan_id).unwrap();
+	nodes[1].node.exit_quiescence(&node_id_0, &chan_id).unwrap();
+
+	// Now that quiescence is over, nodes are allowed to make updates again. nodes[1] will have its
+	// outbound HTLC finally go out, along with the fail/claim of nodes[0]'s payment.
+	let update = get_htlc_update_msgs!(&nodes[1], node_id_0);
+	check_added_monitors(&nodes[1], 1);
+	nodes[0].node.handle_update_add_htlc(node_id_1, &update.update_add_htlcs[0]);
+	if fail_htlc {
+		nodes[0].node.handle_update_fail_htlc(node_id_1, &update.update_fail_htlcs[0]);
+	} else {
+		nodes[0].node.handle_update_fulfill_htlc(node_id_1, &update.update_fulfill_htlcs[0]);
+	}
+	commitment_signed_dance!(&nodes[0], &nodes[1], update.commitment_signed, false);
+
+	if !fail_htlc {
+		expect_payment_claimed!(nodes[1], payment_hash2, payment_amount);
+	}
+
+	// The payment from nodes[0] should now be seen as failed/successful.
+	let events = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 3);
+	assert!(events.iter().find(|e| matches!(e, Event::PendingHTLCsForwardable { .. })).is_some());
+	if fail_htlc {
+		assert!(events.iter().find(|e| matches!(e, Event::PaymentFailed { .. })).is_some());
+		assert!(events.iter().find(|e| matches!(e, Event::PaymentPathFailed { .. })).is_some());
+	} else {
+		assert!(events.iter().find(|e| matches!(e, Event::PaymentSent { .. })).is_some());
+		assert!(events.iter().find(|e| matches!(e, Event::PaymentPathSuccessful { .. })).is_some());
+		check_added_monitors(&nodes[0], 1);
+	}
+	nodes[0].node.process_pending_htlc_forwards();
+	expect_payment_claimable!(nodes[0], payment_hash1, payment_secret1, payment_amount);
+
+	// Have nodes[0] fail/claim nodes[1]'s payment.
+	if fail_htlc {
+		nodes[0].node.fail_htlc_backwards(&payment_hash1);
+		let failed_payment = HTLCDestination::FailedPayment { payment_hash: payment_hash1 };
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[0], vec![failed_payment]);
+	} else {
+		nodes[0].node.claim_funds(payment_preimage1);
+	}
+	check_added_monitors(&nodes[0], 1);
+
+	let update = get_htlc_update_msgs!(&nodes[0], node_id_1);
+	if fail_htlc {
+		nodes[1].node.handle_update_fail_htlc(node_id_0, &update.update_fail_htlcs[0]);
+	} else {
+		nodes[1].node.handle_update_fulfill_htlc(node_id_0, &update.update_fulfill_htlcs[0]);
+	}
+	commitment_signed_dance!(&nodes[1], &nodes[0], update.commitment_signed, false);
+
+	// The payment from nodes[1] should now be seen as failed/successful.
+	if fail_htlc {
+		let conditions = PaymentFailedConditions::new();
+		expect_payment_failed_conditions(&nodes[1], payment_hash1, true, conditions);
+	} else {
+		expect_payment_claimed!(nodes[0], payment_hash1, payment_amount);
+		expect_payment_sent(&nodes[1], payment_preimage1, None, true, true);
+	}
+}