Wait to free the holding cell during channel_reestablish handling

When we process a `channel_reestablish` message we free the HTLC
update holding cell as things may have changed while we were
disconnected. However, some time ago, to handle freeing from the
holding cell when a monitor update completes, we added a holding
cell freeing check in `get_and_clear_pending_msg_events`. This
leaves the in-`channel_reestablish` holding cell clear redundant,
as doing it immediately or in `get_and_clear_pending_msg_events` is
not a user-visible difference.

Thus, we remove the redundant code here, substantially simplifying
`handle_chan_restoration_locked` while we're at it.
Matt Corallo 2022-11-17 05:48:21 +00:00
parent c8c0997862
commit 7e9b88a5cd
5 changed files with 30 additions and 113 deletions
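
Before the diffs, a minimal sketch of the deferral the message above relies on. The types and method names below are illustrative stand-ins, not LDK's real API; the point is only that draining the holding cell when pending message events are collected is indistinguishable, from the user's side, from draining it during reestablish handling.

// Illustrative stand-ins, not LDK's real types or methods.
#[derive(Debug)]
enum MsgEvent { CommitmentUpdate }

#[derive(Default)]
struct Channel {
    // HTLC updates queued while we couldn't send them (the "holding cell").
    holding_cell: Vec<MsgEvent>,
}

impl Channel {
    // Post-commit, `channel_reestablish` handling only resends lost state;
    // it no longer calls anything like `free_holding_cell_htlcs` itself.
    fn channel_reestablish(&mut self) { /* resend-only; holding cell untouched */ }
}

#[derive(Default)]
struct Manager {
    channel: Channel,
    pending_msg_events: Vec<MsgEvent>,
}

impl Manager {
    // The holding cell is drained lazily here instead, a path LDK already
    // had for monitor-update completion. Whether the free happens in the
    // reestablish handler or here is not visible to the user, who only
    // observes the messages this call returns.
    fn get_and_clear_pending_msg_events(&mut self) -> Vec<MsgEvent> {
        self.pending_msg_events.append(&mut self.channel.holding_cell);
        std::mem::take(&mut self.pending_msg_events)
    }
}

fn main() {
    let mut mgr = Manager::default();
    mgr.channel.holding_cell.push(MsgEvent::CommitmentUpdate);
    mgr.channel.channel_reestablish(); // frees nothing any more
    println!("{:?}", mgr.get_and_clear_pending_msg_events()); // [CommitmentUpdate]
}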

lightning/src/ln/chanmon_update_fail_tests.rs

@@ -1128,6 +1128,7 @@ fn test_monitor_update_fail_reestablish() {
     get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
       .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
+  nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell
   check_added_monitors!(nodes[1], 1);
   nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);

lightning/src/ln/channel.rs

@@ -439,8 +439,6 @@ pub(super) struct ReestablishResponses {
   pub raa: Option<msgs::RevokeAndACK>,
   pub commitment_update: Option<msgs::CommitmentUpdate>,
   pub order: RAACommitmentOrder,
-  pub mon_update: Option<ChannelMonitorUpdate>,
-  pub holding_cell_failed_htlcs: Vec<(HTLCSource, PaymentHash)>,
   pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
   pub shutdown_msg: Option<msgs::Shutdown>,
 }
@@ -3955,9 +3953,8 @@ impl<Signer: Sign> Channel<Signer> {
       // Short circuit the whole handler as there is nothing we can resend them
       return Ok(ReestablishResponses {
         channel_ready: None,
-        raa: None, commitment_update: None, mon_update: None,
+        raa: None, commitment_update: None,
         order: RAACommitmentOrder::CommitmentFirst,
-        holding_cell_failed_htlcs: Vec::new(),
         shutdown_msg, announcement_sigs,
       });
     }
@@ -3970,9 +3967,8 @@ impl<Signer: Sign> Channel<Signer> {
         next_per_commitment_point,
         short_channel_id_alias: Some(self.outbound_scid_alias),
       }),
-      raa: None, commitment_update: None, mon_update: None,
+      raa: None, commitment_update: None,
       order: RAACommitmentOrder::CommitmentFirst,
-      holding_cell_failed_htlcs: Vec::new(),
       shutdown_msg, announcement_sigs,
     });
   }
@@ -4015,46 +4011,12 @@ impl<Signer: Sign> Channel<Signer> {
       log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.channel_id()));
     }
 
-    if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
-      // We're up-to-date and not waiting on a remote revoke (if we are our
-      // channel_reestablish should result in them sending a revoke_and_ack), but we may
-      // have received some updates while we were disconnected. Free the holding cell
-      // now!
-      match self.free_holding_cell_htlcs(logger) {
-        Err(ChannelError::Close(msg)) => Err(ChannelError::Close(msg)),
-        Err(ChannelError::Warn(_)) | Err(ChannelError::Ignore(_)) =>
-          panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
-        Ok((Some((commitment_update, monitor_update)), holding_cell_failed_htlcs)) => {
-          Ok(ReestablishResponses {
-            channel_ready, shutdown_msg, announcement_sigs,
-            raa: required_revoke,
-            commitment_update: Some(commitment_update),
-            order: self.resend_order.clone(),
-            mon_update: Some(monitor_update),
-            holding_cell_failed_htlcs,
-          })
-        },
-        Ok((None, holding_cell_failed_htlcs)) => {
-          Ok(ReestablishResponses {
-            channel_ready, shutdown_msg, announcement_sigs,
-            raa: required_revoke,
-            commitment_update: None,
-            order: self.resend_order.clone(),
-            mon_update: None,
-            holding_cell_failed_htlcs,
-          })
-        },
-      }
-    } else {
-      Ok(ReestablishResponses {
-        channel_ready, shutdown_msg, announcement_sigs,
-        raa: required_revoke,
-        commitment_update: None,
-        order: self.resend_order.clone(),
-        mon_update: None,
-        holding_cell_failed_htlcs: Vec::new(),
-      })
-    }
+    Ok(ReestablishResponses {
+      channel_ready, shutdown_msg, announcement_sigs,
+      raa: required_revoke,
+      commitment_update: None,
+      order: self.resend_order.clone(),
+    })
   } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
     if required_revoke.is_some() {
       log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.channel_id()));
@@ -4066,9 +4028,8 @@ impl<Signer: Sign> Channel<Signer> {
       self.monitor_pending_commitment_signed = true;
       Ok(ReestablishResponses {
         channel_ready, shutdown_msg, announcement_sigs,
-        commitment_update: None, raa: None, mon_update: None,
+        commitment_update: None, raa: None,
         order: self.resend_order.clone(),
-        holding_cell_failed_htlcs: Vec::new(),
       })
     } else {
       Ok(ReestablishResponses {
@@ -4076,8 +4037,6 @@ impl<Signer: Sign> Channel<Signer> {
         raa: required_revoke,
         commitment_update: Some(self.get_last_commitment_update(logger)),
         order: self.resend_order.clone(),
-        mon_update: None,
-        holding_cell_failed_htlcs: Vec::new(),
       })
     }
   } else {

lightning/src/ln/channelmanager.rs

@@ -1518,13 +1518,11 @@ macro_rules! emit_channel_ready_event {
   }
 }
 
 macro_rules! handle_chan_restoration_locked {
-  ($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr,
-   $raa: expr, $commitment_update: expr, $order: expr, $chanmon_update: expr,
+  ($self: ident, $channel_state: expr, $channel_entry: expr,
+   $raa: expr, $commitment_update: expr, $order: expr,
    $pending_forwards: expr, $funding_broadcastable: expr, $channel_ready: expr, $announcement_sigs: expr) => { {
     let mut htlc_forwards = None;
 
-    let chanmon_update: Option<ChannelMonitorUpdate> = $chanmon_update; // Force type-checking to resolve
-    let chanmon_update_is_none = chanmon_update.is_none();
     let counterparty_node_id = $channel_entry.get().get_counterparty_node_id();
     let res = loop {
       let forwards: Vec<(PendingHTLCInfo, u64)> = $pending_forwards; // Force type-checking to resolve
@@ -1533,24 +1531,7 @@ macro_rules! handle_chan_restoration_locked {
           $channel_entry.get().get_funding_txo().unwrap(), forwards));
       }
 
-      if chanmon_update.is_some() {
-        // On reconnect, we, by definition, only resend a channel_ready if there have been
-        // no commitment updates, so the only channel monitor update which could also be
-        // associated with a channel_ready would be the funding_created/funding_signed
-        // monitor update. That monitor update failing implies that we won't send
-        // channel_ready until it's been updated, so we can't have a channel_ready and a
-        // monitor update here (so we don't bother to handle it correctly below).
-        assert!($channel_ready.is_none());
-        // A channel monitor update makes no sense without either a channel_ready or a
-        // commitment update to process after it. Since we can't have a channel_ready, we
-        // only bother to handle the monitor-update + commitment_update case below.
-        assert!($commitment_update.is_some());
-      }
       if let Some(msg) = $channel_ready {
-        // Similar to the above, this implies that we're letting the channel_ready fly
-        // before it should be allowed to.
-        assert!(chanmon_update.is_none());
         send_channel_ready!($self, $channel_state.pending_msg_events, $channel_entry.get(), msg);
       }
       if let Some(msg) = $announcement_sigs {
@@ -1562,33 +1543,6 @@ macro_rules! handle_chan_restoration_locked {
       emit_channel_ready_event!($self, $channel_entry.get_mut());
 
-      let funding_broadcastable: Option<Transaction> = $funding_broadcastable; // Force type-checking to resolve
-      if let Some(monitor_update) = chanmon_update {
-        // We only ever broadcast a funding transaction in response to a funding_signed
-        // message and the resulting monitor update. Thus, on channel_reestablish
-        // message handling we can't have a funding transaction to broadcast. When
-        // processing a monitor update finishing resulting in a funding broadcast, we
-        // cannot have a second monitor update, thus this case would indicate a bug.
-        assert!(funding_broadcastable.is_none());
-        // Given we were just reconnected or finished updating a channel monitor, the
-        // only case where we can get a new ChannelMonitorUpdate would be if we also
-        // have some commitment updates to send as well.
-        assert!($commitment_update.is_some());
-        match $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
-          ChannelMonitorUpdateStatus::Completed => {},
-          e => {
-            // channel_reestablish doesn't guarantee the order it returns is sensical
-            // for the messages it returns, but if we're setting what messages to
-            // re-transmit on monitor update success, we need to make sure it is sane.
-            let mut order = $order;
-            if $raa.is_none() {
-              order = RAACommitmentOrder::CommitmentFirst;
-            }
-            break handle_monitor_update_res!($self, e, $channel_entry, order, $raa.is_some(), true);
-          }
-        }
-      }
       macro_rules! handle_cs { () => {
         if let Some(update) = $commitment_update {
           $channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
@@ -1615,6 +1569,8 @@ macro_rules! handle_chan_restoration_locked {
           handle_cs!();
         },
       }
+
+      let funding_broadcastable: Option<Transaction> = $funding_broadcastable; // Force type-checking to resolve
       if let Some(tx) = funding_broadcastable {
         log_info!($self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
         $self.tx_broadcaster.broadcast_transaction(&tx);
@@ -1622,13 +1578,6 @@ macro_rules! handle_chan_restoration_locked {
       break Ok(());
     };
 
-    if chanmon_update_is_none {
-      // If there was no ChannelMonitorUpdate, we should never generate an Err in the res loop
-      // above. Doing so would imply calling handle_err!() from channel_monitor_updated() which
-      // should *never* end up calling back to `chain_monitor.update_channel()`.
-      assert!(res.is_ok());
-    }
     (htlc_forwards, res, counterparty_node_id)
   } }
 }
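
With the monitor-update path gone, the surviving body of `handle_chan_restoration_locked` is plain message sequencing: `channel_ready` and `announcement_sigs` first, then the revoke-and-ack and commitment update in the channel's resend order, then any funding broadcast. A self-contained sketch of that ordering, with stand-in types rather than LDK's:

// Stand-in types; the real macro operates on LDK's ChannelManager state.
enum Order { CommitmentFirst, RevokeAndACKFirst }

#[derive(Debug, PartialEq)]
enum Msg { ChannelReady, AnnouncementSigs, CommitmentUpdate, RevokeAndACK }

// Mirrors the post-commit control flow: channel_ready/announcement_sigs
// first, then RAA and commitment update in the channel's resend order.
// (The funding-broadcast step at the end of the macro is elided here.)
fn restore_order(channel_ready: bool, announcement_sigs: bool,
                 raa: bool, commitment_update: bool, order: Order) -> Vec<Msg> {
    let mut out = Vec::new();
    if channel_ready { out.push(Msg::ChannelReady); }
    if announcement_sigs { out.push(Msg::AnnouncementSigs); }
    let ordered = match order {
        Order::CommitmentFirst =>
            [(Msg::CommitmentUpdate, commitment_update), (Msg::RevokeAndACK, raa)],
        Order::RevokeAndACKFirst =>
            [(Msg::RevokeAndACK, raa), (Msg::CommitmentUpdate, commitment_update)],
    };
    for (msg, have) in ordered {
        if have { out.push(msg); }
    }
    out
}

fn main() {
    assert_eq!(restore_order(false, false, true, true, Order::RevokeAndACKFirst),
               vec![Msg::RevokeAndACK, Msg::CommitmentUpdate]);
    assert_eq!(restore_order(true, false, false, true, Order::CommitmentFirst),
               vec![Msg::ChannelReady, Msg::CommitmentUpdate]);
}
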
@@ -4520,7 +4469,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
         })
       } else { None }
     } else { None };
-    chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, updates.raa, updates.commitment_update, updates.order, None, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
+    chan_restoration_res = handle_chan_restoration_locked!(self, channel_state, channel, updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
     if let Some(upd) = channel_update {
       channel_state.pending_msg_events.push(upd);
     }
@@ -5279,7 +5228,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
   fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
     let chan_restoration_res;
-    let (htlcs_failed_forward, need_lnd_workaround) = {
+    let need_lnd_workaround = {
       let mut channel_state_lock = self.channel_state.lock().unwrap();
       let channel_state = &mut *channel_state_lock;
@@ -5314,18 +5263,17 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
         }
         let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
         chan_restoration_res = handle_chan_restoration_locked!(
-          self, channel_state_lock, channel_state, chan, responses.raa, responses.commitment_update, responses.order,
-          responses.mon_update, Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
+          self, channel_state, chan, responses.raa, responses.commitment_update, responses.order,
+          Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
         if let Some(upd) = channel_update {
           channel_state.pending_msg_events.push(upd);
         }
-        (responses.holding_cell_failed_htlcs, need_lnd_workaround)
+        need_lnd_workaround
       },
       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
     }
   };
   post_handle_chan_restoration!(self, chan_restoration_res);
-  self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id, counterparty_node_id);
   if let Some(channel_ready_msg) = need_lnd_workaround {
     self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;

lightning/src/ln/functional_test_utils.rs

@@ -2413,6 +2413,14 @@ macro_rules! handle_chan_reestablish_msgs {
       assert_eq!(*node_id, $dst_node.node.get_our_node_id());
     }
 
+    let mut had_channel_update = false; // ChannelUpdate may be now or later, but not both
+    if let Some(&MessageSendEvent::SendChannelUpdate { ref node_id, ref msg }) = msg_events.get(idx) {
+      assert_eq!(*node_id, $dst_node.node.get_our_node_id());
+      idx += 1;
+      assert_eq!(msg.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected.
+      had_channel_update = true;
+    }
+
     let mut revoke_and_ack = None;
     let mut commitment_update = None;
     let order = if let Some(ev) = msg_events.get(idx) {
@@ -2457,6 +2465,7 @@ macro_rules! handle_chan_reestablish_msgs {
       assert_eq!(*node_id, $dst_node.node.get_our_node_id());
       idx += 1;
       assert_eq!(msg.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected.
+      assert!(!had_channel_update);
     }
     assert_eq!(msg_events.len(), idx);
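
The hunks above make the test helper accept a `SendChannelUpdate` either before or after the HTLC messages, while asserting it shows up at most once. A toy version of that either-position check (illustrative event type, not the real `MessageSendEvent`):

#[derive(PartialEq, Debug)]
enum Ev { ChannelUpdate, RevokeAndACK }

// Accept an optional ChannelUpdate at the front, then the other messages,
// then optionally one at the back, but never both, mirroring the new asserts.
fn check_events(events: &[Ev]) {
    let mut idx = 0;
    let mut had_channel_update = false;
    if events.get(idx) == Some(&Ev::ChannelUpdate) {
        idx += 1;
        had_channel_update = true;
    }
    while events.get(idx).map_or(false, |e| *e != Ev::ChannelUpdate) {
        idx += 1; // the HTLC/RAA messages the helper actually parses
    }
    if events.get(idx) == Some(&Ev::ChannelUpdate) {
        idx += 1;
        assert!(!had_channel_update); // now or later, but not both
    }
    assert_eq!(events.len(), idx);
}

fn main() {
    check_events(&[Ev::ChannelUpdate, Ev::RevokeAndACK]);
    check_events(&[Ev::RevokeAndACK, Ev::ChannelUpdate]);
    // check_events(&[Ev::ChannelUpdate, Ev::RevokeAndACK, Ev::ChannelUpdate]); // would panic
}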

lightning/src/ln/payment_tests.rs

@@ -786,9 +786,9 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
   let ds_msgs = nodes[3].node.get_and_clear_pending_msg_events();
   check_added_monitors!(nodes[3], 1);
   assert_eq!(ds_msgs.len(), 2);
-  if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[1] {} else { panic!(); }
-  let cs_updates = match ds_msgs[0] {
+  if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[0] {} else { panic!(); }
+  let cs_updates = match ds_msgs[1] {
     MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
       nodes[2].node.handle_update_fulfill_htlc(&nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
       check_added_monitors!(nodes[2], 1);
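
The index swap reflects the new ordering: the `SendChannelUpdate` queued while handling `channel_reestablish` now precedes the `UpdateHTLCs` generated when the holding cell is drained inside `get_and_clear_pending_msg_events`. A toy queue illustrating that ordering (hypothetical names, not LDK's types):

// Toy queue showing why SendChannelUpdate now lands at ds_msgs[0]:
// reestablish handling queues its messages first, and the holding-cell
// free only appends during get_and_clear.
#[derive(Debug, PartialEq)]
enum Ev { SendChannelUpdate, UpdateHTLCs }

struct Node { queued: Vec<Ev>, holding_cell_nonempty: bool }

impl Node {
    fn get_and_clear_pending_msg_events(&mut self) -> Vec<Ev> {
        if self.holding_cell_nonempty {
            self.holding_cell_nonempty = false;
            // Freed at collection time, hence ordered after anything the
            // reestablish handler already queued.
            self.queued.push(Ev::UpdateHTLCs);
        }
        std::mem::take(&mut self.queued)
    }
}

fn main() {
    let mut node = Node { queued: vec![Ev::SendChannelUpdate], holding_cell_nonempty: true };
    let ds_msgs = node.get_and_clear_pending_msg_events();
    assert_eq!(ds_msgs, vec![Ev::SendChannelUpdate, Ev::UpdateHTLCs]);
}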