Add HTLCsTimedOut closing reason

Previously, a force closure caused by timed-out HTLCs was treated the
same as a manual force closure by the user, which led to various UX
issues. This adds a new `ClosureReason` called `HTLCsTimedOut` that
signifies the closure was caused by HTLCs timing out. Along with this,
the error message we send the peer when force closing used to always
say "Channel force-closed", which was ambiguous; now we send the force
closure reason so the peer can know why the channel was closed.
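
For downstream code that consumes `Event::ChannelClosed`, this means an
HTLC-timeout force closure can now be told apart from a user-initiated
one. A minimal sketch of such an event handler (illustrative only, not
part of this commit; the `handle_closure` wrapper is hypothetical):

	use lightning::events::{ClosureReason, Event};

	fn handle_closure(event: Event) {
		if let Event::ChannelClosed { reason, .. } = event {
			match reason {
				// New in this commit: the channel was force-closed because HTLCs timed out.
				ClosureReason::HTLCsTimedOut => eprintln!("force-closed: {}", reason),
				// Before this change, HTLC-timeout closures were also reported as this variant.
				ClosureReason::HolderForceClosed => eprintln!("force-closed by us: {}", reason),
				other => eprintln!("channel closed: {}", other),
			}
		}
	}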
benthecarman 2024-02-20 18:47:50 +00:00
parent 2c9dbb959d
commit 9b5ebc4bec
6 changed files with 79 additions and 20 deletions


@@ -50,7 +50,7 @@ use crate::chain::Filter;
 use crate::util::logger::{Logger, Record};
 use crate::util::ser::{Readable, ReadableArgs, RequiredWrapper, MaybeReadable, UpgradableRequired, Writer, Writeable, U48};
 use crate::util::byte_utils;
-use crate::events::{Event, EventHandler};
+use crate::events::{ClosureReason, Event, EventHandler};
 use crate::events::bump_transaction::{AnchorDescriptor, BumpTransactionEvent};
 use crate::prelude::*;
@@ -155,6 +155,17 @@ pub enum MonitorEvent {
 	/// A monitor event containing an HTLCUpdate.
 	HTLCEvent(HTLCUpdate),
+	/// Indicates we broadcasted the channel's latest commitment transaction and thus closed the
+	/// channel. Holds information about the channel and why it was closed.
+	HolderForceClosedWithInfo {
+		/// The reason the channel was closed.
+		reason: ClosureReason,
+		/// The funding outpoint of the channel.
+		outpoint: OutPoint,
+		/// The channel ID of the channel.
+		channel_id: ChannelId,
+	},
 	/// Indicates we broadcasted the channel's latest commitment transaction and thus closed the
 	/// channel.
 	HolderForceClosed(OutPoint),
@@ -184,6 +195,11 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorEvent,
 		(2, monitor_update_id, required),
 		(4, channel_id, required),
 	},
+	(5, HolderForceClosedWithInfo) => {
+		(0, reason, upgradable_required),
+		(2, outpoint, required),
+		(4, channel_id, required),
+	},
 ;
 	(2, HTLCEvent),
 	(4, HolderForceClosed),
@@ -1059,6 +1075,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for ChannelMonitorImpl<Signe
 		writer.write_all(&(self.pending_monitor_events.iter().filter(|ev| match ev {
 			MonitorEvent::HTLCEvent(_) => true,
 			MonitorEvent::HolderForceClosed(_) => true,
+			MonitorEvent::HolderForceClosedWithInfo { .. } => true,
 			_ => false,
 		}).count() as u64).to_be_bytes())?;
 		for event in self.pending_monitor_events.iter() {
@@ -1068,6 +1085,10 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for ChannelMonitorImpl<Signe
 					upd.write(writer)?;
 				},
 				MonitorEvent::HolderForceClosed(_) => 1u8.write(writer)?,
+				// `HolderForceClosedWithInfo` replaced `HolderForceClosed` in v0.0.122. To keep
+				// backwards compatibility, we write a `HolderForceClosed` event along with the
+				// `HolderForceClosedWithInfo` event. This is deduplicated in the reader.
+				MonitorEvent::HolderForceClosedWithInfo { .. } => 1u8.write(writer)?,
 				_ => {}, // Covered in the TLV writes below
 			}
 		}
@@ -1099,10 +1120,23 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for ChannelMonitorImpl<Signe
 		self.lockdown_from_offchain.write(writer)?;
 		self.holder_tx_signed.write(writer)?;
+		// If we have a `HolderForceClosedWithInfo` event, we need to write the `HolderForceClosed` for backwards compatibility.
+		let pending_monitor_events = match self.pending_monitor_events.iter().find(|ev| match ev {
+			MonitorEvent::HolderForceClosedWithInfo { .. } => true,
+			_ => false,
+		}) {
+			Some(MonitorEvent::HolderForceClosedWithInfo { outpoint, .. }) => {
+				let mut pending_monitor_events = self.pending_monitor_events.clone();
+				pending_monitor_events.push(MonitorEvent::HolderForceClosed(*outpoint));
+				pending_monitor_events
+			}
+			_ => self.pending_monitor_events.clone(),
+		};
 		write_tlv_fields!(writer, {
 			(1, self.funding_spend_confirmed, option),
 			(3, self.htlcs_resolved_on_chain, required_vec),
-			(5, self.pending_monitor_events, required_vec),
+			(5, pending_monitor_events, required_vec),
 			(7, self.funding_spend_seen, required),
 			(9, self.counterparty_node_id, option),
 			(11, self.confirmed_commitment_tx_counterparty_output, option),
@@ -2727,7 +2761,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 		}
 	}
-	fn generate_claimable_outpoints_and_watch_outputs(&mut self) -> (Vec<PackageTemplate>, Vec<TransactionOutputs>) {
+	fn generate_claimable_outpoints_and_watch_outputs(&mut self, reason: ClosureReason) -> (Vec<PackageTemplate>, Vec<TransactionOutputs>) {
 		let funding_outp = HolderFundingOutput::build(
 			self.funding_redeemscript.clone(),
 			self.channel_value_satoshis,
@@ -2739,7 +2773,13 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 			self.best_block.height, self.best_block.height
 		);
 		let mut claimable_outpoints = vec![commitment_package];
-		self.pending_monitor_events.push(MonitorEvent::HolderForceClosed(self.funding_info.0));
+		let event = MonitorEvent::HolderForceClosedWithInfo {
+			reason,
+			outpoint: self.funding_info.0,
+			channel_id: self.channel_id,
+		};
+		self.pending_monitor_events.push(event);
 		// Although we aren't signing the transaction directly here, the transaction will be signed
 		// in the claim that is queued to OnchainTxHandler. We set holder_tx_signed here to reject
 		// new channel updates.
@@ -2775,7 +2815,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 		F::Target: FeeEstimator,
 		L::Target: Logger,
 	{
-		let (claimable_outpoints, _) = self.generate_claimable_outpoints_and_watch_outputs();
+		let (claimable_outpoints, _) = self.generate_claimable_outpoints_and_watch_outputs(ClosureReason::HolderForceClosed);
 		self.onchain_tx_handler.update_claims_view_from_requests(
 			claimable_outpoints, self.best_block.height, self.best_block.height, broadcaster,
 			fee_estimator, logger
@@ -3778,7 +3818,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 		let should_broadcast = self.should_broadcast_holder_commitment_txn(logger);
 		if should_broadcast {
-			let (mut new_outpoints, mut new_outputs) = self.generate_claimable_outpoints_and_watch_outputs();
+			let (mut new_outpoints, mut new_outputs) = self.generate_claimable_outpoints_and_watch_outputs(ClosureReason::HTLCsTimedOut);
 			claimable_outpoints.append(&mut new_outpoints);
 			watch_outputs.append(&mut new_outputs);
 		}
@@ -4605,6 +4645,16 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
 			(19, channel_id, option),
 		});
+		// `HolderForceClosedWithInfo` replaced `HolderForceClosed` in v0.0.122. If we have both
+		// events, we can remove the `HolderForceClosed` event and just keep the `HolderForceClosedWithInfo`.
+		if let Some(ref mut pending_monitor_events) = pending_monitor_events {
+			if pending_monitor_events.iter().any(|e| matches!(e, MonitorEvent::HolderForceClosed(_))) &&
+				pending_monitor_events.iter().any(|e| matches!(e, MonitorEvent::HolderForceClosedWithInfo { .. }))
+			{
+				pending_monitor_events.retain(|e| !matches!(e, MonitorEvent::HolderForceClosed(_)));
+			}
+		}
+
 		// Monitors for anchor outputs channels opened in v0.0.116 suffered from a bug in which the
 		// wrong `counterparty_payment_script` was being tracked. Fix it now on deserialization to
 		// give them a chance to recognize the spendable output.
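
The backwards-compatibility scheme above (write a legacy `HolderForceClosed`
event alongside the new `HolderForceClosedWithInfo`, then drop the duplicate
when reading) can be sketched with toy stand-in types rather than LDK's own:

	#[derive(Clone, Debug, PartialEq)]
	enum ToyMonitorEvent {
		// Legacy variant, the only one old readers understand.
		HolderForceClosed(u32),
		// Richer variant carrying the closure reason; stand-in for the new event.
		HolderForceClosedWithInfo { outpoint: u32, reason: String },
	}

	// Writer side: if a `WithInfo` event is pending, also emit its legacy twin so
	// that old readers still learn about the force closure.
	fn events_to_write(pending: &[ToyMonitorEvent]) -> Vec<ToyMonitorEvent> {
		let mut out = pending.to_vec();
		if let Some(ToyMonitorEvent::HolderForceClosedWithInfo { outpoint, .. }) =
			pending.iter().find(|e| matches!(e, ToyMonitorEvent::HolderForceClosedWithInfo { .. }))
		{
			out.push(ToyMonitorEvent::HolderForceClosed(*outpoint));
		}
		out
	}

	// Reader side: if both copies survived the round trip, keep only the richer one.
	fn dedup_on_read(events: &mut Vec<ToyMonitorEvent>) {
		if events.iter().any(|e| matches!(e, ToyMonitorEvent::HolderForceClosed(_)))
			&& events.iter().any(|e| matches!(e, ToyMonitorEvent::HolderForceClosedWithInfo { .. }))
		{
			events.retain(|e| !matches!(e, ToyMonitorEvent::HolderForceClosed(_)));
		}
	}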


@@ -232,6 +232,8 @@ pub enum ClosureReason {
 	/// Another channel in the same funding batch closed before the funding transaction
 	/// was ready to be broadcast.
 	FundingBatchClosure,
+	/// One of our HTLCs timed out in a channel, causing us to force close the channel.
+	HTLCsTimedOut,
 }

 impl core::fmt::Display for ClosureReason {
@@ -241,7 +243,7 @@ impl core::fmt::Display for ClosureReason {
 			ClosureReason::CounterpartyForceClosed { peer_msg } => {
 				f.write_fmt(format_args!("counterparty force-closed with message: {}", peer_msg))
 			},
-			ClosureReason::HolderForceClosed => f.write_str("user manually force-closed the channel"),
+			ClosureReason::HolderForceClosed => f.write_str("user force-closed the channel"),
 			ClosureReason::LegacyCooperativeClosure => f.write_str("the channel was cooperatively closed"),
 			ClosureReason::CounterpartyInitiatedCooperativeClosure => f.write_str("the channel was cooperatively closed by our peer"),
 			ClosureReason::LocallyInitiatedCooperativeClosure => f.write_str("the channel was cooperatively closed by us"),
@@ -255,6 +257,7 @@ impl core::fmt::Display for ClosureReason {
 			ClosureReason::OutdatedChannelManager => f.write_str("the ChannelManager read from disk was stale compared to ChannelMonitor(s)"),
 			ClosureReason::CounterpartyCoopClosedUnfundedChannel => f.write_str("the peer requested the unfunded channel be closed"),
 			ClosureReason::FundingBatchClosure => f.write_str("another channel in the same funding batch closed"),
+			ClosureReason::HTLCsTimedOut => f.write_str("htlcs on the channel timed out"),
 		}
 	}
 }
@@ -272,6 +275,7 @@ impl_writeable_tlv_based_enum_upgradable!(ClosureReason,
 	(15, FundingBatchClosure) => {},
 	(17, CounterpartyInitiatedCooperativeClosure) => {},
 	(19, LocallyInitiatedCooperativeClosure) => {},
+	(21, HTLCsTimedOut) => {},
 );

 /// Intended destination of a failed HTLC as indicated in [`Event::HTLCHandlingFailed`].


@@ -7374,7 +7374,7 @@ where
 					self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
 				}
 			},
-			MonitorEvent::HolderForceClosed(_funding_outpoint) => {
+			MonitorEvent::HolderForceClosed(_) | MonitorEvent::HolderForceClosedWithInfo { .. } => {
 				let counterparty_node_id_opt = match counterparty_node_id {
 					Some(cp_id) => Some(cp_id),
 					None => {
@@ -7392,7 +7392,12 @@ where
 					let pending_msg_events = &mut peer_state.pending_msg_events;
 					if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id) {
 						if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) {
-							failed_channels.push(chan.context.force_shutdown(false, ClosureReason::HolderForceClosed));
+							let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
+								reason
+							} else {
+								ClosureReason::HolderForceClosed
+							};
+							failed_channels.push(chan.context.force_shutdown(false, reason.clone()));
 							if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
 								pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 									msg: update
@@ -7401,7 +7406,7 @@ where
 								pending_msg_events.push(events::MessageSendEvent::HandleError {
 									node_id: chan.context.get_counterparty_node_id(),
 									action: msgs::ErrorAction::DisconnectPeer {
-										msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() })
+										msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: reason.to_string() })
 									},
 								});
 							}


@@ -2417,7 +2417,7 @@ fn channel_monitor_network_test() {
 	}
 	check_added_monitors!(nodes[4], 1);
 	test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
-	check_closed_event!(nodes[4], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[4], 1, ClosureReason::HTLCsTimedOut, [nodes[3].node.get_our_node_id()], 100000);

 	mine_transaction(&nodes[4], &node_txn[0]);
 	check_preimage_claim(&nodes[4], &node_txn);
@@ -2430,7 +2430,7 @@ fn channel_monitor_network_test() {
 	assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
 		Ok(ChannelMonitorUpdateStatus::Completed));
-	check_closed_event!(nodes[3], 1, ClosureReason::HolderForceClosed, [nodes[4].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[3], 1, ClosureReason::HTLCsTimedOut, [nodes[4].node.get_our_node_id()], 100000);
 }

 #[test]
@@ -5682,7 +5682,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
 	test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
 	check_closed_broadcast!(nodes[1], true);
 	check_added_monitors!(nodes[1], 1);
-	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 100000);
 }

 fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
@@ -5713,7 +5713,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
 	test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
 	check_closed_broadcast!(nodes[0], true);
 	check_added_monitors!(nodes[0], 1);
-	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
 }

 fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
@@ -5759,7 +5759,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
 		test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
 		check_closed_broadcast!(nodes[0], true);
 		check_added_monitors!(nodes[0], 1);
-		check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+		check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
 	} else {
 		expect_payment_failed!(nodes[0], our_payment_hash, true);
 	}
@@ -8654,7 +8654,7 @@ fn test_concurrent_monitor_claim() {
 	let height = HTLC_TIMEOUT_BROADCAST + 1;
 	connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
 	check_closed_broadcast(&nodes[0], 1, true);
-	check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed, false,
+	check_closed_event!(&nodes[0], 1, ClosureReason::HTLCsTimedOut, false,
 		[nodes[1].node.get_our_node_id()], 100000);
 	watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
 	check_added_monitors(&nodes[0], 1);


@@ -1188,14 +1188,14 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_
 	assert!(failed_payments.is_empty());
 	if let Event::PendingHTLCsForwardable { .. } = events[0] {} else { panic!(); }
 	match &events[1] {
-		Event::ChannelClosed { reason: ClosureReason::HolderForceClosed, .. } => {},
+		Event::ChannelClosed { reason: ClosureReason::HTLCsTimedOut, .. } => {},
 		_ => panic!(),
 	}

 	connect_blocks(&nodes[1], htlc_cltv_timeout + 1 - 10);
 	check_closed_broadcast!(nodes[1], true);
 	check_added_monitors!(nodes[1], 1);
-	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 1000000);
+	check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 1000000);

 	// Prior to channel closure, B considers the preimage HTLC as its own, and otherwise only
 	// lists the two on-chain timeout-able HTLCs as claimable balances.


@@ -465,7 +465,7 @@ fn test_set_outpoints_partial_claiming() {
 	// Connect blocks on node B
 	connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
 	check_closed_broadcast!(nodes[1], true);
-	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 1000000);
+	check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 1000000);
 	check_added_monitors!(nodes[1], 1);
 	// Verify node B broadcast 2 HTLC-timeout txn
 	let partial_claim_tx = {
@@ -807,7 +807,7 @@ fn do_test_retries_own_commitment_broadcast_after_reorg(anchors: bool, revoked_c
 	connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
 	check_closed_broadcast(&nodes[0], 1, true);
 	check_added_monitors(&nodes[0], 1);
-	check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100_000);
+	check_closed_event(&nodes[0], 1, ClosureReason::HTLCsTimedOut, false, &[nodes[1].node.get_our_node_id()], 100_000);
 	{
 		let mut txn = nodes[0].tx_broadcaster.txn_broadcast();