Merge pull request #2703 from wpaulino/retryable-commitment-broadcast

Refactor commitment broadcast to always go through OnchainTxHandler
Matt Corallo 2023-12-13 05:15:54 +00:00 committed by GitHub
commit 0dbf17b781
7 changed files with 361 additions and 136 deletions
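Before the per-file diffs, a conceptual sketch of the refactor may help. Holder commitment broadcasts no longer go straight to the `BroadcasterInterface`; instead the monitor queues a claim on the funding outpoint with the `OnchainTxHandler`, which then owns rebroadcasts, fee bumps, and later abandonment. The sketch below is a minimal, self-contained model of that flow; `ClaimQueue`, `Package`, and `OutPoint` here are hypothetical stand-ins for illustration, not LDK's actual types or API.

```rust
use std::collections::HashMap;

// Hypothetical stand-ins, not LDK types: the point is only the shape of the new flow,
// where a force-close queues a claim on the funding outpoint instead of handing a
// signed commitment transaction straight to the broadcaster.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct OutPoint { txid: [u8; 32], vout: u32 }

#[derive(Clone, Debug)]
struct Package { outpoints: Vec<OutPoint> }

#[derive(Default)]
struct ClaimQueue {
    pending: HashMap<u64, Package>,
    by_outpoint: HashMap<OutPoint, u64>,
    next_id: u64,
}

impl ClaimQueue {
    // Queue a package; retries and fee bumps are owned by the claim pipeline from here on
    // (loosely mirrors `update_claims_view_from_requests` receiving the holder
    // funding-output package built by `generate_claimable_outpoints_and_watch_outputs`).
    fn queue(&mut self, pkg: Package) -> u64 {
        let id = self.next_id;
        self.next_id += 1;
        for op in &pkg.outpoints {
            self.by_outpoint.insert(op.clone(), id);
        }
        self.pending.insert(id, pkg);
        id
    }

    // Loosely mirrors `abandon_claim`: drop whichever pending claim spends `outpoint`,
    // e.g. because the commitment it would spend was replaced by another confirmed one.
    fn abandon(&mut self, outpoint: &OutPoint) {
        if let Some(id) = self.by_outpoint.get(outpoint).copied() {
            if let Some(pkg) = self.pending.remove(&id) {
                for op in pkg.outpoints {
                    self.by_outpoint.remove(&op);
                }
            }
        }
    }
}

fn main() {
    let mut claims = ClaimQueue::default();
    let funding = OutPoint { txid: [0u8; 32], vout: 0 };
    // Force-close: queue the funding-outpoint claim rather than broadcasting directly.
    claims.queue(Package { outpoints: vec![funding.clone()] });
    assert_eq!(claims.pending.len(), 1);
    // A conflicting commitment confirmed instead: abandon the now-unclaimable package.
    claims.abandon(&funding);
    assert!(claims.pending.is_empty());
}
```

The `queue`/`abandon` pair loosely mirrors the new monitor path (`queue_latest_holder_commitment_txn_for_broadcast`) and the new `OnchainTxHandler::abandon_claim` shown in the diffs below.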

View file

@@ -2666,18 +2666,59 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
}
}
fn broadcast_latest_holder_commitment_txn<B: Deref, L: Deref>(&mut self, broadcaster: &B, logger: &WithChannelMonitor<L>)
where B::Target: BroadcasterInterface,
L::Target: Logger,
{
let commit_txs = self.get_latest_holder_commitment_txn(logger);
let mut txs = vec![];
for tx in commit_txs.iter() {
log_info!(logger, "Broadcasting local {}", log_tx!(tx));
txs.push(tx);
}
broadcaster.broadcast_transactions(&txs);
fn generate_claimable_outpoints_and_watch_outputs(&mut self) -> (Vec<PackageTemplate>, Vec<TransactionOutputs>) {
let funding_outp = HolderFundingOutput::build(
self.funding_redeemscript.clone(),
self.channel_value_satoshis,
self.onchain_tx_handler.channel_type_features().clone()
);
let commitment_package = PackageTemplate::build_package(
self.funding_info.0.txid.clone(), self.funding_info.0.index as u32,
PackageSolvingData::HolderFundingOutput(funding_outp),
self.best_block.height(), self.best_block.height()
);
let mut claimable_outpoints = vec![commitment_package];
self.pending_monitor_events.push(MonitorEvent::HolderForceClosed(self.funding_info.0));
// Although we aren't signing the transaction directly here, the transaction will be signed
// in the claim that is queued to OnchainTxHandler. We set holder_tx_signed here to reject
// new channel updates.
self.holder_tx_signed = true;
let mut watch_outputs = Vec::new();
// We can't broadcast our HTLC transactions while the commitment transaction is
// unconfirmed. We'll delay doing so until we detect the confirmed commitment in
// `transactions_confirmed`.
if !self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
// Because we're broadcasting a commitment transaction, we should construct the package
// assuming it gets confirmed in the next block. Sadly, we have code which considers
// "not yet confirmed" things as discardable, so we cannot do that here.
let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(
&self.current_holder_commitment_tx, self.best_block.height()
);
let unsigned_commitment_tx = self.onchain_tx_handler.get_unsigned_holder_commitment_tx();
let new_outputs = self.get_broadcasted_holder_watch_outputs(
&self.current_holder_commitment_tx, &unsigned_commitment_tx
);
if !new_outputs.is_empty() {
watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs));
}
claimable_outpoints.append(&mut new_outpoints);
}
(claimable_outpoints, watch_outputs)
}
pub(crate) fn queue_latest_holder_commitment_txn_for_broadcast<B: Deref, F: Deref, L: Deref>(
&mut self, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &WithChannelMonitor<L>
)
where
B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
let (claimable_outpoints, _) = self.generate_claimable_outpoints_and_watch_outputs();
self.onchain_tx_handler.update_claims_view_from_requests(
claimable_outpoints, self.best_block.height(), self.best_block.height(), broadcaster,
fee_estimator, logger
);
}
fn update_monitor<B: Deref, F: Deref, L: Deref>(
@@ -2767,26 +2808,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
log_trace!(logger, "Avoiding commitment broadcast, already detected confirmed spend onchain");
continue;
}
self.broadcast_latest_holder_commitment_txn(broadcaster, logger);
// If the channel supports anchor outputs, we'll need to emit an external
// event to be consumed such that a child transaction is broadcast with a
// high enough feerate for the parent commitment transaction to confirm.
if self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
let funding_output = HolderFundingOutput::build(
self.funding_redeemscript.clone(), self.channel_value_satoshis,
self.onchain_tx_handler.channel_type_features().clone(),
);
let best_block_height = self.best_block.height();
let commitment_package = PackageTemplate::build_package(
self.funding_info.0.txid.clone(), self.funding_info.0.index as u32,
PackageSolvingData::HolderFundingOutput(funding_output),
best_block_height, best_block_height
);
self.onchain_tx_handler.update_claims_view_from_requests(
vec![commitment_package], best_block_height, best_block_height,
broadcaster, &bounded_fee_estimator, logger,
);
}
self.queue_latest_holder_commitment_txn_for_broadcast(broadcaster, &bounded_fee_estimator, logger);
} else if !self.holder_tx_signed {
log_error!(logger, "WARNING: You have a potentially-unsafe holder commitment transaction available to broadcast");
log_error!(logger, " in channel monitor for channel {}!", &self.funding_info.0.to_channel_id());
@@ -3363,6 +3385,58 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
}
}
/// Cancels any existing pending claims for a commitment that previously confirmed and has now
/// been replaced by another.
pub fn cancel_prev_commitment_claims<L: Deref>(
&mut self, logger: &L, confirmed_commitment_txid: &Txid
) where L::Target: Logger {
for (counterparty_commitment_txid, _) in &self.counterparty_commitment_txn_on_chain {
// Cancel any pending claims for counterparty commitments we've seen confirm.
if counterparty_commitment_txid == confirmed_commitment_txid {
continue;
}
for (htlc, _) in self.counterparty_claimable_outpoints.get(counterparty_commitment_txid).unwrap_or(&vec![]) {
log_trace!(logger, "Canceling claims for previously confirmed counterparty commitment {}",
counterparty_commitment_txid);
let mut outpoint = BitcoinOutPoint { txid: *counterparty_commitment_txid, vout: 0 };
if let Some(vout) = htlc.transaction_output_index {
outpoint.vout = vout;
self.onchain_tx_handler.abandon_claim(&outpoint);
}
}
}
if self.holder_tx_signed {
// If we've signed, we may have broadcast either commitment (prev or current), and
// attempted to claim from it immediately without waiting for a confirmation.
if self.current_holder_commitment_tx.txid != *confirmed_commitment_txid {
log_trace!(logger, "Canceling claims for previously broadcast holder commitment {}",
self.current_holder_commitment_tx.txid);
let mut outpoint = BitcoinOutPoint { txid: self.current_holder_commitment_tx.txid, vout: 0 };
for (htlc, _, _) in &self.current_holder_commitment_tx.htlc_outputs {
if let Some(vout) = htlc.transaction_output_index {
outpoint.vout = vout;
self.onchain_tx_handler.abandon_claim(&outpoint);
}
}
}
if let Some(prev_holder_commitment_tx) = &self.prev_holder_signed_commitment_tx {
if prev_holder_commitment_tx.txid != *confirmed_commitment_txid {
log_trace!(logger, "Canceling claims for previously broadcast holder commitment {}",
prev_holder_commitment_tx.txid);
let mut outpoint = BitcoinOutPoint { txid: prev_holder_commitment_tx.txid, vout: 0 };
for (htlc, _, _) in &prev_holder_commitment_tx.htlc_outputs {
if let Some(vout) = htlc.transaction_output_index {
outpoint.vout = vout;
self.onchain_tx_handler.abandon_claim(&outpoint);
}
}
}
}
} else {
// No previous claim.
}
}
fn get_latest_holder_commitment_txn<L: Deref>(
&mut self, logger: &WithChannelMonitor<L>,
) -> Vec<Transaction> where L::Target: Logger {
@@ -3578,6 +3652,10 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
commitment_tx_to_counterparty_output,
},
});
// Now that we've detected a confirmed commitment transaction, attempt to cancel
// pending claims for any commitments that were previously confirmed such that
// we don't continue claiming inputs that no longer exist.
self.cancel_prev_commitment_claims(&logger, &txid);
}
}
if tx.input.len() >= 1 {
@@ -3643,29 +3721,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
let should_broadcast = self.should_broadcast_holder_commitment_txn(logger);
if should_broadcast {
let funding_outp = HolderFundingOutput::build(self.funding_redeemscript.clone(), self.channel_value_satoshis, self.onchain_tx_handler.channel_type_features().clone());
let commitment_package = PackageTemplate::build_package(self.funding_info.0.txid.clone(), self.funding_info.0.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), self.best_block.height(), self.best_block.height());
claimable_outpoints.push(commitment_package);
self.pending_monitor_events.push(MonitorEvent::HolderForceClosed(self.funding_info.0));
// Although we aren't signing the transaction directly here, the transaction will be signed
// in the claim that is queued to OnchainTxHandler. We set holder_tx_signed here to reject
// new channel updates.
self.holder_tx_signed = true;
// We can't broadcast our HTLC transactions while the commitment transaction is
// unconfirmed. We'll delay doing so until we detect the confirmed commitment in
// `transactions_confirmed`.
if !self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
// Because we're broadcasting a commitment transaction, we should construct the package
// assuming it gets confirmed in the next block. Sadly, we have code which considers
// "not yet confirmed" things as discardable, so we cannot do that here.
let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, self.best_block.height());
let unsigned_commitment_tx = self.onchain_tx_handler.get_unsigned_holder_commitment_tx();
let new_outputs = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, &unsigned_commitment_tx);
if !new_outputs.is_empty() {
watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs));
}
claimable_outpoints.append(&mut new_outpoints);
}
let (mut new_outpoints, mut new_outputs) = self.generate_claimable_outpoints_and_watch_outputs();
claimable_outpoints.append(&mut new_outpoints);
watch_outputs.append(&mut new_outputs);
}
// Find which on-chain events have reached their confirmation threshold.

View file

@@ -676,6 +676,25 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
None
}
pub fn abandon_claim(&mut self, outpoint: &BitcoinOutPoint) {
let claim_id = self.claimable_outpoints.get(outpoint).map(|(claim_id, _)| *claim_id)
.or_else(|| {
self.pending_claim_requests.iter()
.find(|(_, claim)| claim.outpoints().iter().any(|claim_outpoint| *claim_outpoint == outpoint))
.map(|(claim_id, _)| *claim_id)
});
if let Some(claim_id) = claim_id {
if let Some(claim) = self.pending_claim_requests.remove(&claim_id) {
for outpoint in claim.outpoints() {
self.claimable_outpoints.remove(&outpoint);
}
}
} else {
self.locktimed_packages.values_mut().for_each(|claims|
claims.retain(|claim| !claim.outpoints().iter().any(|claim_outpoint| *claim_outpoint == outpoint)));
}
}
/// Upon channelmonitor.block_connected(..) or upon provision of a preimage on the forward link
/// for this channel, provide new relevant on-chain transactions and/or new claim requests.
/// Together with `update_claims_view_from_matched_txn` this used to be named

View file

@@ -2273,9 +2273,15 @@ fn channel_monitor_network_test() {
nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], true);
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
{
let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
assert_eq!(node_txn.len(), 1);
mine_transaction(&nodes[1], &node_txn[0]);
if nodes[1].connect_style.borrow().updates_best_block_first() {
let _ = nodes[1].tx_broadcaster.txn_broadcast();
}
mine_transaction(&nodes[0], &node_txn[0]);
check_added_monitors!(nodes[0], 1);
test_txn_broadcast(&nodes[0], &chan_1, Some(node_txn[0].clone()), HTLCType::NONE);
@@ -2284,7 +2290,6 @@ fn channel_monitor_network_test() {
assert_eq!(nodes[0].node.list_channels().len(), 0);
assert_eq!(nodes[1].node.list_channels().len(), 1);
check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
// One pending HTLC is discarded by the force-close:
let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
@@ -3556,7 +3561,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
// connect_style.
return;
}
create_announced_chan_between_nodes(&nodes, 0, 1);
let funding_tx = create_announced_chan_between_nodes(&nodes, 0, 1).3;
route_payment(&nodes[0], &[&nodes[1]], 10000000);
nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
@@ -3565,11 +3570,12 @@ fn test_htlc_ignore_latest_remote_commitment() {
check_added_monitors!(nodes[0], 1);
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(node_txn.len(), 3);
assert_eq!(node_txn[0].txid(), node_txn[1].txid());
let node_txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
assert_eq!(node_txn.len(), 2);
check_spends!(node_txn[0], funding_tx);
check_spends!(node_txn[1], node_txn[0]);
let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[1].clone()]);
let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone()]);
connect_block(&nodes[1], &block);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
@@ -3626,7 +3632,7 @@ fn test_force_close_fail_back() {
check_closed_broadcast!(nodes[2], true);
check_added_monitors!(nodes[2], 1);
check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
let tx = {
let commitment_tx = {
let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
// Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
// have a use for it unless nodes[2] learns the preimage somehow, the funds will go
@@ -3635,7 +3641,7 @@ fn test_force_close_fail_back() {
node_txn.remove(0)
};
mine_transaction(&nodes[1], &tx);
mine_transaction(&nodes[1], &commitment_tx);
// Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
check_closed_broadcast!(nodes[1], true);
@@ -3647,15 +3653,16 @@ fn test_force_close_fail_back() {
get_monitor!(nodes[2], payment_event.commitment_msg.channel_id)
.provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator), &node_cfgs[2].logger);
}
mine_transaction(&nodes[2], &tx);
let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 1);
assert_eq!(node_txn[0].input.len(), 1);
assert_eq!(node_txn[0].input[0].previous_output.txid, tx.txid());
assert_eq!(node_txn[0].lock_time, LockTime::ZERO); // Must be an HTLC-Success
assert_eq!(node_txn[0].input[0].witness.len(), 5); // Must be an HTLC-Success
mine_transaction(&nodes[2], &commitment_tx);
let mut node_txn = nodes[2].tx_broadcaster.txn_broadcast();
assert_eq!(node_txn.len(), if nodes[2].connect_style.borrow().updates_best_block_first() { 2 } else { 1 });
let htlc_tx = node_txn.pop().unwrap();
assert_eq!(htlc_tx.input.len(), 1);
assert_eq!(htlc_tx.input[0].previous_output.txid, commitment_tx.txid());
assert_eq!(htlc_tx.lock_time, LockTime::ZERO); // Must be an HTLC-Success
assert_eq!(htlc_tx.input[0].witness.len(), 5); // Must be an HTLC-Success
check_spends!(node_txn[0], tx);
check_spends!(htlc_tx, commitment_tx);
}
#[test]
@@ -8568,10 +8575,11 @@ fn test_concurrent_monitor_claim() {
watchtower_alice.chain_monitor.block_connected(&block, HTLC_TIMEOUT_BROADCAST);
// Watchtower Alice should have broadcast a commitment/HTLC-timeout
let alice_state = {
{
let mut txn = alice_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 2);
txn.remove(0)
check_spends!(txn[0], chan_1.3);
check_spends!(txn[1], txn[0]);
};
// Copy ChainMonitor to simulate watchtower Bob and make it receive a commitment update first.
@@ -8640,11 +8648,8 @@ fn test_concurrent_monitor_claim() {
check_added_monitors(&nodes[0], 1);
{
let htlc_txn = alice_broadcaster.txn_broadcast();
assert_eq!(htlc_txn.len(), 2);
assert_eq!(htlc_txn.len(), 1);
check_spends!(htlc_txn[0], bob_state_y);
// Alice doesn't clean up the old HTLC claim since it hasn't seen a conflicting spend for
// it. However, she should, because it now has an invalid parent.
check_spends!(htlc_txn[1], alice_state);
}
}
@@ -8883,7 +8888,12 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
assert_eq!(bob_txn.len(), 1);
check_spends!(bob_txn[0], txn_to_broadcast[0]);
} else {
assert_eq!(bob_txn.len(), 2);
if nodes[1].connect_style.borrow().updates_best_block_first() {
assert_eq!(bob_txn.len(), 3);
assert_eq!(bob_txn[0].txid(), bob_txn[1].txid());
} else {
assert_eq!(bob_txn.len(), 2);
}
check_spends!(bob_txn[0], chan_ab.3);
}
}
@@ -8899,15 +8909,16 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
// If Alice force-closed, Bob only broadcasts a HTLC-output-claiming transaction. Otherwise,
// Bob force-closed and broadcasts the commitment transaction along with a
// HTLC-output-claiming transaction.
let bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
if broadcast_alice {
assert_eq!(bob_txn.len(), 1);
check_spends!(bob_txn[0], txn_to_broadcast[0]);
assert_eq!(bob_txn[0].input[0].witness.last().unwrap().len(), script_weight);
} else {
assert_eq!(bob_txn.len(), 2);
check_spends!(bob_txn[1], txn_to_broadcast[0]);
assert_eq!(bob_txn[1].input[0].witness.last().unwrap().len(), script_weight);
assert_eq!(bob_txn.len(), if nodes[1].connect_style.borrow().updates_best_block_first() { 3 } else { 2 });
let htlc_tx = bob_txn.pop().unwrap();
check_spends!(htlc_tx, txn_to_broadcast[0]);
assert_eq!(htlc_tx.input[0].witness.last().unwrap().len(), script_weight);
}
}
}
@@ -9383,8 +9394,12 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
// We should broadcast an HTLC transaction spending our funding transaction first
let spending_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(spending_txn.len(), 2);
assert_eq!(spending_txn[0].txid(), node_txn[0].txid());
check_spends!(spending_txn[1], node_txn[0]);
let htlc_tx = if spending_txn[0].txid() == node_txn[0].txid() {
&spending_txn[1]
} else {
&spending_txn[0]
};
check_spends!(htlc_tx, node_txn[0]);
// We should also generate a SpendableOutputs event with the to_self output (as its
// timelock is up).
let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@ -9394,7 +9409,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
// should immediately fail-backwards the HTLC to the previous hop, without waiting for an
// additional block built on top of the current chain.
nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
&nodes[1].get_block_header(conf_height + 1), &[(0, &spending_txn[1])], conf_height + 1);
&nodes[1].get_block_header(conf_height + 1), &[(0, htlc_tx)], conf_height + 1);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]);
check_added_monitors!(nodes[1], 1);
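Several of the test changes above switch from reading `txn_broadcasted` directly to `unique_txn_broadcast()`, or drain the broadcaster when the connect style updates the best block first: with broadcasts routed through the claim pipeline, the same commitment can now be announced more than once. A standalone sketch of the dedup-by-txid idea those assertions rely on, using a hypothetical `Tx` stand-in rather than the real test harness:

```rust
use std::collections::HashSet;

// Hypothetical stand-in for a broadcast transaction; in the real tests the txid comes
// from the bitcoin crate's Transaction::txid(). This is not the LDK test harness.
#[derive(Clone, Debug)]
struct Tx { txid: [u8; 32] }

// Keep only the first occurrence of each txid, preserving broadcast order. The real
// `unique_txn_broadcast()` helper may differ in detail; this only shows why duplicate
// announcements of one commitment need collapsing before the tests count transactions.
fn unique_by_txid(broadcasts: Vec<Tx>) -> Vec<Tx> {
    let mut seen = HashSet::new();
    broadcasts.into_iter().filter(|tx| seen.insert(tx.txid)).collect()
}

fn main() {
    let commitment = Tx { txid: [1u8; 32] };
    let htlc_claim = Tx { txid: [2u8; 32] };
    // The commitment shows up twice: once when the best block update triggers the
    // force-close path, and again when the queued claim is (re)broadcast.
    let broadcasts = vec![commitment.clone(), commitment.clone(), htlc_claim.clone()];
    let unique = unique_by_txid(broadcasts);
    assert_eq!(unique.len(), 2);
    assert_eq!(unique[0].txid, commitment.txid);
    assert_eq!(unique[1].txid, htlc_claim.txid);
}
```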

View file

@@ -737,7 +737,7 @@ fn do_test_balances_on_local_commitment_htlcs(anchors: bool) {
commitment_tx
};
let commitment_tx_conf_height_a = block_from_scid(&mine_transaction(&nodes[0], &commitment_tx));
if anchors && nodes[0].connect_style.borrow().updates_best_block_first() {
if nodes[0].connect_style.borrow().updates_best_block_first() {
let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 1);
assert_eq!(txn[0].txid(), commitment_tx.txid());
@@ -1998,6 +1998,11 @@ fn do_test_restored_packages_retry(check_old_monitor_retries_after_upgrade: bool
};
mine_transaction(&nodes[0], &commitment_tx);
if nodes[0].connect_style.borrow().updates_best_block_first() {
let txn = nodes[0].tx_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 1);
assert_eq!(txn[0].txid(), commitment_tx.txid());
}
// Connect blocks until the HTLC's expiration is met, expecting a transaction broadcast.
connect_blocks(&nodes[0], TEST_FINAL_CLTV);
@@ -2401,26 +2406,12 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
nodes[1].node.timer_tick_occurred();
check_added_monitors(&nodes[1], 2);
check_closed_event!(&nodes[1], 2, ClosureReason::OutdatedChannelManager, [nodes[0].node.get_our_node_id(); 2], 1000000);
let (revoked_commitment_a, revoked_commitment_b) = {
let txn = nodes[1].tx_broadcaster.unique_txn_broadcast();
assert_eq!(txn.len(), 2);
assert_eq!(txn[0].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
assert_eq!(txn[1].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
if txn[0].input[0].previous_output.txid == chan_a.3.txid() {
check_spends!(&txn[0], &chan_a.3);
check_spends!(&txn[1], &chan_b.3);
(txn[0].clone(), txn[1].clone())
} else {
check_spends!(&txn[1], &chan_a.3);
check_spends!(&txn[0], &chan_b.3);
(txn[1].clone(), txn[0].clone())
}
};
// Bob should now receive two events to bump his revoked commitment transaction fees.
assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
let events = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
let mut revoked_commitment_txs = Vec::with_capacity(events.len());
let mut anchor_txs = Vec::with_capacity(events.len());
for (idx, event) in events.into_iter().enumerate() {
let utxo_value = Amount::ONE_BTC.to_sat() * (idx + 1) as u64;
@@ -2440,13 +2431,21 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
};
let txn = nodes[1].tx_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 2);
assert_eq!(txn[0].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
if txn[0].input[0].previous_output.txid == chan_a.3.txid() {
check_spends!(&txn[0], &chan_a.3);
} else {
check_spends!(&txn[0], &chan_b.3);
}
let (commitment_tx, anchor_tx) = (&txn[0], &txn[1]);
check_spends!(anchor_tx, coinbase_tx, commitment_tx);
revoked_commitment_txs.push(commitment_tx.clone());
anchor_txs.push(anchor_tx.clone());
};
for node in &nodes {
mine_transactions(node, &[&revoked_commitment_a, &anchor_txs[0], &revoked_commitment_b, &anchor_txs[1]]);
mine_transactions(node, &[&revoked_commitment_txs[0], &anchor_txs[0], &revoked_commitment_txs[1], &anchor_txs[1]]);
}
check_added_monitors!(&nodes[0], 2);
check_closed_broadcast(&nodes[0], 2, true);
@@ -2458,7 +2457,7 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(txn.len(), 4);
let (revoked_htlc_claim_a, revoked_htlc_claim_b) = if txn[0].input[0].previous_output.txid == revoked_commitment_a.txid() {
let (revoked_htlc_claim_a, revoked_htlc_claim_b) = if txn[0].input[0].previous_output.txid == revoked_commitment_txs[0].txid() {
(if txn[0].input.len() == 2 { &txn[0] } else { &txn[1] }, if txn[2].input.len() == 2 { &txn[2] } else { &txn[3] })
} else {
(if txn[2].input.len() == 2 { &txn[2] } else { &txn[3] }, if txn[0].input.len() == 2 { &txn[0] } else { &txn[1] })
@@ -2466,10 +2465,10 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
assert_eq!(revoked_htlc_claim_a.input.len(), 2); // Spends both HTLC outputs
assert_eq!(revoked_htlc_claim_a.output.len(), 1);
check_spends!(revoked_htlc_claim_a, revoked_commitment_a);
check_spends!(revoked_htlc_claim_a, revoked_commitment_txs[0]);
assert_eq!(revoked_htlc_claim_b.input.len(), 2); // Spends both HTLC outputs
assert_eq!(revoked_htlc_claim_b.output.len(), 1);
check_spends!(revoked_htlc_claim_b, revoked_commitment_b);
check_spends!(revoked_htlc_claim_b, revoked_commitment_txs[1]);
}
// Since Bob was able to confirm his revoked commitment, he'll now try to claim the HTLCs
@@ -2549,7 +2548,7 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
sig
};
htlc_tx.input[0].witness = Witness::from_slice(&[fee_utxo_sig, public_key.to_bytes()]);
check_spends!(htlc_tx, coinbase_tx, revoked_commitment_a, revoked_commitment_b);
check_spends!(htlc_tx, coinbase_tx, revoked_commitment_txs[0], revoked_commitment_txs[1]);
htlc_tx
};
@@ -2608,7 +2607,7 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
).unwrap();
if let SpendableOutputDescriptor::StaticPaymentOutput(_) = &outputs[0] {
check_spends!(spend_tx, &revoked_commitment_a, &revoked_commitment_b);
check_spends!(spend_tx, &revoked_commitment_txs[0], &revoked_commitment_txs[1]);
} else {
check_spends!(spend_tx, revoked_claim_transactions.get(&spend_tx.input[0].previous_output.txid).unwrap());
}
@@ -2778,7 +2777,7 @@ fn do_test_monitor_claims_with_random_signatures(anchors: bool, confirm_counterp
// If we update the best block to the new height before providing the confirmed transactions,
// we'll see another broadcast of the commitment transaction.
if anchors && !confirm_counterparty_commitment && nodes[0].connect_style.borrow().updates_best_block_first() {
if !confirm_counterparty_commitment && nodes[0].connect_style.borrow().updates_best_block_first() {
let _ = nodes[0].tx_broadcaster.txn_broadcast();
}
@@ -2796,11 +2795,7 @@ fn do_test_monitor_claims_with_random_signatures(anchors: bool, confirm_counterp
let htlc_timeout_tx = {
let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 1);
let tx = if txn[0].input[0].previous_output.txid == commitment_tx.txid() {
txn[0].clone()
} else {
txn[1].clone()
};
let tx = txn.pop().unwrap();
check_spends!(tx, commitment_tx, coinbase_tx);
tx
};

View file

@@ -638,7 +638,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
let nodes_0_deserialized;
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
let (_, _, chan_id_2, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
// Serialize the ChannelManager prior to sending payments
@@ -750,14 +750,21 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
mine_transaction(&nodes[1], &as_commitment_tx);
let bs_htlc_claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(bs_htlc_claim_txn.len(), 1);
check_spends!(bs_htlc_claim_txn[0], as_commitment_tx);
let bs_htlc_claim_txn = {
let mut txn = nodes[1].tx_broadcaster.unique_txn_broadcast();
assert_eq!(txn.len(), 2);
check_spends!(txn[0], funding_tx);
check_spends!(txn[1], as_commitment_tx);
txn.pop().unwrap()
};
if !confirm_before_reload {
mine_transaction(&nodes[0], &as_commitment_tx);
let txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
assert_eq!(txn.len(), 1);
assert_eq!(txn[0].txid(), as_commitment_tx.txid());
}
mine_transaction(&nodes[0], &bs_htlc_claim_txn[0]);
mine_transaction(&nodes[0], &bs_htlc_claim_txn);
expect_payment_sent(&nodes[0], payment_preimage_1, None, true, false);
connect_blocks(&nodes[0], TEST_FINAL_CLTV*4 + 20);
let (first_htlc_timeout_tx, second_htlc_timeout_tx) = {
@@ -767,7 +774,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
};
check_spends!(first_htlc_timeout_tx, as_commitment_tx);
check_spends!(second_htlc_timeout_tx, as_commitment_tx);
if first_htlc_timeout_tx.input[0].previous_output == bs_htlc_claim_txn[0].input[0].previous_output {
if first_htlc_timeout_tx.input[0].previous_output == bs_htlc_claim_txn.input[0].previous_output {
confirm_transaction(&nodes[0], &second_htlc_timeout_tx);
} else {
confirm_transaction(&nodes[0], &first_htlc_timeout_tx);
@@ -919,19 +926,23 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
// the HTLC-Timeout transaction beyond 1 conf). For dust HTLCs, the HTLC is considered resolved
// after the commitment transaction, so always connect the commitment transaction.
mine_transaction(&nodes[0], &bs_commitment_tx[0]);
if nodes[0].connect_style.borrow().updates_best_block_first() {
let _ = nodes[0].tx_broadcaster.txn_broadcast();
}
mine_transaction(&nodes[1], &bs_commitment_tx[0]);
if !use_dust {
connect_blocks(&nodes[0], TEST_FINAL_CLTV + (MIN_CLTV_EXPIRY_DELTA as u32));
connect_blocks(&nodes[1], TEST_FINAL_CLTV + (MIN_CLTV_EXPIRY_DELTA as u32));
let as_htlc_timeout = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
check_spends!(as_htlc_timeout[0], bs_commitment_tx[0]);
assert_eq!(as_htlc_timeout.len(), 1);
check_spends!(as_htlc_timeout[0], bs_commitment_tx[0]);
mine_transaction(&nodes[0], &as_htlc_timeout[0]);
// nodes[0] may rebroadcast (or RBF-bump) its HTLC-Timeout, so wipe the announced set.
nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
mine_transaction(&nodes[1], &as_htlc_timeout[0]);
}
if nodes[0].connect_style.borrow().updates_best_block_first() {
let _ = nodes[0].tx_broadcaster.txn_broadcast();
}
// Create a new channel on which to retry the payment before we fail the payment via the
// HTLC-Timeout transaction. This avoids ChannelManager timing out the payment due to us
@@ -1049,32 +1060,36 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
// Connect blocks until the CLTV timeout is up so that we get an HTLC-Timeout transaction
connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(node_txn.len(), 3);
assert_eq!(node_txn[0].txid(), node_txn[1].txid());
check_spends!(node_txn[1], funding_tx);
check_spends!(node_txn[2], node_txn[1]);
let timeout_txn = vec![node_txn[2].clone()];
let (commitment_tx, htlc_timeout_tx) = {
let mut txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
assert_eq!(txn.len(), 2);
check_spends!(txn[0], funding_tx);
check_spends!(txn[1], txn[0]);
(txn.remove(0), txn.remove(0))
};
nodes[1].node.claim_funds(payment_preimage);
check_added_monitors!(nodes[1], 1);
expect_payment_claimed!(nodes[1], payment_hash, 10_000_000);
connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[1].clone()]));
mine_transaction(&nodes[1], &commitment_tx);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(claim_txn.len(), 1);
check_spends!(claim_txn[0], node_txn[1]);
let htlc_success_tx = {
let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 1);
check_spends!(txn[0], commitment_tx);
txn.pop().unwrap()
};
connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[1].clone()]));
mine_transaction(&nodes[0], &commitment_tx);
if confirm_commitment_tx {
connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
}
let claim_block = create_dummy_block(nodes[0].best_block_hash(), 42, if payment_timeout { timeout_txn } else { vec![claim_txn[0].clone()] });
let claim_block = create_dummy_block(nodes[0].best_block_hash(), 42, if payment_timeout { vec![htlc_timeout_tx] } else { vec![htlc_success_tx] });
if payment_timeout {
assert!(confirm_commitment_tx); // Otherwise we're spending below our CSV!

View file

@@ -1065,9 +1065,10 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
confirm_transaction(&nodes[1], &cs_commitment_tx[1]);
} else {
connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1 + 1);
let bs_htlc_timeout_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(bs_htlc_timeout_tx.len(), 1);
confirm_transaction(&nodes[1], &bs_htlc_timeout_tx[0]);
let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
assert_eq!(txn.len(), if nodes[1].connect_style.borrow().updates_best_block_first() { 2 } else { 1 });
let bs_htlc_timeout_tx = txn.pop().unwrap();
confirm_transaction(&nodes[1], &bs_htlc_timeout_tx);
}
} else {
confirm_transaction(&nodes[1], &bs_commitment_tx[0]);

View file

@@ -666,6 +666,9 @@ fn test_htlc_preimage_claim_holder_commitment_after_counterparty_commitment_reor
mine_transaction(&nodes[0], &commitment_tx_b);
mine_transaction(&nodes[1], &commitment_tx_b);
if nodes[1].connect_style.borrow().updates_best_block_first() {
let _ = nodes[1].tx_broadcaster.txn_broadcast();
}
// Provide the preimage now, such that we only claim from the holder commitment (since it's
// currently confirmed) and not the counterparty's.
@@ -756,3 +759,122 @@ fn test_htlc_preimage_claim_prev_counterparty_commitment_after_current_counterpa
// commitment (still unrevoked) is the currently confirmed closing transaction.
assert_eq!(htlc_preimage_tx.input[0].witness.second_to_last().unwrap(), &payment_preimage.0[..]);
}
fn do_test_retries_own_commitment_broadcast_after_reorg(anchors: bool, revoked_counterparty_commitment: bool) {
// Tests that a node will retry broadcasting its own commitment after seeing a confirmed
// counterparty commitment be reorged out.
let mut chanmon_cfgs = create_chanmon_cfgs(2);
if revoked_counterparty_commitment {
chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
}
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let mut config = test_default_channel_config();
if anchors {
config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
config.manually_accept_inbound_channels = true;
}
let persister;
let new_chain_monitor;
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), Some(config)]);
let nodes_1_deserialized;
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
// Route a payment so we have an HTLC to claim as well.
let _ = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
if revoked_counterparty_commitment {
// Trigger a fee update such that we advance the state. We will have B broadcast its state
// without the fee update.
let serialized_node = nodes[1].node.encode();
let serialized_monitor = get_monitor!(nodes[1], chan_id).encode();
*chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap() += 1;
nodes[0].node.timer_tick_occurred();
check_added_monitors!(nodes[0], 1);
let fee_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &fee_update.update_fee.unwrap());
commitment_signed_dance!(nodes[1], nodes[0], fee_update.commitment_signed, false);
reload_node!(
nodes[1], config, &serialized_node, &[&serialized_monitor], persister, new_chain_monitor, nodes_1_deserialized
);
}
// Connect blocks until the HTLC expiry is met, prompting a commitment broadcast by A.
connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
check_closed_broadcast(&nodes[0], 1, true);
check_added_monitors(&nodes[0], 1);
check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100_000);
{
let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
if anchors {
assert_eq!(txn.len(), 1);
let commitment_tx_a = txn.pop().unwrap();
check_spends!(commitment_tx_a, funding_tx);
} else {
assert_eq!(txn.len(), 2);
let htlc_tx_a = txn.pop().unwrap();
let commitment_tx_a = txn.pop().unwrap();
check_spends!(commitment_tx_a, funding_tx);
check_spends!(htlc_tx_a, commitment_tx_a);
}
};
// B will also broadcast its own commitment.
nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[0].node.get_our_node_id()).unwrap();
check_closed_broadcast(&nodes[1], 1, true);
check_added_monitors(&nodes[1], 1);
check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100_000);
let commitment_b = {
let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 1);
let tx = txn.pop().unwrap();
check_spends!(tx, funding_tx);
tx
};
// Confirm B's commitment, A should now broadcast an HTLC timeout for commitment B.
mine_transaction(&nodes[0], &commitment_b);
{
let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
if nodes[0].connect_style.borrow().updates_best_block_first() {
// `commitment_a` and `htlc_timeout_a` are rebroadcast because the best block was
// updated prior to seeing `commitment_b`.
assert_eq!(txn.len(), if anchors { 2 } else { 3 });
check_spends!(txn.last().unwrap(), commitment_b);
} else {
assert_eq!(txn.len(), 1);
check_spends!(txn[0], commitment_b);
}
}
// Disconnect the block, allowing A to retry its own commitment. Note that we connect two
// blocks, one to get us back to the original height, and another to retry our pending claims.
disconnect_blocks(&nodes[0], 1);
connect_blocks(&nodes[0], 2);
{
let mut txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
if anchors {
assert_eq!(txn.len(), 1);
check_spends!(txn[0], funding_tx);
} else {
assert_eq!(txn.len(), 2);
check_spends!(txn[0], txn[1]); // HTLC timeout A
check_spends!(txn[1], funding_tx); // Commitment A
assert_ne!(txn[1].txid(), commitment_b.txid());
}
}
}
#[test]
fn test_retries_own_commitment_broadcast_after_reorg() {
do_test_retries_own_commitment_broadcast_after_reorg(false, false);
do_test_retries_own_commitment_broadcast_after_reorg(false, true);
do_test_retries_own_commitment_broadcast_after_reorg(true, false);
do_test_retries_own_commitment_broadcast_after_reorg(true, true);
}
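The new `do_test_retries_own_commitment_broadcast_after_reorg` case exercises the user-visible payoff of the refactor: because the holder commitment lives in the claim pipeline rather than being a one-shot broadcast, it can be re-announced once the conflicting counterparty commitment is reorged out. A toy model of that retry behaviour follows; `PendingClaim` and `retry_pending_claims` are hypothetical and only illustrate the pattern, not LDK's internal mechanics.

```rust
// Toy model only: these types are hypothetical and exist purely to illustrate the
// retry-after-reorg behaviour the test above checks.
#[derive(Clone, Debug, PartialEq)]
struct Txid([u8; 32]);

#[derive(Clone, Debug)]
struct PendingClaim {
    tx: Txid,
    // Set while a conflicting transaction is confirmed; cleared if that block is disconnected.
    conflict_confirmed: bool,
}

// On each new block, rebroadcast every claim that is not currently blocked by a confirmed
// conflicting spend.
fn retry_pending_claims(claims: &[PendingClaim], broadcast: &mut Vec<Txid>) {
    for claim in claims {
        if !claim.conflict_confirmed {
            broadcast.push(claim.tx.clone());
        }
    }
}

fn main() {
    let mut broadcast = Vec::new();
    let mut claims = vec![PendingClaim { tx: Txid([0xaa; 32]), conflict_confirmed: false }];

    // A counterparty commitment confirms: our own commitment claim is blocked for now.
    claims[0].conflict_confirmed = true;
    retry_pending_claims(&claims, &mut broadcast);
    assert!(broadcast.is_empty());

    // The conflicting block is disconnected (reorg) and a new block connects: retry the claim.
    claims[0].conflict_confirmed = false;
    retry_pending_claims(&claims, &mut broadcast);
    assert_eq!(broadcast, vec![Txid([0xaa; 32])]);
}
```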