From f0de37ae1fb43587015959bb31961fe5a7f4d173 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 22 Aug 2024 15:25:56 +0000 Subject: [PATCH 1/5] Add a new `ConfirmationTarget::MaximumFeeEstimate` When we broke `ConfirmationTarget` out into task-specific names, we left `MaxDustHTLCExposure::FeeRateMultiplier` as using the "when we broadcast feerate" as we were mostly concerned about the dust thresholds on outbound channels where we pick the fee and drive our own funds to dust. In 51bf78d604b28fe189171e1b976fe87cbb2614b7, that changed to include transaction fees on both inbound and outbound channels in our dust exposure amount, but we continued to use `ConfirmationTarget::OnChainSweep` for the fee estimator threshold. While the `MaxDustHTLCExposure::FeeRateMultiplier` value is quite conservative and shouldn't lead to force-closures unless feerate estimates disagree by something like 500 sat/vB (with only one HTLC active in a channel), this happened on Aug 22 when feerates spiked from 4 sat/vB to over 1000 sat/vB in one block. To avoid simple feerate estimate horizons causing this in the future, here we add a new `ConfirmationTarget::MaximumFeeEstimate` which is used for dust calculations. This allows users to split out the estimates they use for checking counterparty feerates from the estimates used for actual broadcasting. --- fuzz/src/chanmon_consistency.rs | 2 +- lightning/src/chain/chaininterface.rs | 6 ++++++ lightning/src/ln/channel.rs | 2 +- lightning/src/util/config.rs | 14 +++++++------- 4 files changed, 15 insertions(+), 9 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 84cc4bf26..7c50a5125 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -97,7 +97,7 @@ impl FeeEstimator for FuzzEstimator { // always return a HighPriority feerate here which is >= the maximum Normal feerate and a // Background feerate which is <= the minimum Normal feerate. match conf_target { - ConfirmationTarget::OnChainSweep => MAX_FEE, + ConfirmationTarget::MaximumFeeEstimate | ConfirmationTarget::OnChainSweep => MAX_FEE, ConfirmationTarget::ChannelCloseMinimum | ConfirmationTarget::AnchorChannelFee | ConfirmationTarget::MinAllowedAnchorChannelRemoteFee diff --git a/lightning/src/chain/chaininterface.rs b/lightning/src/chain/chaininterface.rs index ba9e9d1fe..0db327470 100644 --- a/lightning/src/chain/chaininterface.rs +++ b/lightning/src/chain/chaininterface.rs @@ -49,6 +49,12 @@ pub trait BroadcasterInterface { /// estimation. #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] pub enum ConfirmationTarget { + /// The most aggressive (i.e. highest) feerate estimate available. + /// + /// This is used to sanity-check our counterparty's feerates and should be as conservative as + /// possible to ensure that we don't confuse a peer using a very conservative estimator for one + /// trying to burn channel balance to dust. + MaximumFeeEstimate, /// We have some funds available on chain which we need to spend prior to some expiry time at /// which point our counterparty may be able to steal them. 
Generally we have in the high tens /// to low hundreds of blocks to get our transaction on-chain, but we shouldn't risk too low a diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index f4d55f916..96ca8bac3 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -2442,7 +2442,7 @@ impl ChannelContext where SP::Target: SignerProvider { fn get_dust_exposure_limiting_feerate(&self, fee_estimator: &LowerBoundedFeeEstimator, ) -> u32 where F::Target: FeeEstimator { - fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::OnChainSweep) + fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MaximumFeeEstimate) } pub fn get_max_dust_htlc_exposure_msat(&self, limiting_feerate_sat_per_kw: u32) -> u64 { diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs index f369e793e..31c975c9c 100644 --- a/lightning/src/util/config.rs +++ b/lightning/src/util/config.rs @@ -382,9 +382,9 @@ pub enum MaxDustHTLCExposure { /// to this maximum the channel may be unable to send/receive HTLCs between the maximum dust /// exposure and the new minimum value for HTLCs to be economically viable to claim. FixedLimitMsat(u64), - /// This sets a multiplier on the [`ConfirmationTarget::OnChainSweep`] feerate (in sats/KW) to - /// determine the maximum allowed dust exposure. If this variant is used then the maximum dust - /// exposure in millisatoshis is calculated as: + /// This sets a multiplier on the [`ConfirmationTarget::MaximumFeeEstimate`] feerate (in + /// sats/KW) to determine the maximum allowed dust exposure. If this variant is used then the + /// maximum dust exposure in millisatoshis is calculated as: /// `feerate_per_kw * value`. For example, with our default value /// `FeeRateMultiplier(10_000)`: /// @@ -407,7 +407,7 @@ pub enum MaxDustHTLCExposure { /// by default this will be set to a [`Self::FixedLimitMsat`] of 5,000,000 msat. /// /// [`FeeEstimator`]: crate::chain::chaininterface::FeeEstimator - /// [`ConfirmationTarget::OnChainSweep`]: crate::chain::chaininterface::ConfirmationTarget::OnChainSweep + /// [`ConfirmationTarget::MaximumFeeEstimate`]: crate::chain::chaininterface::ConfirmationTarget::MaximumFeeEstimate FeeRateMultiplier(u64), } @@ -514,12 +514,12 @@ pub struct ChannelConfig { /// Note that when using [`MaxDustHTLCExposure::FeeRateMultiplier`] this maximum disagreement /// will scale linearly with increases (or decreases) in the our feerate estimates. Further, /// for anchor channels we expect our counterparty to use a relatively low feerate estimate - /// while we use [`ConfirmationTarget::OnChainSweep`] (which should be relatively high) and - /// feerate disagreement force-closures should only occur when theirs is higher than ours. + /// while we use [`ConfirmationTarget::MaximumFeeEstimate`] (which should be relatively high) + /// and feerate disagreement force-closures should only occur when theirs is higher than ours. /// /// Default value: [`MaxDustHTLCExposure::FeeRateMultiplier`] with a multiplier of `10_000` /// - /// [`ConfirmationTarget::OnChainSweep`]: crate::chain::chaininterface::ConfirmationTarget::OnChainSweep + /// [`ConfirmationTarget::MaximumFeeEstimate`]: crate::chain::chaininterface::ConfirmationTarget::MaximumFeeEstimate pub max_dust_htlc_exposure: MaxDustHTLCExposure, /// The additional fee we're willing to pay to avoid waiting for the counterparty's /// `to_self_delay` to reclaim funds. 
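
To illustrate the split this patch enables, a minimal `FeeEstimator` might look like the
following sketch. The struct, its cached-feerate fields, and the specific padding are
hypothetical and not part of this patch; only the trait, its method signature, and the
`ConfirmationTarget` variants come from LDK. The idea is that `MaximumFeeEstimate` is kept
deliberately above anything used for broadcasting, since it only sanity-checks counterparty
feerates (note that `OnChainSweep` is renamed to `UrgentOnChainSweep` later in this series):

    use lightning::chain::chaininterface::{ConfirmationTarget, FeeEstimator};

    /// A hypothetical estimator holding cached feerates in sats per 1000 weight units.
    struct StaticFeeEstimator {
        high_priority_sat_per_kw: u32,
        normal_sat_per_kw: u32,
        background_sat_per_kw: u32,
    }

    impl FeeEstimator for StaticFeeEstimator {
        fn get_est_sat_per_1000_weight(&self, conf_target: ConfirmationTarget) -> u32 {
            match conf_target {
                // Only used to sanity-check counterparty feerates (e.g. dust exposure),
                // so err on the high side: a padded estimate here cannot cause us to
                // overpay on anything we actually broadcast.
                ConfirmationTarget::MaximumFeeEstimate =>
                    self.high_priority_sat_per_kw.saturating_mul(2),
                // Funds are at risk if this does not confirm in time, so keep it high.
                ConfirmationTarget::OnChainSweep => self.high_priority_sat_per_kw,
                // Minimums and lower bounds can use the cheapest estimate available.
                ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
                | ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
                | ConfirmationTarget::ChannelCloseMinimum => self.background_sat_per_kw,
                // Everything else can track a normal-priority estimate.
                _ => self.normal_sat_per_kw,
            }
        }
    }

With the default `MaxDustHTLCExposure::FeeRateMultiplier(10_000)` and the formula documented
above (`feerate_per_kw * value`), a `MaximumFeeEstimate` of 250 sat/vB (62,500 sats per 1000
weight) allows up to 62,500 * 10,000 = 625,000,000 msat (625,000 sats) of dust exposure.
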
From 14720190b05c81283e22b32f9d95b11f08f393e5 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 22 Aug 2024 19:15:34 +0000 Subject: [PATCH 2/5] Split `ConfirmationTarget::OnChainSweep` into urgent and non-urgent When we force-close a channel, occasionally its due to feerate disagreements or other non-HTLC-related issues. In those cases, there's no reason to use a very urgent feerate estimate - we don't have any timers expiring soon. Instead, we should give users the information they need to be more economical on fees in this case, which we do here by splitting `OnChainSweep` into `UrgentOnChainSweep` and `NonUrgentOnChainSweep` `ConfirmationTarget`s. --- fuzz/src/chanmon_consistency.rs | 4 +- lightning/src/chain/chaininterface.rs | 18 ++++++--- lightning/src/chain/channelmonitor.rs | 55 +++++++++++++++++++++------ lightning/src/chain/onchaintx.rs | 35 +++++++++-------- lightning/src/chain/package.rs | 25 ++++++------ 5 files changed, 93 insertions(+), 44 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 7c50a5125..b1b916250 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -97,7 +97,9 @@ impl FeeEstimator for FuzzEstimator { // always return a HighPriority feerate here which is >= the maximum Normal feerate and a // Background feerate which is <= the minimum Normal feerate. match conf_target { - ConfirmationTarget::MaximumFeeEstimate | ConfirmationTarget::OnChainSweep => MAX_FEE, + ConfirmationTarget::MaximumFeeEstimate | ConfirmationTarget::UrgentOnChainSweep => { + MAX_FEE + }, ConfirmationTarget::ChannelCloseMinimum | ConfirmationTarget::AnchorChannelFee | ConfirmationTarget::MinAllowedAnchorChannelRemoteFee diff --git a/lightning/src/chain/chaininterface.rs b/lightning/src/chain/chaininterface.rs index 0db327470..84281df1d 100644 --- a/lightning/src/chain/chaininterface.rs +++ b/lightning/src/chain/chaininterface.rs @@ -56,10 +56,12 @@ pub enum ConfirmationTarget { /// trying to burn channel balance to dust. MaximumFeeEstimate, /// We have some funds available on chain which we need to spend prior to some expiry time at - /// which point our counterparty may be able to steal them. Generally we have in the high tens - /// to low hundreds of blocks to get our transaction on-chain, but we shouldn't risk too low a - /// fee - this should be a relatively high priority feerate. - OnChainSweep, + /// which point our counterparty may be able to steal them. + /// + /// Generally we have in the high tens to low hundreds of blocks to get our transaction + /// on-chain (it doesn't have to happen in the next few blocks!), but we shouldn't risk too low + /// a fee - this should be a relatively high priority feerate. + UrgentOnChainSweep, /// This is the lowest feerate we will allow our channel counterparty to have in an anchor /// channel in order to close the channel if a channel party goes away. /// @@ -130,14 +132,18 @@ pub enum ConfirmationTarget { /// /// [`ChannelManager::close_channel_with_feerate_and_script`]: crate::ln::channelmanager::ChannelManager::close_channel_with_feerate_and_script ChannelCloseMinimum, - /// The feerate [`OutputSweeper`] will use on transactions spending - /// [`SpendableOutputDescriptor`]s after a channel closure. + /// The feerate used to claim on-chain funds when there is no particular urgency to do so. 
+ /// + /// It is used to get commitment transactions without any HTLCs confirmed in [`ChannelMonitor`] + /// and by [`OutputSweeper`] on transactions spending [`SpendableOutputDescriptor`]s after a + /// channel closure. /// /// Generally spending these outputs is safe as long as they eventually confirm, so a value /// (slightly above) the mempool minimum should suffice. However, as this value will influence /// how long funds will be unavailable after channel closure, [`FeeEstimator`] implementors /// might want to choose a higher feerate to regain control over funds faster. /// + /// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor /// [`OutputSweeper`]: crate::util::sweep::OutputSweeper /// [`SpendableOutputDescriptor`]: crate::sign::SpendableOutputDescriptor OutputSpendingFee, diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 4cfb4bf12..deb9cc868 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -40,7 +40,7 @@ use crate::ln::chan_utils::{self,CommitmentTransaction, CounterpartyCommitmentSe use crate::ln::channelmanager::{HTLCSource, SentHTLCId}; use crate::chain; use crate::chain::{BestBlock, WatchedOutput}; -use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator, LowerBoundedFeeEstimator}; +use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator}; use crate::chain::transaction::{OutPoint, TransactionData}; use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, SpendableOutputDescriptor, StaticPaymentOutputDescriptor, DelayedPaymentOutputDescriptor, ecdsa::EcdsaChannelSigner, SignerProvider, EntropySource}; use crate::chain::onchaintx::{ClaimEvent, FeerateStrategy, OnchainTxHandler}; @@ -1902,8 +1902,9 @@ impl ChannelMonitor { let mut inner = self.inner.lock().unwrap(); let logger = WithChannelMonitor::from_impl(logger, &*inner, None); let current_height = inner.best_block.height; + let conf_target = inner.closure_conf_target(); inner.onchain_tx_handler.rebroadcast_pending_claims( - current_height, FeerateStrategy::HighestOfPreviousOrNew, &broadcaster, &fee_estimator, &logger, + current_height, FeerateStrategy::HighestOfPreviousOrNew, &broadcaster, conf_target, &fee_estimator, &logger, ); } @@ -1927,8 +1928,9 @@ impl ChannelMonitor { let mut inner = self.inner.lock().unwrap(); let logger = WithChannelMonitor::from_impl(logger, &*inner, None); let current_height = inner.best_block.height; + let conf_target = inner.closure_conf_target(); inner.onchain_tx_handler.rebroadcast_pending_claims( - current_height, FeerateStrategy::RetryPrevious, &broadcaster, &fee_estimator, &logger, + current_height, FeerateStrategy::RetryPrevious, &broadcaster, conf_target, &fee_estimator, &logger, ); } @@ -2684,6 +2686,30 @@ pub fn deliberately_bogus_accepted_htlc_witness() -> Vec> { } impl ChannelMonitorImpl { + /// Gets the [`ConfirmationTarget`] we should use when selecting feerates for channel closure + /// transactions for this channel right now. + fn closure_conf_target(&self) -> ConfirmationTarget { + // Treat the sweep as urgent as long as there is at least one HTLC which is pending on a + // valid commitment transaction. 
+ if !self.current_holder_commitment_tx.htlc_outputs.is_empty() { + return ConfirmationTarget::UrgentOnChainSweep; + } + if self.prev_holder_signed_commitment_tx.as_ref().map(|t| !t.htlc_outputs.is_empty()).unwrap_or(false) { + return ConfirmationTarget::UrgentOnChainSweep; + } + if let Some(txid) = self.current_counterparty_commitment_txid { + if !self.counterparty_claimable_outpoints.get(&txid).unwrap().is_empty() { + return ConfirmationTarget::UrgentOnChainSweep; + } + } + if let Some(txid) = self.prev_counterparty_commitment_txid { + if !self.counterparty_claimable_outpoints.get(&txid).unwrap().is_empty() { + return ConfirmationTarget::UrgentOnChainSweep; + } + } + ConfirmationTarget::OutputSpendingFee + } + /// Inserts a revocation secret into this channel monitor. Prunes old preimages if neither /// needed by holder commitment transactions HTCLs nor by counterparty ones. Unless we haven't already seen /// counterparty commitment transaction's secret, they are de facto pruned (we can use revocation key). @@ -2928,7 +2954,8 @@ impl ChannelMonitorImpl { macro_rules! claim_htlcs { ($commitment_number: expr, $txid: expr, $htlcs: expr) => { let (htlc_claim_reqs, _) = self.get_counterparty_output_claim_info($commitment_number, $txid, None, $htlcs); - self.onchain_tx_handler.update_claims_view_from_requests(htlc_claim_reqs, self.best_block.height, self.best_block.height, broadcaster, fee_estimator, logger); + let conf_target = self.closure_conf_target(); + self.onchain_tx_handler.update_claims_view_from_requests(htlc_claim_reqs, self.best_block.height, self.best_block.height, broadcaster, conf_target, fee_estimator, logger); } } if let Some(txid) = self.current_counterparty_commitment_txid { @@ -2976,7 +3003,8 @@ impl ChannelMonitorImpl { // block. Even if not, its a reasonable metric for the bump criteria on the HTLC // transactions. 
let (claim_reqs, _) = self.get_broadcasted_holder_claims(&holder_commitment_tx, self.best_block.height); - self.onchain_tx_handler.update_claims_view_from_requests(claim_reqs, self.best_block.height, self.best_block.height, broadcaster, fee_estimator, logger); + let conf_target = self.closure_conf_target(); + self.onchain_tx_handler.update_claims_view_from_requests(claim_reqs, self.best_block.height, self.best_block.height, broadcaster, conf_target, fee_estimator, logger); } } } @@ -3036,9 +3064,10 @@ impl ChannelMonitorImpl { L::Target: Logger, { let (claimable_outpoints, _) = self.generate_claimable_outpoints_and_watch_outputs(ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }); + let conf_target = self.closure_conf_target(); self.onchain_tx_handler.update_claims_view_from_requests( claimable_outpoints, self.best_block.height, self.best_block.height, broadcaster, - fee_estimator, logger + conf_target, fee_estimator, logger, ); } @@ -3851,7 +3880,8 @@ impl ChannelMonitorImpl { self.best_block = BestBlock::new(block_hash, height); log_trace!(logger, "Best block re-orged, replaced with new block {} at height {}", block_hash, height); self.onchain_events_awaiting_threshold_conf.retain(|ref entry| entry.height <= height); - self.onchain_tx_handler.block_disconnected(height + 1, broadcaster, fee_estimator, logger); + let conf_target = self.closure_conf_target(); + self.onchain_tx_handler.block_disconnected(height + 1, broadcaster, conf_target, fee_estimator, logger); Vec::new() } else { Vec::new() } } @@ -4116,8 +4146,9 @@ impl ChannelMonitorImpl { } } - self.onchain_tx_handler.update_claims_view_from_requests(claimable_outpoints, conf_height, self.best_block.height, broadcaster, fee_estimator, logger); - self.onchain_tx_handler.update_claims_view_from_matched_txn(&txn_matched, conf_height, conf_hash, self.best_block.height, broadcaster, fee_estimator, logger); + let conf_target = self.closure_conf_target(); + self.onchain_tx_handler.update_claims_view_from_requests(claimable_outpoints, conf_height, self.best_block.height, broadcaster, conf_target, fee_estimator, logger); + self.onchain_tx_handler.update_claims_view_from_matched_txn(&txn_matched, conf_height, conf_hash, self.best_block.height, broadcaster, conf_target, fee_estimator, logger); // Determine new outputs to watch by comparing against previously known outputs to watch, // updating the latter in the process. 
@@ -4156,7 +4187,8 @@ impl ChannelMonitorImpl { self.onchain_events_awaiting_threshold_conf.retain(|ref entry| entry.height < height); let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator); - self.onchain_tx_handler.block_disconnected(height, broadcaster, &bounded_fee_estimator, logger); + let conf_target = self.closure_conf_target(); + self.onchain_tx_handler.block_disconnected(height, broadcaster, conf_target, &bounded_fee_estimator, logger); self.best_block = BestBlock::new(header.prev_blockhash, height - 1); } @@ -4190,7 +4222,8 @@ impl ChannelMonitorImpl { debug_assert!(!self.onchain_events_awaiting_threshold_conf.iter().any(|ref entry| entry.txid == *txid)); - self.onchain_tx_handler.transaction_unconfirmed(txid, broadcaster, fee_estimator, logger); + let conf_target = self.closure_conf_target(); + self.onchain_tx_handler.transaction_unconfirmed(txid, broadcaster, conf_target, fee_estimator, logger); } /// Filters a block's `txdata` for transactions spending watched outputs or for any child diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index eff827983..00910a9d5 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -24,13 +24,13 @@ use bitcoin::secp256k1::PublicKey; use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature}; use bitcoin::secp256k1; -use crate::chain::chaininterface::compute_feerate_sat_per_1000_weight; +use crate::chain::chaininterface::{ConfirmationTarget, compute_feerate_sat_per_1000_weight}; use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ChannelSigner, EntropySource, SignerProvider, ecdsa::EcdsaChannelSigner}; use crate::ln::msgs::DecodeError; use crate::ln::types::PaymentPreimage; use crate::ln::chan_utils::{self, ChannelTransactionParameters, HTLCOutputInCommitment, HolderCommitmentTransaction}; use crate::chain::ClaimId; -use crate::chain::chaininterface::{ConfirmationTarget, FeeEstimator, BroadcasterInterface, LowerBoundedFeeEstimator}; +use crate::chain::chaininterface::{FeeEstimator, BroadcasterInterface, LowerBoundedFeeEstimator}; use crate::chain::channelmonitor::{ANTI_REORG_DELAY, CLTV_SHARED_CLAIM_BUFFER}; use crate::chain::package::{PackageSolvingData, PackageTemplate}; use crate::chain::transaction::MaybeSignedTransaction; @@ -487,7 +487,7 @@ impl OnchainTxHandler { /// connections, like on mobile. pub(super) fn rebroadcast_pending_claims( &mut self, current_height: u32, feerate_strategy: FeerateStrategy, broadcaster: &B, - fee_estimator: &LowerBoundedFeeEstimator, logger: &L, + conf_target: ConfirmationTarget, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) where B::Target: BroadcasterInterface, @@ -500,7 +500,7 @@ impl OnchainTxHandler { bump_requests.push((*claim_id, request.clone())); } for (claim_id, request) in bump_requests { - self.generate_claim(current_height, &request, &feerate_strategy, fee_estimator, logger) + self.generate_claim(current_height, &request, &feerate_strategy, conf_target, fee_estimator, logger) .map(|(_, new_feerate, claim)| { let mut bumped_feerate = false; if let Some(mut_request) = self.pending_claim_requests.get_mut(&claim_id) { @@ -552,7 +552,7 @@ impl OnchainTxHandler { /// events are not expected to fail, and if they do, we may lose funds. 
fn generate_claim( &mut self, cur_height: u32, cached_request: &PackageTemplate, feerate_strategy: &FeerateStrategy, - fee_estimator: &LowerBoundedFeeEstimator, logger: &L, + conf_target: ConfirmationTarget, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> Option<(u32, u64, OnchainClaim)> where F::Target: FeeEstimator, { @@ -600,7 +600,7 @@ impl OnchainTxHandler { if cached_request.is_malleable() { if cached_request.requires_external_funding() { let target_feerate_sat_per_1000_weight = cached_request.compute_package_feerate( - fee_estimator, ConfirmationTarget::OnChainSweep, feerate_strategy, + fee_estimator, conf_target, feerate_strategy, ); if let Some(htlcs) = cached_request.construct_malleable_package_with_external_funding(self) { return Some(( @@ -620,7 +620,7 @@ impl OnchainTxHandler { let predicted_weight = cached_request.package_weight(&self.destination_script); if let Some((output_value, new_feerate)) = cached_request.compute_package_output( predicted_weight, self.destination_script.minimal_non_dust().to_sat(), - feerate_strategy, fee_estimator, logger, + feerate_strategy, conf_target, fee_estimator, logger, ) { assert!(new_feerate != 0); @@ -650,7 +650,6 @@ impl OnchainTxHandler { debug_assert_eq!(tx.0.compute_txid(), self.holder_commitment.trust().txid(), "Holder commitment transaction mismatch"); - let conf_target = ConfirmationTarget::OnChainSweep; let package_target_feerate_sat_per_1000_weight = cached_request .compute_package_feerate(fee_estimator, conf_target, feerate_strategy); if let Some(input_amount_sat) = output.funding_amount { @@ -728,7 +727,8 @@ impl OnchainTxHandler { /// `cur_height`, however it must never be higher than `cur_height`. pub(super) fn update_claims_view_from_requests( &mut self, requests: Vec, conf_height: u32, cur_height: u32, - broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, logger: &L + broadcaster: &B, conf_target: ConfirmationTarget, + fee_estimator: &LowerBoundedFeeEstimator, logger: &L ) where B::Target: BroadcasterInterface, F::Target: FeeEstimator, @@ -798,7 +798,7 @@ impl OnchainTxHandler { // height timer expiration (i.e in how many blocks we're going to take action). for mut req in preprocessed_requests { if let Some((new_timer, new_feerate, claim)) = self.generate_claim( - cur_height, &req, &FeerateStrategy::ForceBump, &*fee_estimator, &*logger, + cur_height, &req, &FeerateStrategy::ForceBump, conf_target, &*fee_estimator, &*logger, ) { req.set_timer(new_timer); req.set_feerate(new_feerate); @@ -863,7 +863,8 @@ impl OnchainTxHandler { /// provided via `cur_height`, however it must never be higher than `cur_height`. 
pub(super) fn update_claims_view_from_matched_txn( &mut self, txn_matched: &[&Transaction], conf_height: u32, conf_hash: BlockHash, - cur_height: u32, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, logger: &L + cur_height: u32, broadcaster: &B, conf_target: ConfirmationTarget, + fee_estimator: &LowerBoundedFeeEstimator, logger: &L ) where B::Target: BroadcasterInterface, F::Target: FeeEstimator, @@ -1014,7 +1015,7 @@ impl OnchainTxHandler { for (claim_id, request) in bump_candidates.iter() { if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim( - cur_height, &request, &FeerateStrategy::ForceBump, &*fee_estimator, &*logger, + cur_height, &request, &FeerateStrategy::ForceBump, conf_target, &*fee_estimator, &*logger, ) { match bump_claim { OnchainClaim::Tx(bump_tx) => { @@ -1049,6 +1050,7 @@ impl OnchainTxHandler { &mut self, txid: &Txid, broadcaster: B, + conf_target: ConfirmationTarget, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) where @@ -1064,11 +1066,14 @@ impl OnchainTxHandler { } if let Some(height) = height { - self.block_disconnected(height, broadcaster, fee_estimator, logger); + self.block_disconnected(height, broadcaster, conf_target, fee_estimator, logger); } } - pub(super) fn block_disconnected(&mut self, height: u32, broadcaster: B, fee_estimator: &LowerBoundedFeeEstimator, logger: &L) + pub(super) fn block_disconnected( + &mut self, height: u32, broadcaster: B, conf_target: ConfirmationTarget, + fee_estimator: &LowerBoundedFeeEstimator, logger: &L, + ) where B::Target: BroadcasterInterface, F::Target: FeeEstimator, { @@ -1100,7 +1105,7 @@ impl OnchainTxHandler { // `height` is the height being disconnected, so our `current_height` is 1 lower. let current_height = height - 1; if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim( - current_height, &request, &FeerateStrategy::ForceBump, fee_estimator, logger + current_height, &request, &FeerateStrategy::ForceBump, conf_target, fee_estimator, logger ) { request.set_timer(new_timer); request.set_feerate(new_feerate); diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs index 361e12fa5..3dedf2914 100644 --- a/lightning/src/chain/package.rs +++ b/lightning/src/chain/package.rs @@ -473,7 +473,7 @@ impl Readable for HolderFundingOutput { (0, funding_redeemscript, required), (1, channel_type_features, option), (2, _legacy_deserialization_prevention_marker, option), - (3, funding_amount, option) + (3, funding_amount, option), }); verify_channel_type_features(&channel_type_features, None)?; @@ -966,7 +966,7 @@ impl PackageTemplate { /// value. 
pub(crate) fn compute_package_output( &self, predicted_weight: u64, dust_limit_sats: u64, feerate_strategy: &FeerateStrategy, - fee_estimator: &LowerBoundedFeeEstimator, logger: &L, + conf_target: ConfirmationTarget, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> Option<(u64, u64)> where F::Target: FeeEstimator, { @@ -977,12 +977,12 @@ impl PackageTemplate { if self.feerate_previous != 0 { if let Some((new_fee, feerate)) = feerate_bump( predicted_weight, input_amounts, self.feerate_previous, feerate_strategy, - fee_estimator, logger, + conf_target, fee_estimator, logger, ) { return Some((cmp::max(input_amounts as i64 - new_fee as i64, dust_limit_sats as i64) as u64, feerate)); } } else { - if let Some((new_fee, feerate)) = compute_fee_from_spent_amounts(input_amounts, predicted_weight, fee_estimator, logger) { + if let Some((new_fee, feerate)) = compute_fee_from_spent_amounts(input_amounts, predicted_weight, conf_target, fee_estimator, logger) { return Some((cmp::max(input_amounts as i64 - new_fee as i64, dust_limit_sats as i64) as u64, feerate)); } } @@ -1102,19 +1102,20 @@ impl Readable for PackageTemplate { } /// Attempt to propose a bumping fee for a transaction from its spent output's values and predicted -/// weight. We first try our [`OnChainSweep`] feerate, if it's not enough we try to sweep half of -/// the input amounts. +/// weight. We first try our estimator's feerate, if it's not enough we try to sweep half of the +/// input amounts. /// /// If the proposed fee is less than the available spent output's values, we return the proposed /// fee and the corresponding updated feerate. If fee is under [`FEERATE_FLOOR_SATS_PER_KW`], we /// return nothing. /// -/// [`OnChainSweep`]: crate::chain::chaininterface::ConfirmationTarget::OnChainSweep /// [`FEERATE_FLOOR_SATS_PER_KW`]: crate::chain::chaininterface::MIN_RELAY_FEE_SAT_PER_1000_WEIGHT -fn compute_fee_from_spent_amounts(input_amounts: u64, predicted_weight: u64, fee_estimator: &LowerBoundedFeeEstimator, logger: &L) -> Option<(u64, u64)> +fn compute_fee_from_spent_amounts( + input_amounts: u64, predicted_weight: u64, conf_target: ConfirmationTarget, fee_estimator: &LowerBoundedFeeEstimator, logger: &L +) -> Option<(u64, u64)> where F::Target: FeeEstimator, { - let sweep_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::OnChainSweep); + let sweep_feerate = fee_estimator.bounded_sat_per_1000_weight(conf_target); let fee_rate = cmp::min(sweep_feerate, compute_feerate_sat_per_1000_weight(input_amounts / 2, predicted_weight)); let fee = fee_rate as u64 * (predicted_weight) / 1000; @@ -1136,13 +1137,15 @@ fn compute_fee_from_spent_amounts(input_amounts: u64, predi /// requirement. fn feerate_bump( predicted_weight: u64, input_amounts: u64, previous_feerate: u64, feerate_strategy: &FeerateStrategy, - fee_estimator: &LowerBoundedFeeEstimator, logger: &L, + conf_target: ConfirmationTarget, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> Option<(u64, u64)> where F::Target: FeeEstimator, { // If old feerate inferior to actual one given back by Fee Estimator, use it to compute new fee... 
- let (new_fee, new_feerate) = if let Some((new_fee, new_feerate)) = compute_fee_from_spent_amounts(input_amounts, predicted_weight, fee_estimator, logger) { + let (new_fee, new_feerate) = if let Some((new_fee, new_feerate)) = + compute_fee_from_spent_amounts(input_amounts, predicted_weight, conf_target, fee_estimator, logger) + { match feerate_strategy { FeerateStrategy::RetryPrevious => { let previous_fee = previous_feerate * predicted_weight / 1000; From 3f23e3c2884c2d752cd35acc1b479c29a39f8836 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 26 Aug 2024 21:31:27 +0000 Subject: [PATCH 3/5] Add a constructor and per-target override to `TestFeeEstimator` This will allow us to test `ConfirmationTarget`s used in functional tests by setting an override on just the target we expect to be used. --- lightning-background-processor/src/lib.rs | 7 +++---- lightning/src/chain/channelmonitor.rs | 4 ++-- lightning/src/ln/channelmanager.rs | 2 +- lightning/src/ln/functional_test_utils.rs | 6 +++--- lightning/src/ln/functional_tests.rs | 6 +++--- lightning/src/ln/reload_tests.rs | 3 +-- lightning/src/util/test_utils.rs | 13 +++++++++++-- 7 files changed, 24 insertions(+), 17 deletions(-) diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 654774252..69511d5ee 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -1107,7 +1107,7 @@ mod tests { use std::collections::VecDeque; use std::path::PathBuf; use std::sync::mpsc::SyncSender; - use std::sync::{Arc, Mutex}; + use std::sync::Arc; use std::time::Duration; use std::{env, fs}; @@ -1126,7 +1126,7 @@ mod tests { #[cfg(c_bindings)] type LockingWrapper = lightning::routing::scoring::MultiThreadedLockableScore; #[cfg(not(c_bindings))] - type LockingWrapper = Mutex; + type LockingWrapper = std::sync::Mutex; type ChannelManager = channelmanager::ChannelManager< Arc, @@ -1532,8 +1532,7 @@ mod tests { let mut nodes = Vec::new(); for i in 0..num_nodes { let tx_broadcaster = Arc::new(test_utils::TestBroadcaster::new(network)); - let fee_estimator = - Arc::new(test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }); + let fee_estimator = Arc::new(test_utils::TestFeeEstimator::new(253)); let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i))); let genesis_block = genesis_block(network); let network_graph = Arc::new(NetworkGraph::new(network, logger.clone())); diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index deb9cc868..86f0d3de5 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -5016,7 +5016,7 @@ mod tests { use crate::util::test_utils::{TestLogger, TestBroadcaster, TestFeeEstimator}; use crate::util::ser::{ReadableArgs, Writeable}; use crate::util::logger::Logger; - use crate::sync::{Arc, Mutex}; + use crate::sync::Arc; use crate::io; use crate::ln::features::ChannelTypeFeatures; @@ -5116,7 +5116,7 @@ mod tests { let secp_ctx = Secp256k1::new(); let logger = Arc::new(TestLogger::new()); let broadcaster = Arc::new(TestBroadcaster::new(Network::Testnet)); - let fee_estimator = TestFeeEstimator { sat_per_kw: Mutex::new(253) }; + let fee_estimator = TestFeeEstimator::new(253); let dummy_key = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 8a07e18a4..ce16b6746 100644 --- a/lightning/src/ln/channelmanager.rs +++ 
b/lightning/src/ln/channelmanager.rs @@ -14198,7 +14198,7 @@ pub mod bench { let genesis_block = bitcoin::constants::genesis_block(network); let tx_broadcaster = test_utils::TestBroadcaster::new(network); - let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }; + let fee_estimator = test_utils::TestFeeEstimator::new(253); let logger_a = test_utils::TestLogger::with_id("node a".to_owned()); let scorer = RwLock::new(test_utils::TestScorer::new()); let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &logger_a, &scorer); diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 37a8f9b67..8f1872422 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -659,7 +659,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { // Check that if we serialize and then deserialize all our channel monitors we get the // same set of outputs to watch for on chain as we have now. Note that if we write // tests that fully close channels and remove the monitors at some point this may break. - let feeest = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }; + let feeest = test_utils::TestFeeEstimator::new(253); let mut deserialized_monitors = Vec::new(); { for (outpoint, _channel_id) in self.chain_monitor.chain_monitor.list_monitors() { @@ -692,7 +692,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { entropy_source: self.keys_manager, node_signer: self.keys_manager, signer_provider: self.keys_manager, - fee_estimator: &test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, + fee_estimator: &test_utils::TestFeeEstimator::new(253), router: &test_utils::TestRouter::new(Arc::new(network_graph), &self.logger, &scorer), chain_monitor: self.chain_monitor, tx_broadcaster: &broadcaster, @@ -3176,7 +3176,7 @@ pub fn create_chanmon_cfgs(node_count: usize) -> Vec { let mut chan_mon_cfgs = Vec::new(); for i in 0..node_count { let tx_broadcaster = test_utils::TestBroadcaster::new(Network::Testnet); - let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }; + let fee_estimator = test_utils::TestFeeEstimator::new(253); let chain_source = test_utils::TestChainSource::new(Network::Testnet); let logger = test_utils::TestLogger::with_id(format!("node {}", i)); let persister = test_utils::TestPersister::new(); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 12f1466e0..2030bdb08 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -7247,7 +7247,7 @@ fn test_user_configurable_csv_delay() { let logger = TestLogger::new(); // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in OutboundV1Channel::new() - if let Err(error) = OutboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }), + if let Err(error) = OutboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator::new(253)), &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[1].node.init_features(), 1000000, 1000000, 0, &low_our_to_self_config, 0, 42, None, &logger) { @@ -7261,7 +7261,7 @@ fn test_user_configurable_csv_delay() { nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap(); let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id()); 
open_channel.common_fields.to_self_delay = 200; - if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }), + if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator::new(253)), &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0, &low_our_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false) { @@ -7296,7 +7296,7 @@ fn test_user_configurable_csv_delay() { nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap(); let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id()); open_channel.common_fields.to_self_delay = 200; - if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }), + if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator::new(253)), &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0, &high_their_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false) { diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 21bc1a0b5..a17f8495a 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -28,7 +28,6 @@ use crate::util::config::UserConfig; use bitcoin::hash_types::BlockHash; use crate::prelude::*; -use crate::sync::Mutex; use crate::ln::functional_test_utils::*; @@ -388,7 +387,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { } logger = test_utils::TestLogger::new(); - fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }; + fee_estimator = test_utils::TestFeeEstimator::new(253); persister = test_utils::TestPersister::new(); let keys_manager = &chanmon_cfgs[0].keys_manager; new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster, &logger, &fee_estimator, &persister, keys_manager); diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index dad004a35..13d7b138f 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -99,10 +99,19 @@ impl Writer for TestVecWriter { pub struct TestFeeEstimator { pub sat_per_kw: Mutex, + pub target_override: Mutex>, +} +impl TestFeeEstimator { + pub fn new(sat_per_kw: u32) -> Self { + Self { + sat_per_kw: Mutex::new(sat_per_kw), + target_override: Mutex::new(new_hash_map()), + } + } } impl chaininterface::FeeEstimator for TestFeeEstimator { - fn get_est_sat_per_1000_weight(&self, _confirmation_target: ConfirmationTarget) -> u32 { - *self.sat_per_kw.lock().unwrap() + fn get_est_sat_per_1000_weight(&self, conf_target: ConfirmationTarget) -> u32 { + *self.target_override.lock().unwrap().get(&conf_target).unwrap_or(&*self.sat_per_kw.lock().unwrap()) } } From 80dd594099808c031b1472b7c9ef7189ad67b04a Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 27 Aug 2024 16:44:58 +0000 Subject: [PATCH 4/5] Don't ignore events in `test_yield_anchors_events` Our tests should never ignore the events generated as they provide critical context about what's happening in LDK. Here we fix `test_yield_anchors_events` to avoid doing so. 
--- lightning/src/ln/monitor_tests.rs | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 3fbcef344..9d99c58fc 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -2505,6 +2505,20 @@ fn test_yield_anchors_events() { &LowerBoundedFeeEstimator::new(node_cfgs[1].fee_estimator), &nodes[1].logger ); + check_closed_broadcast(&nodes[0], 1, true); + let a_events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(a_events.len(), 3); + assert!(a_events.iter().any(|ev| matches!(ev, Event::PendingHTLCsForwardable { .. }))); + assert!(a_events.iter().any(|ev| matches!(ev, Event::HTLCHandlingFailed { .. }))); + assert!(a_events.iter().any(|ev| matches!(ev, Event::ChannelClosed { .. }))); + + check_closed_broadcast(&nodes[1], 1, true); + let b_events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(b_events.len(), 3); + assert!(b_events.iter().any(|ev| matches!(ev, Event::PendingHTLCsForwardable { .. }))); + assert!(b_events.iter().any(|ev| matches!(ev, Event::HTLCHandlingFailed { .. }))); + assert!(b_events.iter().any(|ev| matches!(ev, Event::ChannelClosed { .. }))); + let mut holder_events = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events(); assert_eq!(holder_events.len(), 1); let (commitment_tx, anchor_tx) = match holder_events.pop().unwrap() { @@ -2530,6 +2544,7 @@ fn test_yield_anchors_events() { }, _ => panic!("Unexpected event"), }; + check_spends!(commitment_tx, funding_tx); assert_eq!(commitment_tx.output[2].value.to_sat(), 1_000); // HTLC A -> B assert_eq!(commitment_tx.output[3].value.to_sat(), 2_000); // HTLC B -> A @@ -2587,6 +2602,7 @@ fn test_yield_anchors_events() { connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); + expect_payment_failed!(nodes[0], payment_hash_1, false); connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32); @@ -2599,11 +2615,6 @@ fn test_yield_anchors_events() { } } - // Clear the remaining events as they're not relevant to what we're testing. - nodes[0].node.get_and_clear_pending_events(); - nodes[1].node.get_and_clear_pending_events(); - nodes[0].node.get_and_clear_pending_msg_events(); - nodes[1].node.get_and_clear_pending_msg_events(); } #[test] From cf97cefb472a7bb99a815a2da6749ee23de2c052 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 27 Aug 2024 16:47:57 +0000 Subject: [PATCH 5/5] Test new `ConfirmationTarget` selection based on HTLC set This updates `test_yield_anchors_events` to test both anchor channels with and without HTLCs, and relies on overriding only the singular expected `ConfirmationTarget` used, testing the new `ConfirmationTarget::UrgentOnChainSweep` use. 
--- lightning/src/ln/monitor_tests.rs | 88 ++++++++++++++++++++++--------- 1 file changed, 63 insertions(+), 25 deletions(-) diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 9d99c58fc..d158b9ba1 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -12,7 +12,7 @@ use crate::sign::{ecdsa::EcdsaChannelSigner, OutputSpender, SpendableOutputDescriptor}; use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS, Balance, BalanceSource}; use crate::chain::transaction::OutPoint; -use crate::chain::chaininterface::{LowerBoundedFeeEstimator, compute_feerate_sat_per_1000_weight}; +use crate::chain::chaininterface::{ConfirmationTarget, LowerBoundedFeeEstimator, compute_feerate_sat_per_1000_weight}; use crate::events::bump_transaction::{BumpTransactionEvent, WalletSource}; use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination}; use crate::ln::channel; @@ -2458,8 +2458,7 @@ fn test_monitor_timer_based_claim() { do_test_monitor_rebroadcast_pending_claims(true); } -#[test] -fn test_yield_anchors_events() { +fn do_test_yield_anchors_events(have_htlcs: bool) { // Tests that two parties supporting anchor outputs can open a channel, route payments over // it, and finalize its resolution uncooperatively. Once the HTLCs are locked in, one side will // force close once the HTLCs expire. The force close should stem from an event emitted by LDK, @@ -2478,45 +2477,72 @@ fn test_yield_anchors_events() { let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value( &nodes, 0, 1, 1_000_000, 500_000_000 ); - let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); - let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[1], &[&nodes[0]], 2_000_000); + let mut payment_preimage_1 = None; + let mut payment_hash_1 = None; + let mut payment_preimage_2 = None; + let mut payment_hash_2 = None; + if have_htlcs { + let (preimage_1, hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let (preimage_2, hash_2, ..) = route_payment(&nodes[1], &[&nodes[0]], 2_000_000); + payment_preimage_1 = Some(preimage_1); + payment_hash_1 = Some(hash_1); + payment_preimage_2 = Some(preimage_2); + payment_hash_2 = Some(hash_2); + } assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); - *nodes[0].fee_estimator.sat_per_kw.lock().unwrap() *= 2; + // Note that if we use the wrong target, we will immediately broadcast the commitment + // transaction as no bump is required. 
+ if have_htlcs { + nodes[0].fee_estimator.target_override.lock().unwrap().insert(ConfirmationTarget::UrgentOnChainSweep, 500); + } else { + nodes[0].fee_estimator.target_override.lock().unwrap().insert(ConfirmationTarget::OutputSpendingFee, 500); + } connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1); assert!(nodes[0].tx_broadcaster.txn_broadcast().is_empty()); connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1); + if !have_htlcs { + nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[0].node.get_our_node_id(), "".to_string()).unwrap(); + } { let txn = nodes[1].tx_broadcaster.txn_broadcast(); assert_eq!(txn.len(), 1); check_spends!(txn[0], funding_tx); } - get_monitor!(nodes[0], chan_id).provide_payment_preimage( - &payment_hash_2, &payment_preimage_2, &node_cfgs[0].tx_broadcaster, - &LowerBoundedFeeEstimator::new(node_cfgs[0].fee_estimator), &nodes[0].logger - ); - get_monitor!(nodes[1], chan_id).provide_payment_preimage( - &payment_hash_1, &payment_preimage_1, &node_cfgs[1].tx_broadcaster, - &LowerBoundedFeeEstimator::new(node_cfgs[1].fee_estimator), &nodes[1].logger - ); + if have_htlcs { + get_monitor!(nodes[0], chan_id).provide_payment_preimage( + &payment_hash_2.unwrap(), &payment_preimage_2.unwrap(), &node_cfgs[0].tx_broadcaster, + &LowerBoundedFeeEstimator::new(node_cfgs[0].fee_estimator), &nodes[0].logger + ); + get_monitor!(nodes[1], chan_id).provide_payment_preimage( + &payment_hash_1.unwrap(), &payment_preimage_1.unwrap(), &node_cfgs[1].tx_broadcaster, + &LowerBoundedFeeEstimator::new(node_cfgs[1].fee_estimator), &nodes[1].logger + ); + } else { + nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), "".to_string()).unwrap(); + } check_closed_broadcast(&nodes[0], 1, true); let a_events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(a_events.len(), 3); - assert!(a_events.iter().any(|ev| matches!(ev, Event::PendingHTLCsForwardable { .. }))); - assert!(a_events.iter().any(|ev| matches!(ev, Event::HTLCHandlingFailed { .. }))); + assert_eq!(a_events.len(), if have_htlcs { 3 } else { 1 }); + if have_htlcs { + assert!(a_events.iter().any(|ev| matches!(ev, Event::PendingHTLCsForwardable { .. }))); + assert!(a_events.iter().any(|ev| matches!(ev, Event::HTLCHandlingFailed { .. }))); + } assert!(a_events.iter().any(|ev| matches!(ev, Event::ChannelClosed { .. }))); check_closed_broadcast(&nodes[1], 1, true); let b_events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(b_events.len(), 3); - assert!(b_events.iter().any(|ev| matches!(ev, Event::PendingHTLCsForwardable { .. }))); - assert!(b_events.iter().any(|ev| matches!(ev, Event::HTLCHandlingFailed { .. }))); + assert_eq!(b_events.len(), if have_htlcs { 3 } else { 1 }); + if have_htlcs { + assert!(b_events.iter().any(|ev| matches!(ev, Event::PendingHTLCsForwardable { .. }))); + assert!(b_events.iter().any(|ev| matches!(ev, Event::HTLCHandlingFailed { .. }))); + } assert!(b_events.iter().any(|ev| matches!(ev, Event::ChannelClosed { .. 
}))); let mut holder_events = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events(); @@ -2546,14 +2572,21 @@ fn test_yield_anchors_events() { }; check_spends!(commitment_tx, funding_tx); - assert_eq!(commitment_tx.output[2].value.to_sat(), 1_000); // HTLC A -> B - assert_eq!(commitment_tx.output[3].value.to_sat(), 2_000); // HTLC B -> A + if have_htlcs { + assert_eq!(commitment_tx.output[2].value.to_sat(), 1_000); // HTLC A -> B + assert_eq!(commitment_tx.output[3].value.to_sat(), 2_000); // HTLC B -> A + } mine_transactions(&nodes[0], &[&commitment_tx, &anchor_tx]); check_added_monitors!(nodes[0], 1); mine_transactions(&nodes[1], &[&commitment_tx, &anchor_tx]); check_added_monitors!(nodes[1], 1); + if !have_htlcs { + // If we don't have any HTLCs, we're done, the rest of the test is about HTLC transactions + return; + } + { let mut txn = nodes[1].tx_broadcaster.unique_txn_broadcast(); assert_eq!(txn.len(), if nodes[1].connect_style.borrow().updates_best_block_first() { 3 } else { 2 }); @@ -2568,8 +2601,8 @@ fn test_yield_anchors_events() { assert_eq!(htlc_timeout_tx.input[0].previous_output.vout, 2); check_spends!(htlc_timeout_tx, commitment_tx); - if let Some(commitment_tx) = txn.pop() { - check_spends!(commitment_tx, funding_tx); + if let Some(new_commitment_tx) = txn.pop() { + check_spends!(new_commitment_tx, funding_tx); } } @@ -2602,7 +2635,7 @@ fn test_yield_anchors_events() { connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); - expect_payment_failed!(nodes[0], payment_hash_1, false); + expect_payment_failed!(nodes[0], payment_hash_1.unwrap(), false); connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32); @@ -2614,7 +2647,12 @@ fn test_yield_anchors_events() { _ => panic!("Unexpected event"), } } +} +#[test] +fn test_yield_anchors_events() { + do_test_yield_anchors_events(true); + do_test_yield_anchors_events(false); } #[test]
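
With the full series applied, the confirmation targets split naturally by urgency:
`MaximumFeeEstimate` should be the most aggressive estimate available, `UrgentOnChainSweep`
still has to confirm within tens of blocks because HTLCs are at stake, while
`OutputSpendingFee` (now also used for commitment transactions without HTLCs) only needs to
confirm eventually. One rough mapping to block-count horizons that an implementor might feed
to their fee source of choice is sketched below; the function and the specific numbers are
illustrative assumptions, not recommendations from this patch set:

    use lightning::chain::chaininterface::ConfirmationTarget;

    /// Suggested confirmation horizon, in blocks, to request from a fee source.
    /// `None` means "no urgency": a feerate slightly above the mempool minimum suffices.
    fn blocks_for_target(target: ConfirmationTarget) -> Option<u16> {
        match target {
            // Only used to judge the peer's feerate, so ask for the fastest
            // (highest-fee) estimate available.
            ConfirmationTarget::MaximumFeeEstimate => Some(1),
            // HTLCs, and therefore funds, are on the line: confirm within a few blocks.
            ConfirmationTarget::UrgentOnChainSweep => Some(6),
            // HTLC-free closures and swept outputs: confirming eventually is fine,
            // so a near-minimum feerate saves on fees.
            ConfirmationTarget::OutputSpendingFee => None,
            // Ongoing channel operation can track a medium horizon.
            ConfirmationTarget::AnchorChannelFee
            | ConfirmationTarget::NonAnchorChannelFee => Some(12),
            // Remaining minimums and lower bounds: the cheapest estimate available.
            _ => None,
        }
    }
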