Merge pull request #2034 from wpaulino/anchor-revoked-aggregate-claim

Add test for aggregated revoked HTLC claim on anchors channel
Matt Corallo 2023-03-21 22:32:18 +00:00 committed by GitHub
commit 9f8e832c7b
7 changed files with 507 additions and 67 deletions

View file

@@ -179,6 +179,7 @@ jobs:
          cargo check --no-default-features --features=no-std --release
          cargo check --no-default-features --features=futures --release
          cargo doc --release
+         RUSTDOCFLAGS="--cfg=anchors" cargo doc --release
  fuzz:
    runs-on: ubuntu-latest

View file

@@ -89,3 +89,8 @@ if [ "$RUSTC_MINOR_VERSION" -gt 55 ]; then
    cargo test --verbose --color always
    popd
fi
+
+echo -e "\n\nTest anchors builds"
+pushd lightning
+RUSTFLAGS="$RUSTFLAGS --cfg=anchors" cargo test --verbose --color always -p lightning
+popd
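
The anchors code paths exercised above are compiled out of the `lightning` crate by default, which is why CI and the test script must pass `--cfg=anchors` explicitly. A minimal, hypothetical sketch of how such items are gated; the `#[cfg(anchors)]` attribute mirrors its use throughout this diff, while the helper function itself is invented for illustration:

    #[cfg(anchors)]
    use crate::util::events::BumpTransactionEvent;

    // Only compiled (and rustdoc'd) when the build sets `--cfg=anchors`.
    #[cfg(anchors)]
    fn log_bump_event(event: &BumpTransactionEvent) {
        println!("pending bump event: {:?}", event);
    }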

View file

@@ -72,18 +72,23 @@ impl OnchainEventEntry {
    }
}

-/// Upon discovering of some classes of onchain tx by ChannelMonitor, we may have to take actions on it
-/// once they mature to enough confirmations (ANTI_REORG_DELAY)
+/// Events for claims the [`OnchainTxHandler`] has generated. Once the events are considered safe
+/// from a chain reorg, the [`OnchainTxHandler`] will act accordingly.
#[derive(PartialEq, Eq)]
enum OnchainEvent {
-    /// Outpoint under claim process by our own tx, once this one get enough confirmations, we remove it from
-    /// bump-txn candidate buffer.
+    /// A pending request has been claimed by a transaction spending the exact same set of outpoints
+    /// as the request. This claim can either be ours or from the counterparty. Once the claiming
+    /// transaction has met [`ANTI_REORG_DELAY`] confirmations, we consider it final and remove the
+    /// pending request.
    Claim {
        package_id: PackageID,
    },
-    /// Claim tx aggregate multiple claimable outpoints. One of the outpoint may be claimed by a counterparty party tx.
-    /// In this case, we need to drop the outpoint and regenerate a new claim tx. By safety, we keep tracking
-    /// the outpoint to be sure to resurect it back to the claim tx if reorgs happen.
+    /// The counterparty has claimed an outpoint from one of our pending requests through a
+    /// different transaction than ours. If our transaction was attempting to claim multiple
+    /// outputs, we need to drop the outpoint claimed by the counterparty and regenerate a new claim
+    /// transaction for ourselves. We keep tracking, separately, the outpoint claimed by the
+    /// counterparty up to [`ANTI_REORG_DELAY`] confirmations to ensure we attempt to re-claim it
+    /// if the counterparty's claim is reorged from the chain.
    ContentiousOutpoint {
        package: PackageTemplate,
    }

@@ -215,7 +220,6 @@ type PackageID = [u8; 32];
/// OnchainTxHandler receives claiming requests, aggregates them if it's sound, broadcast and
/// do RBF bumping if possible.
-#[derive(PartialEq)]
pub struct OnchainTxHandler<ChannelSigner: WriteableEcdsaChannelSigner> {
    destination_script: Script,
    holder_commitment: HolderCommitmentTransaction,

@@ -244,15 +248,26 @@ pub struct OnchainTxHandler<ChannelSigner: WriteableEcdsaChannelSigner> {
    pub(crate) pending_claim_requests: HashMap<PackageID, PackageTemplate>,
    #[cfg(not(test))]
    pending_claim_requests: HashMap<PackageID, PackageTemplate>,
-    #[cfg(anchors)]
-    pending_claim_events: HashMap<PackageID, ClaimEvent>,
-    // Used to link outpoints claimed in a connected block to a pending claim request.
-    // Key is outpoint than monitor parsing has detected we have keys/scripts to claim
-    // Value is (pending claim request identifier, confirmation_block), identifier
-    // is txid of the initial claiming transaction and is immutable until outpoint is
-    // post-anti-reorg-delay solved, confirmaiton_block is used to erase entry if
-    // block with output gets disconnected.
+    // Used to track external events that need to be forwarded to the `ChainMonitor`. This `Vec`
+    // essentially acts as an insertion-ordered `HashMap`; there should only ever be one occurrence
+    // of a `PackageID`, which tracks its latest `ClaimEvent`, i.e., if a pending claim exists, and
+    // a new block has been connected, resulting in a new claim, the previous will be replaced with
+    // the new.
+    //
+    // These external events may be generated in the following cases:
+    //  - A channel has been force closed by broadcasting the holder's latest commitment transaction
+    //  - A block being connected/disconnected
+    //  - Learning the preimage for an HTLC we can claim onchain
+    #[cfg(anchors)]
+    pending_claim_events: Vec<(PackageID, ClaimEvent)>,
+
+    // Used to link outpoints claimed in a connected block to a pending claim request. The keys
+    // represent the outpoints that our `ChannelMonitor` has detected we have keys/scripts to
+    // claim. The values track the pending claim request identifier and the initial confirmation
+    // block height, and are immutable until the outpoint has enough confirmations to meet our
+    // [`ANTI_REORG_DELAY`]. The initial confirmation block height is used to remove the entry if
+    // the block gets disconnected.
    #[cfg(test)] // Used in functional_test to verify sanitization
    pub claimable_outpoints: HashMap<BitcoinOutPoint, (PackageID, u32)>,
    #[cfg(not(test))]

@@ -265,6 +280,22 @@ pub struct OnchainTxHandler<ChannelSigner: WriteableEcdsaChannelSigner> {
    pub(super) secp_ctx: Secp256k1<secp256k1::All>,
}

+impl<ChannelSigner: WriteableEcdsaChannelSigner> PartialEq for OnchainTxHandler<ChannelSigner> {
+    fn eq(&self, other: &Self) -> bool {
+        // `signer`, `secp_ctx`, and `pending_claim_events` are excluded on purpose.
+        self.destination_script == other.destination_script &&
+            self.holder_commitment == other.holder_commitment &&
+            self.holder_htlc_sigs == other.holder_htlc_sigs &&
+            self.prev_holder_commitment == other.prev_holder_commitment &&
+            self.prev_holder_htlc_sigs == other.prev_holder_htlc_sigs &&
+            self.channel_transaction_parameters == other.channel_transaction_parameters &&
+            self.pending_claim_requests == other.pending_claim_requests &&
+            self.claimable_outpoints == other.claimable_outpoints &&
+            self.locktimed_packages == other.locktimed_packages &&
+            self.onchain_events_awaiting_threshold_conf == other.onchain_events_awaiting_threshold_conf
+    }
+}

const SERIALIZATION_VERSION: u8 = 1;
const MIN_SERIALIZATION_VERSION: u8 = 1;

@@ -406,7 +437,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
            pending_claim_requests,
            onchain_events_awaiting_threshold_conf,
            #[cfg(anchors)]
-            pending_claim_events: HashMap::new(),
+            pending_claim_events: Vec::new(),
            secp_ctx,
        })
    }

@@ -427,8 +458,7 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
            locktimed_packages: BTreeMap::new(),
            onchain_events_awaiting_threshold_conf: Vec::new(),
            #[cfg(anchors)]
-            pending_claim_events: HashMap::new(),
+            pending_claim_events: Vec::new(),
            secp_ctx,
        }
    }

@@ -443,9 +473,9 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
    #[cfg(anchors)]
    pub(crate) fn get_and_clear_pending_claim_events(&mut self) -> Vec<ClaimEvent> {
-        let mut ret = HashMap::new();
-        swap(&mut ret, &mut self.pending_claim_events);
-        ret.into_iter().map(|(_, event)| event).collect::<Vec<_>>()
+        let mut events = Vec::new();
+        swap(&mut events, &mut self.pending_claim_events);
+        events.into_iter().map(|(_, event)| event).collect()
    }

    /// Lightning security model (i.e being able to redeem/timeout HTLC or penalize counterparty
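
Switching `pending_claim_events` from a `HashMap` to a `Vec` preserves the order in which claim events were generated, and the accessor above drains it with a swap. A minimal sketch of that pattern, with a placeholder type standing in for the handler's `ClaimEvent`:

    use std::mem::swap;

    struct PlaceholderClaimEvent; // stands in for the real `ClaimEvent`

    // Take every pending (PackageID, event) pair, leaving an empty Vec behind,
    // and return the events in insertion order.
    fn get_and_clear(pending: &mut Vec<([u8; 32], PlaceholderClaimEvent)>) -> Vec<PlaceholderClaimEvent> {
        let mut events = Vec::new();
        swap(&mut events, pending);
        events.into_iter().map(|(_, event)| event).collect()
    }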
@@ -474,12 +504,12 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
        // transaction is reorged out.
        let mut all_inputs_have_confirmed_spend = true;
        for outpoint in request_outpoints.iter() {
-            if let Some(first_claim_txid_height) = self.claimable_outpoints.get(*outpoint) {
+            if let Some((request_package_id, _)) = self.claimable_outpoints.get(*outpoint) {
                // We check for outpoint spends within claims individually rather than as a set
                // since requests can have outpoints split off.
                if !self.onchain_events_awaiting_threshold_conf.iter()
                    .any(|event_entry| if let OnchainEvent::Claim { package_id } = event_entry.event {
-                        first_claim_txid_height.0 == package_id
+                        *request_package_id == package_id
                    } else {
                        // The onchain event is not a claim, keep seeking until we find one.
                        false

@@ -689,7 +719,8 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
                    package_id
                },
            };
-            self.pending_claim_events.insert(package_id, claim_event);
+            debug_assert_eq!(self.pending_claim_events.iter().filter(|entry| entry.0 == package_id).count(), 0);
+            self.pending_claim_events.push((package_id, claim_event));
            package_id
        },
    };

@@ -724,9 +755,9 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
        // Scan all input to verify is one of the outpoint spent is of interest for us
        let mut claimed_outputs_material = Vec::new();
        for inp in &tx.input {
-            if let Some(first_claim_txid_height) = self.claimable_outpoints.get(&inp.previous_output) {
+            if let Some((package_id, _)) = self.claimable_outpoints.get(&inp.previous_output) {
                // If outpoint has claim request pending on it...
-                if let Some(request) = self.pending_claim_requests.get_mut(&first_claim_txid_height.0) {
+                if let Some(request) = self.pending_claim_requests.get_mut(package_id) {
                    //... we need to verify equality between transaction outpoints and claim request
                    // outpoints to know if transaction is the original claim or a bumped one issued
                    // by us.

@@ -746,7 +777,7 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
                        txid: tx.txid(),
                        height: conf_height,
                        block_hash: Some(conf_hash),
-                        event: OnchainEvent::Claim { package_id: first_claim_txid_height.0 }
+                        event: OnchainEvent::Claim { package_id: *package_id }
                    };
                    if !self.onchain_events_awaiting_threshold_conf.contains(&entry) {
                        self.onchain_events_awaiting_threshold_conf.push(entry);

@@ -773,7 +804,21 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
                    }
                    //TODO: recompute soonest_timelock to avoid wasting a bit on fees
                    if at_least_one_drop {
-                        bump_candidates.insert(first_claim_txid_height.0.clone(), request.clone());
+                        bump_candidates.insert(*package_id, request.clone());
+                        // If we have any pending claim events for the request being updated
+                        // that have yet to be consumed, we'll remove them since they will
+                        // end up producing an invalid transaction by double spending
+                        // input(s) that already have a confirmed spend. If such spend is
+                        // reorged out of the chain, then we'll attempt to re-spend the
+                        // inputs once we see it.
+                        #[cfg(anchors)] {
+                            #[cfg(debug_assertions)] {
+                                let existing = self.pending_claim_events.iter()
+                                    .filter(|entry| entry.0 == *package_id).count();
+                                assert!(existing == 0 || existing == 1);
+                            }
+                            self.pending_claim_events.retain(|entry| entry.0 != *package_id);
+                        }
                    }
                }
                break; //No need to iterate further, either tx is our or their

@@ -809,8 +854,14 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
                log_debug!(logger, "Removing claim tracking for {} due to maturation of claim package {}.",
                    outpoint, log_bytes!(package_id));
                self.claimable_outpoints.remove(outpoint);
-                #[cfg(anchors)]
-                self.pending_claim_events.remove(&package_id);
+            }
+            #[cfg(anchors)] {
+                #[cfg(debug_assertions)] {
+                    let num_existing = self.pending_claim_events.iter()
+                        .filter(|entry| entry.0 == package_id).count();
+                    assert!(num_existing == 0 || num_existing == 1);
+                }
+                self.pending_claim_events.retain(|(id, _)| *id != package_id);
            }
        }
    },

@@ -826,17 +877,17 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
        }

        // Check if any pending claim request must be rescheduled
-        for (first_claim_txid, ref request) in self.pending_claim_requests.iter() {
+        for (package_id, request) in self.pending_claim_requests.iter() {
            if let Some(h) = request.timer() {
                if cur_height >= h {
-                    bump_candidates.insert(*first_claim_txid, (*request).clone());
+                    bump_candidates.insert(*package_id, request.clone());
                }
            }
        }

        // Build, bump and rebroadcast tx accordingly
        log_trace!(logger, "Bumping {} candidates", bump_candidates.len());
-        for (first_claim_txid, request) in bump_candidates.iter() {
+        for (package_id, request) in bump_candidates.iter() {
            if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(cur_height, &request, &*fee_estimator, &*logger) {
                match bump_claim {
                    OnchainClaim::Tx(bump_tx) => {

@@ -846,10 +897,16 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
                    #[cfg(anchors)]
                    OnchainClaim::Event(claim_event) => {
                        log_info!(logger, "Yielding RBF-bumped onchain event to spend inputs {:?}", request.outpoints());
-                        self.pending_claim_events.insert(*first_claim_txid, claim_event);
+                        #[cfg(debug_assertions)] {
+                            let num_existing = self.pending_claim_events.iter().
+                                filter(|entry| entry.0 == *package_id).count();
+                            assert!(num_existing == 0 || num_existing == 1);
+                        }
+                        self.pending_claim_events.retain(|event| event.0 != *package_id);
+                        self.pending_claim_events.push((*package_id, claim_event));
                    },
                }
-                if let Some(request) = self.pending_claim_requests.get_mut(first_claim_txid) {
+                if let Some(request) = self.pending_claim_requests.get_mut(package_id) {
                    request.set_timer(new_timer);
                    request.set_feerate(new_feerate);
                }
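
The RBF-bump path above first drops any stale event queued for the same package and then appends the new one, so the `Vec` never holds two events for one `PackageID`. A standalone sketch of that replace-or-insert pattern, using a generic event type:

    // Remove any previously queued event for `package_id`, then append the new
    // one; insertion order of the remaining events is preserved.
    fn upsert_claim_event<E>(pending: &mut Vec<([u8; 32], E)>, package_id: [u8; 32], event: E) {
        pending.retain(|(id, _)| *id != package_id);
        pending.push((package_id, event));
    }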
@@ -895,12 +952,12 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
        //- resurect outpoint back in its claimable set and regenerate tx
        match entry.event {
            OnchainEvent::ContentiousOutpoint { package } => {
-                if let Some(ancestor_claimable_txid) = self.claimable_outpoints.get(package.outpoints()[0]) {
-                    if let Some(request) = self.pending_claim_requests.get_mut(&ancestor_claimable_txid.0) {
+                if let Some(pending_claim) = self.claimable_outpoints.get(package.outpoints()[0]) {
+                    if let Some(request) = self.pending_claim_requests.get_mut(&pending_claim.0) {
                        request.merge_package(package);
                        // Using a HashMap guarantee us than if we have multiple outpoints getting
                        // resurrected only one bump claim tx is going to be broadcast
-                        bump_candidates.insert(ancestor_claimable_txid.clone(), request.clone());
+                        bump_candidates.insert(pending_claim.clone(), request.clone());
                    }
                }
            },

@@ -910,7 +967,7 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
            self.onchain_events_awaiting_threshold_conf.push(entry);
        }
    }
-    for (_first_claim_txid_height, request) in bump_candidates.iter_mut() {
+    for ((_package_id, _), ref mut request) in bump_candidates.iter_mut() {
        if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(height, &request, fee_estimator, &&*logger) {
            request.set_timer(new_timer);
            request.set_feerate(new_feerate);

@@ -922,7 +979,13 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
                #[cfg(anchors)]
                OnchainClaim::Event(claim_event) => {
                    log_info!(logger, "Yielding onchain event after reorg to spend inputs {:?}", request.outpoints());
-                    self.pending_claim_events.insert(_first_claim_txid_height.0, claim_event);
+                    #[cfg(debug_assertions)] {
+                        let num_existing = self.pending_claim_events.iter()
+                            .filter(|entry| entry.0 == *_package_id).count();
+                        assert!(num_existing == 0 || num_existing == 1);
+                    }
+                    self.pending_claim_events.retain(|event| event.0 != *_package_id);
+                    self.pending_claim_events.push((*_package_id, claim_event));
                },
            }
        }

View file

@@ -811,7 +811,7 @@ pub fn build_anchor_input_witness(funding_key: &PublicKey, funding_sig: &Signatu
///
/// Normally, this is converted to the broadcaster/countersignatory-organized DirectedChannelTransactionParameters
/// before use, via the as_holder_broadcastable and as_counterparty_broadcastable functions.
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ChannelTransactionParameters {
    /// Holder public keys
    pub holder_pubkeys: ChannelPublicKeys,

@@ -835,7 +835,7 @@ pub struct ChannelTransactionParameters {
}

/// Late-bound per-channel counterparty data used to build transactions.
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
pub struct CounterpartyChannelTransactionParameters {
    /// Counter-party public keys
    pub pubkeys: ChannelPublicKeys,

View file

@@ -1239,24 +1239,23 @@ macro_rules! check_warn_msg {
/// Check that a channel's closing channel update has been broadcasted, and optionally
/// check whether an error message event has occurred.
-pub fn check_closed_broadcast(node: &Node, with_error_msg: bool) -> Option<msgs::ErrorMessage> {
+pub fn check_closed_broadcast(node: &Node, num_channels: usize, with_error_msg: bool) -> Vec<msgs::ErrorMessage> {
    let msg_events = node.node.get_and_clear_pending_msg_events();
-    assert_eq!(msg_events.len(), if with_error_msg { 2 } else { 1 });
-    match msg_events[0] {
-        MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
-            assert_eq!(msg.contents.flags & 2, 2);
-        },
-        _ => panic!("Unexpected event"),
-    }
-    if with_error_msg {
-        match msg_events[1] {
+    assert_eq!(msg_events.len(), if with_error_msg { num_channels * 2 } else { num_channels });
+    msg_events.into_iter().filter_map(|msg_event| {
+        match msg_event {
+            MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
+                assert_eq!(msg.contents.flags & 2, 2);
+                None
+            },
            MessageSendEvent::HandleError { action: msgs::ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
+                assert!(with_error_msg);
                // TODO: Check node_id
                Some(msg.clone())
            },
            _ => panic!("Unexpected event"),
        }
-    } else { None }
+    }).collect()
}

/// Check that a channel's closing channel update has been broadcasted, and optionally

@@ -1266,7 +1265,7 @@ pub fn check_closed_broadcast(node: &Node, with_error_msg: bool) -> Option<msgs:
#[macro_export]
macro_rules! check_closed_broadcast {
    ($node: expr, $with_error_msg: expr) => {
-        $crate::ln::functional_test_utils::check_closed_broadcast(&$node, $with_error_msg)
+        $crate::ln::functional_test_utils::check_closed_broadcast(&$node, 1, $with_error_msg).pop()
    }
}
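
With the new `num_channels` argument, a test that force-closes several channels on one node can assert on all of the resulting broadcasts in a single call, while the macro keeps the old single-channel behavior. A usage sketch, a fragment assuming the usual `nodes` test harness and mirroring calls in the new test below:

    // Two channels force-closed on the same node: expect two BroadcastChannelUpdate
    // messages plus two error messages.
    let errors = check_closed_broadcast(&nodes[0], 2, true);
    assert_eq!(errors.len(), 2);

    // Single-channel callers keep using the macro, which passes `1` and pops the
    // lone error message (if any) back out as an `Option`.
    let _maybe_error = check_closed_broadcast!(nodes[1], true);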

View file

@@ -10,7 +10,7 @@
//! Further functional tests which test blockchain reorganizations.

#[cfg(anchors)]
-use crate::chain::keysinterface::BaseSign;
+use crate::chain::keysinterface::{ChannelSigner, EcdsaChannelSigner};
#[cfg(anchors)]
use crate::chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS;
use crate::chain::channelmonitor::{ANTI_REORG_DELAY, Balance};

@@ -19,20 +19,34 @@ use crate::chain::chaininterface::LowerBoundedFeeEstimator;
use crate::ln::channel;
#[cfg(anchors)]
use crate::ln::chan_utils;
+#[cfg(anchors)]
+use crate::ln::channelmanager::ChannelManager;
use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, PaymentId};
use crate::ln::msgs::ChannelMessageHandler;
#[cfg(anchors)]
use crate::util::config::UserConfig;
#[cfg(anchors)]
+use crate::util::crypto::sign;
+#[cfg(anchors)]
use crate::util::events::BumpTransactionEvent;
use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
+#[cfg(anchors)]
+use crate::util::ser::Writeable;
+#[cfg(anchors)]
+use crate::util::test_utils;

+#[cfg(anchors)]
+use bitcoin::blockdata::transaction::EcdsaSighashType;
use bitcoin::blockdata::script::Builder;
use bitcoin::blockdata::opcodes;
use bitcoin::secp256k1::Secp256k1;
#[cfg(anchors)]
-use bitcoin::{Amount, Script, TxIn, TxOut, PackedLockTime};
+use bitcoin::secp256k1::SecretKey;
+#[cfg(anchors)]
+use bitcoin::{Amount, PublicKey, Script, TxIn, TxOut, PackedLockTime, Witness};
use bitcoin::Transaction;
+#[cfg(anchors)]
+use bitcoin::util::sighash::SighashCache;

use crate::prelude::*;

@@ -1748,7 +1762,7 @@ fn test_yield_anchors_events() {
    let mut holder_events = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();

    // Certain block `ConnectStyle`s cause an extra `ChannelClose` event to be emitted since the
-    // best block is being updated prior to the confirmed transactions.
+    // best block is updated before the confirmed transactions are notified.
    match *nodes[0].connect_style.borrow() {
        ConnectStyle::BestBlockFirst|ConnectStyle::BestBlockFirstReorgsOnlyTip|ConnectStyle::BestBlockFirstSkippingBlocks => {
            assert_eq!(holder_events.len(), 3);

@@ -1815,3 +1829,358 @@ fn test_yield_anchors_events() {
    // Clear the remaining events as they're not relevant to what we're testing.
    nodes[0].node.get_and_clear_pending_events();
}
#[cfg(anchors)]
#[test]
fn test_anchors_aggregated_revoked_htlc_tx() {
// Test that `ChannelMonitor`s can properly detect and claim funds from a counterparty claiming
// multiple HTLCs from multiple channels in a single transaction via the success path from a
// revoked commitment.
let secp = Secp256k1::new();
let mut chanmon_cfgs = create_chanmon_cfgs(2);
// Required to sign a revoked commitment transaction
chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let mut anchors_config = UserConfig::default();
anchors_config.channel_handshake_config.announced_channel = true;
anchors_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config), Some(anchors_config)]);
let bob_persister: test_utils::TestPersister;
let bob_chain_monitor: test_utils::TestChainMonitor;
let bob_deserialized: ChannelManager<
&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface,
&test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator,
&test_utils::TestRouter, &test_utils::TestLogger,
>;
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan_a = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 20_000_000);
let chan_b = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 20_000_000);
// Route two payments for each channel from Alice to Bob to lock in the HTLCs.
let payment_a = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
let payment_b = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
let payment_c = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
let payment_d = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
// Serialize Bob with the HTLCs locked in. We'll restart Bob later on with the state at this
// point such that he broadcasts a revoked commitment transaction.
let bob_serialized = nodes[1].node.encode();
let bob_serialized_monitor_a = get_monitor!(nodes[1], chan_a.2).encode();
let bob_serialized_monitor_b = get_monitor!(nodes[1], chan_b.2).encode();
// Bob claims all the HTLCs...
claim_payment(&nodes[0], &[&nodes[1]], payment_a.0);
claim_payment(&nodes[0], &[&nodes[1]], payment_b.0);
claim_payment(&nodes[0], &[&nodes[1]], payment_c.0);
claim_payment(&nodes[0], &[&nodes[1]], payment_d.0);
// ...and sends one back through each channel such that he has a motive to broadcast his
// revoked state.
send_payment(&nodes[1], &[&nodes[0]], 30_000_000);
send_payment(&nodes[1], &[&nodes[0]], 30_000_000);
// Restart Bob with the revoked state and provide the HTLC preimages he claimed.
reload_node!(
nodes[1], anchors_config, bob_serialized, &[&bob_serialized_monitor_a, &bob_serialized_monitor_b],
bob_persister, bob_chain_monitor, bob_deserialized
);
for chan_id in [chan_a.2, chan_b.2].iter() {
let monitor = get_monitor!(nodes[1], chan_id);
for payment in [payment_a, payment_b, payment_c, payment_d].iter() {
monitor.provide_payment_preimage(
&payment.1, &payment.0, &node_cfgs[1].tx_broadcaster,
&LowerBoundedFeeEstimator::new(node_cfgs[1].fee_estimator), &nodes[1].logger
);
}
}
// Bob force closes by broadcasting his revoked state for each channel.
nodes[1].node.force_close_broadcasting_latest_txn(&chan_a.2, &nodes[0].node.get_our_node_id()).unwrap();
check_added_monitors(&nodes[1], 1);
check_closed_broadcast(&nodes[1], 1, true);
check_closed_event!(&nodes[1], 1, ClosureReason::HolderForceClosed);
let revoked_commitment_a = {
let mut txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(txn.len(), 1);
let revoked_commitment = txn.pop().unwrap();
assert_eq!(revoked_commitment.output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
check_spends!(revoked_commitment, chan_a.3);
revoked_commitment
};
nodes[1].node.force_close_broadcasting_latest_txn(&chan_b.2, &nodes[0].node.get_our_node_id()).unwrap();
check_added_monitors(&nodes[1], 1);
check_closed_broadcast(&nodes[1], 1, true);
check_closed_event!(&nodes[1], 1, ClosureReason::HolderForceClosed);
let revoked_commitment_b = {
let mut txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(txn.len(), 1);
let revoked_commitment = txn.pop().unwrap();
assert_eq!(revoked_commitment.output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
check_spends!(revoked_commitment, chan_b.3);
revoked_commitment
};
// Bob should now receive two events to bump his revoked commitment transaction fees.
assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
let events = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
let anchor_tx = {
let secret_key = SecretKey::from_slice(&[1; 32]).unwrap();
let public_key = PublicKey::new(secret_key.public_key(&secp));
let fee_utxo_script = Script::new_v0_p2wpkh(&public_key.wpubkey_hash().unwrap());
let coinbase_tx = Transaction {
version: 2,
lock_time: PackedLockTime::ZERO,
input: vec![TxIn { ..Default::default() }],
output: vec![TxOut { // UTXO to attach fees to `anchor_tx`
value: Amount::ONE_BTC.to_sat(),
script_pubkey: fee_utxo_script.clone(),
}],
};
let mut anchor_tx = Transaction {
version: 2,
lock_time: PackedLockTime::ZERO,
input: vec![
TxIn { // Fee input
previous_output: bitcoin::OutPoint { txid: coinbase_tx.txid(), vout: 0 },
..Default::default()
},
],
output: vec![TxOut { // Fee input change
value: coinbase_tx.output[0].value / 2 ,
script_pubkey: Script::new_op_return(&[]),
}],
};
let mut signers = Vec::with_capacity(2);
for event in events {
match event {
Event::BumpTransaction(BumpTransactionEvent::ChannelClose { anchor_descriptor, .. }) => {
anchor_tx.input.push(TxIn {
previous_output: anchor_descriptor.outpoint,
..Default::default()
});
let signer = nodes[1].keys_manager.derive_channel_keys(
anchor_descriptor.channel_value_satoshis, &anchor_descriptor.channel_keys_id,
);
signers.push(signer);
},
_ => panic!("Unexpected event"),
}
}
for (i, signer) in signers.into_iter().enumerate() {
let anchor_idx = i + 1;
let funding_sig = signer.sign_holder_anchor_input(&mut anchor_tx, anchor_idx, &secp).unwrap();
anchor_tx.input[anchor_idx].witness = chan_utils::build_anchor_input_witness(
&signer.pubkeys().funding_pubkey, &funding_sig
);
}
let fee_utxo_sig = {
let witness_script = Script::new_p2pkh(&public_key.pubkey_hash());
let sighash = hash_to_message!(&SighashCache::new(&anchor_tx).segwit_signature_hash(
0, &witness_script, coinbase_tx.output[0].value, EcdsaSighashType::All
).unwrap()[..]);
let sig = sign(&secp, &sighash, &secret_key);
let mut sig = sig.serialize_der().to_vec();
sig.push(EcdsaSighashType::All as u8);
sig
};
anchor_tx.input[0].witness = Witness::from_vec(vec![fee_utxo_sig, public_key.to_bytes()]);
check_spends!(anchor_tx, coinbase_tx, revoked_commitment_a, revoked_commitment_b);
anchor_tx
};
for node in &nodes {
mine_transactions(node, &[&revoked_commitment_a, &revoked_commitment_b, &anchor_tx]);
}
check_added_monitors!(&nodes[0], 2);
check_closed_broadcast(&nodes[0], 2, true);
check_closed_event!(&nodes[0], 2, ClosureReason::CommitmentTxConfirmed);
// Alice should detect the confirmed revoked commitments, and attempt to claim all of the
// revoked outputs.
{
let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(txn.len(), 2);
let (revoked_claim_a, revoked_claim_b) = if txn[0].input[0].previous_output.txid == revoked_commitment_a.txid() {
(&txn[0], &txn[1])
} else {
(&txn[1], &txn[0])
};
// TODO: to_self claim must be separate from HTLC claims
assert_eq!(revoked_claim_a.input.len(), 3); // Spends both HTLC outputs and to_self output
assert_eq!(revoked_claim_a.output.len(), 1);
check_spends!(revoked_claim_a, revoked_commitment_a);
assert_eq!(revoked_claim_b.input.len(), 3); // Spends both HTLC outputs and to_self output
assert_eq!(revoked_claim_b.output.len(), 1);
check_spends!(revoked_claim_b, revoked_commitment_b);
}
// Since Bob was able to confirm his revoked commitment, he'll now try to claim the HTLCs
// through the success path.
assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
let mut events = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events();
// Certain block `ConnectStyle`s cause an extra `ChannelClose` event to be emitted since the
// best block is updated before the confirmed transactions are notified.
match *nodes[1].connect_style.borrow() {
ConnectStyle::BestBlockFirst|ConnectStyle::BestBlockFirstReorgsOnlyTip|ConnectStyle::BestBlockFirstSkippingBlocks => {
assert_eq!(events.len(), 4);
if let Event::BumpTransaction(BumpTransactionEvent::ChannelClose { .. }) = events.remove(0) {}
else { panic!("unexpected event"); }
if let Event::BumpTransaction(BumpTransactionEvent::ChannelClose { .. }) = events.remove(1) {}
else { panic!("unexpected event"); }
},
_ => assert_eq!(events.len(), 2),
};
let htlc_tx = {
let secret_key = SecretKey::from_slice(&[1; 32]).unwrap();
let public_key = PublicKey::new(secret_key.public_key(&secp));
let fee_utxo_script = Script::new_v0_p2wpkh(&public_key.wpubkey_hash().unwrap());
let coinbase_tx = Transaction {
version: 2,
lock_time: PackedLockTime::ZERO,
input: vec![TxIn { ..Default::default() }],
output: vec![TxOut { // UTXO to attach fees to `htlc_tx`
value: Amount::ONE_BTC.to_sat(),
script_pubkey: fee_utxo_script.clone(),
}],
};
let mut htlc_tx = Transaction {
version: 2,
lock_time: PackedLockTime::ZERO,
input: vec![TxIn { // Fee input
previous_output: bitcoin::OutPoint { txid: coinbase_tx.txid(), vout: 0 },
..Default::default()
}],
output: vec![TxOut { // Fee input change
value: coinbase_tx.output[0].value / 2 ,
script_pubkey: Script::new_op_return(&[]),
}],
};
let mut descriptors = Vec::with_capacity(4);
for event in events {
if let Event::BumpTransaction(BumpTransactionEvent::HTLCResolution { mut htlc_descriptors, .. }) = event {
assert_eq!(htlc_descriptors.len(), 2);
for htlc_descriptor in &htlc_descriptors {
assert!(!htlc_descriptor.htlc.offered);
let signer = nodes[1].keys_manager.derive_channel_keys(
htlc_descriptor.channel_value_satoshis, &htlc_descriptor.channel_keys_id
);
let per_commitment_point = signer.get_per_commitment_point(htlc_descriptor.per_commitment_number, &secp);
htlc_tx.input.push(htlc_descriptor.unsigned_tx_input());
htlc_tx.output.push(htlc_descriptor.tx_output(&per_commitment_point, &secp));
}
descriptors.append(&mut htlc_descriptors);
} else {
panic!("Unexpected event");
}
}
for (idx, htlc_descriptor) in descriptors.into_iter().enumerate() {
let htlc_input_idx = idx + 1;
let signer = nodes[1].keys_manager.derive_channel_keys(
htlc_descriptor.channel_value_satoshis, &htlc_descriptor.channel_keys_id
);
let our_sig = signer.sign_holder_htlc_transaction(&htlc_tx, htlc_input_idx, &htlc_descriptor, &secp).unwrap();
let per_commitment_point = signer.get_per_commitment_point(htlc_descriptor.per_commitment_number, &secp);
let witness_script = htlc_descriptor.witness_script(&per_commitment_point, &secp);
htlc_tx.input[htlc_input_idx].witness = htlc_descriptor.tx_input_witness(&our_sig, &witness_script);
}
let fee_utxo_sig = {
let witness_script = Script::new_p2pkh(&public_key.pubkey_hash());
let sighash = hash_to_message!(&SighashCache::new(&htlc_tx).segwit_signature_hash(
0, &witness_script, coinbase_tx.output[0].value, EcdsaSighashType::All
).unwrap()[..]);
let sig = sign(&secp, &sighash, &secret_key);
let mut sig = sig.serialize_der().to_vec();
sig.push(EcdsaSighashType::All as u8);
sig
};
htlc_tx.input[0].witness = Witness::from_vec(vec![fee_utxo_sig, public_key.to_bytes()]);
check_spends!(htlc_tx, coinbase_tx, revoked_commitment_a, revoked_commitment_b);
htlc_tx
};
for node in &nodes {
mine_transaction(node, &htlc_tx);
}
// Alice should see that Bob is trying to claim the HTLCs, so she should now try to claim them at
// the second level instead.
let revoked_claims = {
let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(txn.len(), 4);
let revoked_to_self_claim_a = txn.iter().find(|tx|
tx.input.len() == 1 &&
tx.output.len() == 1 &&
tx.input[0].previous_output.txid == revoked_commitment_a.txid()
).unwrap();
check_spends!(revoked_to_self_claim_a, revoked_commitment_a);
let revoked_to_self_claim_b = txn.iter().find(|tx|
tx.input.len() == 1 &&
tx.output.len() == 1 &&
tx.input[0].previous_output.txid == revoked_commitment_b.txid()
).unwrap();
check_spends!(revoked_to_self_claim_b, revoked_commitment_b);
let revoked_htlc_claims = txn.iter().filter(|tx|
tx.input.len() == 2 &&
tx.output.len() == 1 &&
tx.input[0].previous_output.txid == htlc_tx.txid()
).collect::<Vec<_>>();
assert_eq!(revoked_htlc_claims.len(), 2);
for revoked_htlc_claim in revoked_htlc_claims {
check_spends!(revoked_htlc_claim, htlc_tx);
}
txn
};
for node in &nodes {
mine_transactions(node, &revoked_claims.iter().collect::<Vec<_>>());
}
// Connect one block to make sure the HTLC events are not yielded while ANTI_REORG_DELAY has not
// been reached.
connect_blocks(&nodes[0], 1);
connect_blocks(&nodes[1], 1);
assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
// Connect the remaining blocks to reach ANTI_REORG_DELAY.
connect_blocks(&nodes[0], ANTI_REORG_DELAY - 2);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2);
assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
let spendable_output_events = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();
assert_eq!(spendable_output_events.len(), 4);
for (idx, event) in spendable_output_events.iter().enumerate() {
if let Event::SpendableOutputs { outputs } = event {
assert_eq!(outputs.len(), 1);
let spend_tx = nodes[0].keys_manager.backing.spend_spendable_outputs(
&[&outputs[0]], Vec::new(), Script::new_op_return(&[]), 253, &Secp256k1::new(),
).unwrap();
check_spends!(spend_tx, revoked_claims[idx]);
} else {
panic!("unexpected event");
}
}
assert!(nodes[0].node.list_channels().is_empty());
assert!(nodes[1].node.list_channels().is_empty());
assert!(nodes[0].chain_monitor.chain_monitor.get_claimable_balances(&[]).is_empty());
// TODO: From Bob's PoV, he still thinks he can claim the outputs from his revoked commitment.
// This needs to be fixed before we enable pruning `ChannelMonitor`s once they don't have any
// balances to claim.
//
// The 6 claimable balances correspond to his `to_self` outputs and the 2 HTLC outputs in each
// revoked commitment which Bob has the preimage for.
assert_eq!(nodes[1].chain_monitor.chain_monitor.get_claimable_balances(&[]).len(), 6);
}

View file

@@ -256,7 +256,7 @@ impl_writeable_tlv_based_enum_upgradable!(HTLCDestination,
#[cfg(anchors)]
/// A descriptor used to sign for a commitment transaction's anchor output.
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq, Eq)]
pub struct AnchorDescriptor {
    /// A unique identifier used along with `channel_value_satoshis` to re-derive the
    /// [`InMemorySigner`] required to sign `input`.

@@ -276,7 +276,7 @@ pub struct AnchorDescriptor {
#[cfg(anchors)]
/// A descriptor used to sign for a commitment transaction's HTLC output.
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq, Eq)]
pub struct HTLCDescriptor {
    /// A unique identifier used along with `channel_value_satoshis` to re-derive the
    /// [`InMemorySigner`] required to sign `input`.

@@ -290,10 +290,10 @@ pub struct HTLCDescriptor {
    /// [`InMemorySigner`]: crate::chain::keysinterface::InMemorySigner
    pub channel_value_satoshis: u64,
    /// The necessary channel parameters that need to be provided to the re-derived
-    /// [`InMemorySigner`] through [`BaseSign::provide_channel_parameters`].
+    /// [`InMemorySigner`] through [`ChannelSigner::provide_channel_parameters`].
    ///
    /// [`InMemorySigner`]: crate::chain::keysinterface::InMemorySigner
-    /// [`BaseSign::provide_channel_parameters`]: crate::chain::keysinterface::BaseSign::provide_channel_parameters
+    /// [`ChannelSigner::provide_channel_parameters`]: crate::chain::keysinterface::ChannelSigner::provide_channel_parameters
    pub channel_parameters: ChannelTransactionParameters,
    /// The txid of the commitment transaction in which the HTLC output lives.
    pub commitment_txid: Txid,

@@ -369,7 +369,7 @@ impl HTLCDescriptor {
#[cfg(anchors)]
/// Represents the different types of transactions, originating from LDK, to be bumped.
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq, Eq)]
pub enum BumpTransactionEvent {
    /// Indicates that a channel featuring anchor outputs is to be closed by broadcasting the local
    /// commitment transaction. Since commitment transactions have a static feerate pre-agreed upon,

@@ -387,7 +387,7 @@ pub enum BumpTransactionEvent {
    /// child anchor transaction. To sign its anchor input, an [`InMemorySigner`] should be
    /// re-derived through [`KeysManager::derive_channel_keys`] with the help of
    /// [`AnchorDescriptor::channel_keys_id`] and [`AnchorDescriptor::channel_value_satoshis`]. The
-    /// anchor input signature can be computed with [`BaseSign::sign_holder_anchor_input`],
+    /// anchor input signature can be computed with [`EcdsaChannelSigner::sign_holder_anchor_input`],
    /// which can then be provided to [`build_anchor_input_witness`] along with the `funding_pubkey`
    /// to obtain the full witness required to spend.
    ///

@@ -410,7 +410,7 @@ pub enum BumpTransactionEvent {
    ///
    /// [`InMemorySigner`]: crate::chain::keysinterface::InMemorySigner
    /// [`KeysManager::derive_channel_keys`]: crate::chain::keysinterface::KeysManager::derive_channel_keys
-    /// [`BaseSign::sign_holder_anchor_input`]: crate::chain::keysinterface::BaseSign::sign_holder_anchor_input
+    /// [`EcdsaChannelSigner::sign_holder_anchor_input`]: crate::chain::keysinterface::EcdsaChannelSigner::sign_holder_anchor_input
    /// [`build_anchor_input_witness`]: crate::ln::chan_utils::build_anchor_input_witness
    ChannelClose {
        /// The target feerate that the transaction package, which consists of the commitment

@@ -444,7 +444,7 @@ pub enum BumpTransactionEvent {
    /// HTLC transaction. To sign HTLC inputs, an [`InMemorySigner`] should be re-derived through
    /// [`KeysManager::derive_channel_keys`] with the help of `channel_keys_id` and
    /// `channel_value_satoshis`. Each HTLC input's signature can be computed with
-    /// [`BaseSign::sign_holder_htlc_transaction`], which can then be provided to
+    /// [`EcdsaChannelSigner::sign_holder_htlc_transaction`], which can then be provided to
    /// [`HTLCDescriptor::tx_input_witness`] to obtain the fully signed witness required to spend.
    ///
    /// It is possible to receive more than one instance of this event if a valid HTLC transaction

@@ -459,10 +459,13 @@ pub enum BumpTransactionEvent {
    ///
    /// [`InMemorySigner`]: crate::chain::keysinterface::InMemorySigner
    /// [`KeysManager::derive_channel_keys`]: crate::chain::keysinterface::KeysManager::derive_channel_keys
-    /// [`BaseSign::sign_holder_htlc_transaction`]: crate::chain::keysinterface::BaseSign::sign_holder_htlc_transaction
+    /// [`EcdsaChannelSigner::sign_holder_htlc_transaction`]: crate::chain::keysinterface::EcdsaChannelSigner::sign_holder_htlc_transaction
    /// [`HTLCDescriptor::tx_input_witness`]: HTLCDescriptor::tx_input_witness
    HTLCResolution {
+        /// The target feerate that the resulting HTLC transaction must meet.
        target_feerate_sat_per_1000_weight: u32,
+        /// The set of pending HTLCs on the confirmed commitment that need to be claimed, preferably
+        /// by the same transaction.
        htlc_descriptors: Vec<HTLCDescriptor>,
    },
}
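
For a consumer of `HTLCResolution`, the shape of the handling loop follows directly from these fields. The sketch below mirrors what the new test does and assumes `event`, `keys_manager`, `htlc_tx`, and `secp` already exist in the surrounding handler:

    if let Event::BumpTransaction(BumpTransactionEvent::HTLCResolution { htlc_descriptors, .. }) = event {
        for htlc_descriptor in &htlc_descriptors {
            // Re-derive the channel signer for this HTLC's channel...
            let signer = keys_manager.derive_channel_keys(
                htlc_descriptor.channel_value_satoshis, &htlc_descriptor.channel_keys_id,
            );
            let per_commitment_point =
                signer.get_per_commitment_point(htlc_descriptor.per_commitment_number, &secp);
            // ...and contribute one input spending the HTLC output plus the matching output.
            htlc_tx.input.push(htlc_descriptor.unsigned_tx_input());
            htlc_tx.output.push(htlc_descriptor.tx_output(&per_commitment_point, &secp));
        }
    }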