Merge pull request #3322 from TheBlueMatt/2024-06-mpp-claim-without-man
Stop relying on ChannelMonitor persistence after manager read
This commit is contained in: 5c975f701a
@@ -2,6 +2,8 @@
set -e
set -x
RUSTFLAGS='-D warnings' cargo clippy -- \
`# Things where clippy is just wrong` \
-A clippy::unwrap-or-default \
`# Errors` \
-A clippy::erasing_op \
-A clippy::never_loop \
@@ -768,7 +768,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
chain_monitor.latest_monitors.lock().unwrap().insert(outpoint, prev_state);
}
let mut monitor_refs = new_hash_map();
for (outpoint, monitor) in monitors.iter_mut() {
for (outpoint, monitor) in monitors.iter() {
monitor_refs.insert(*outpoint, monitor);
}
@@ -38,7 +38,7 @@ use crate::types::payment::{PaymentHash, PaymentPreimage};
use crate::ln::msgs::DecodeError;
use crate::ln::channel_keys::{DelayedPaymentKey, DelayedPaymentBasepoint, HtlcBasepoint, HtlcKey, RevocationKey, RevocationBasepoint};
use crate::ln::chan_utils::{self,CommitmentTransaction, CounterpartyCommitmentSecrets, HTLCOutputInCommitment, HTLCClaim, ChannelTransactionParameters, HolderCommitmentTransaction, TxCreationKeys};
use crate::ln::channelmanager::{HTLCSource, SentHTLCId};
use crate::ln::channelmanager::{HTLCSource, SentHTLCId, PaymentClaimDetails};
use crate::chain;
use crate::chain::{BestBlock, WatchedOutput};
use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
@@ -546,6 +546,9 @@ pub(crate) enum ChannelMonitorUpdateStep {
},
PaymentPreimage {
payment_preimage: PaymentPreimage,
/// If this preimage was from an inbound payment claim, information about the claim should
/// be included here to enable claim replay on startup.
payment_info: Option<PaymentClaimDetails>,
},
CommitmentSecret {
idx: u64,
@@ -594,6 +597,7 @@ impl_writeable_tlv_based_enum_upgradable!(ChannelMonitorUpdateStep,
},
(2, PaymentPreimage) => {
(0, payment_preimage, required),
(1, payment_info, option),
},
(3, CommitmentSecret) => {
(0, idx, required),
@@ -919,8 +923,16 @@ pub(crate) struct ChannelMonitorImpl<Signer: EcdsaChannelSigner> {
/// The set of payment hashes from inbound payments for which we know the preimage. Payment
/// preimages that are not included in any unrevoked local commitment transaction or unrevoked
/// remote commitment transactions are automatically removed when commitment transactions are
/// revoked.
payment_preimages: HashMap<PaymentHash, PaymentPreimage>,
/// revoked. Note that this happens one revocation after it theoretically could, leaving
/// preimages present here for the previous state even when the channel is "at rest". This is a
/// good safety buffer, but also is important as it ensures we retain payment preimages for the
/// previous local commitment transaction, which may have been broadcast already when we see
/// the revocation (in setups with redundant monitors).
///
/// We also store [`PaymentClaimDetails`] here, tracking the payment information(s) for this
/// preimage for inbound payments. This allows us to rebuild the inbound payment information on
/// startup even if we lost our `ChannelManager`.
payment_preimages: HashMap<PaymentHash, (PaymentPreimage, Vec<PaymentClaimDetails>)>,

// Note that `MonitorEvent`s MUST NOT be generated during update processing, only generated
// during chain data processing. This prevents a race in `ChainMonitor::update_channel` (and
@@ -1146,7 +1158,7 @@ impl<Signer: EcdsaChannelSigner> Writeable for ChannelMonitorImpl<Signer> {
writer.write_all(&byte_utils::be48_to_array(self.current_holder_commitment_number))?;

writer.write_all(&(self.payment_preimages.len() as u64).to_be_bytes())?;
for payment_preimage in self.payment_preimages.values() {
for (payment_preimage, _) in self.payment_preimages.values() {
writer.write_all(&payment_preimage.0[..])?;
}

@@ -1224,6 +1236,7 @@ impl<Signer: EcdsaChannelSigner> Writeable for ChannelMonitorImpl<Signer> {
(19, self.channel_id, required),
(21, self.balances_empty_height, option),
(23, self.holder_pays_commitment_tx_fee, option),
(25, self.payment_preimages, required),
});

Ok(())
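To make the compatibility story above concrete, here is a rough, self-contained sketch of the dual encoding: the legacy bare-preimage list is still written for old readers, while the richer map rides in a separate numbered record (field 25 in the diff; an odd-numbered field, which readers that do not understand it can skip). Everything below uses stand-in types and ad-hoc framing, not LDK's real `Writeable`/TLV macros.

use std::collections::HashMap;
use std::io::{self, Write};

type PaymentHash = [u8; 32];
type PaymentPreimage = [u8; 32];
type ClaimInfo = Vec<u8>; // stand-in for a serialized PaymentClaimDetails

fn write_preimages<W: Write>(
	w: &mut W, preimages: &HashMap<PaymentHash, (PaymentPreimage, Vec<ClaimInfo>)>,
) -> io::Result<()> {
	// Legacy section: a count followed by bare preimages, which older readers already understand.
	w.write_all(&(preimages.len() as u64).to_be_bytes())?;
	for (preimage, _) in preimages.values() {
		w.write_all(preimage)?;
	}
	// Extension record: (type, length, value) carrying the full hash -> (preimage, claim info) map.
	let mut value = Vec::new();
	for (hash, (preimage, infos)) in preimages.iter() {
		value.extend_from_slice(hash);
		value.extend_from_slice(preimage);
		value.extend_from_slice(&(infos.len() as u64).to_be_bytes()); // claim records elided for brevity
	}
	w.write_all(&[25u8])?; // hypothetical record type, mirroring the diff's field number
	w.write_all(&(value.len() as u64).to_be_bytes())?;
	w.write_all(&value)?;
	Ok(())
}

fn main() {
	let mut buf = Vec::new();
	let mut map = HashMap::new();
	map.insert([1u8; 32], ([2u8; 32], vec![b"claim".to_vec()]));
	write_preimages(&mut buf, &map).unwrap();
	assert!(!buf.is_empty());
}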
@@ -1488,7 +1501,15 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {

/// This is used to provide payment preimage(s) out-of-band during startup without updating the
/// off-chain state with a new commitment transaction.
pub(crate) fn provide_payment_preimage<B: Deref, F: Deref, L: Deref>(
///
/// It is used only for legacy (created prior to LDK 0.1) pending payments on upgrade, and the
/// flow that uses it assumes that this [`ChannelMonitor`] is persisted prior to the
/// [`ChannelManager`] being persisted (as the state necessary to call this method again is
/// removed from the [`ChannelManager`] and thus a persistence inversion would imply we do not
/// get the preimage back into this [`ChannelMonitor`] on startup).
///
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
pub(crate) fn provide_payment_preimage_unsafe_legacy<B: Deref, F: Deref, L: Deref>(
&self,
payment_hash: &PaymentHash,
payment_preimage: &PaymentPreimage,
@@ -1502,8 +1523,11 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
{
let mut inner = self.inner.lock().unwrap();
let logger = WithChannelMonitor::from_impl(logger, &*inner, Some(*payment_hash));
// Note that we don't pass any MPP claim parts here. This is generally not okay but in this
// case is acceptable as we only call this method from `ChannelManager` deserialization in
// cases where we are replaying a claim started on a previous version of LDK.
inner.provide_payment_preimage(
payment_hash, payment_preimage, broadcaster, fee_estimator, &logger)
payment_hash, payment_preimage, &None, broadcaster, fee_estimator, &logger)
}

/// Updates a ChannelMonitor on the basis of some new information provided by the Channel
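The doc comment above encodes an ordering requirement that is easy to get backwards, so here is a minimal sketch of it, with hypothetical `persist_monitor`/`persist_manager` helpers standing in for a real persister and unit structs standing in for the LDK types:

struct ChannelMonitor; // stand-in
struct ChannelManager; // stand-in

fn persist_monitor(_m: &ChannelMonitor) -> Result<(), ()> { Ok(()) }
fn persist_manager(_m: &ChannelManager) -> Result<(), ()> { Ok(()) }

fn replay_legacy_claim(monitor: &ChannelMonitor, manager: &ChannelManager) -> Result<(), ()> {
	// 1) Hand the preimage to the monitor (provide_payment_preimage_unsafe_legacy in the diff)
	//    and persist the monitor *first*. If we crash after this point the preimage is safe.
	persist_monitor(monitor)?;
	// 2) Only then persist the manager. Persisting the manager first and crashing would drop the
	//    claimable-payment state needed to redo step 1 -- the "persistence inversion" warned
	//    about above.
	persist_manager(manager)?;
	Ok(())
}

fn main() {
	let _ = replay_legacy_claim(&ChannelMonitor, &ChannelManager);
}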
@ -2194,7 +2218,7 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
|
||||
outbound_payment,
|
||||
});
|
||||
}
|
||||
} else if let Some(payment_preimage) = self.payment_preimages.get(&htlc.payment_hash) {
|
||||
} else if let Some((payment_preimage, _)) = self.payment_preimages.get(&htlc.payment_hash) {
|
||||
// Otherwise (the payment was inbound), only expose it as claimable if
|
||||
// we know the preimage.
|
||||
// Note that if there is a pending claim, but it did not use the
|
||||
@ -2415,7 +2439,7 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
|
||||
outbound_payment,
|
||||
});
|
||||
}
|
||||
} else if us.payment_preimages.get(&htlc.payment_hash).is_some() {
|
||||
} else if us.payment_preimages.contains_key(&htlc.payment_hash) {
|
||||
inbound_claiming_htlc_rounded_msat += rounded_value_msat;
|
||||
if htlc.transaction_output_index.is_some() {
|
||||
claimable_inbound_htlc_value_sat += htlc.amount_msat / 1000;
|
||||
@ -2570,7 +2594,7 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
|
||||
res
|
||||
}
|
||||
|
||||
pub(crate) fn get_stored_preimages(&self) -> HashMap<PaymentHash, PaymentPreimage> {
|
||||
pub(crate) fn get_stored_preimages(&self) -> HashMap<PaymentHash, (PaymentPreimage, Vec<PaymentClaimDetails>)> {
|
||||
self.inner.lock().unwrap().payment_preimages.clone()
|
||||
}
|
||||
}
|
||||
@@ -2929,14 +2953,27 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {

/// Provides a payment_hash->payment_preimage mapping. Will be automatically pruned when all
/// commitment_tx_infos which contain the payment hash have been revoked.
///
/// Note that this is often called multiple times for the same payment and must be idempotent.
fn provide_payment_preimage<B: Deref, F: Deref, L: Deref>(
&mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage, broadcaster: &B,
&mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage,
payment_info: &Option<PaymentClaimDetails>, broadcaster: &B,
fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &WithChannelMonitor<L>)
where B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
self.payment_preimages.insert(payment_hash.clone(), payment_preimage.clone());
self.payment_preimages.entry(payment_hash.clone())
.and_modify(|(_, payment_infos)| {
if let Some(payment_info) = payment_info {
if !payment_infos.contains(&payment_info) {
payment_infos.push(payment_info.clone());
}
}
})
.or_insert_with(|| {
(payment_preimage.clone(), payment_info.clone().into_iter().collect())
});

let confirmed_spend_txid = self.funding_spend_confirmed.or_else(|| {
self.onchain_events_awaiting_threshold_conf.iter().find_map(|event| match event.event {
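The entry-based insert above is what makes redelivery of the same `PaymentPreimage` update harmless. A self-contained illustration of the same pattern, with `PaymentClaimDetails` reduced to a `String` stand-in:

use std::collections::HashMap;

type PaymentHash = [u8; 32];
type PaymentPreimage = [u8; 32];
type PaymentClaimDetails = String; // simplified stand-in

fn provide_preimage(
	map: &mut HashMap<PaymentHash, (PaymentPreimage, Vec<PaymentClaimDetails>)>,
	hash: PaymentHash, preimage: PaymentPreimage, info: &Option<PaymentClaimDetails>,
) {
	map.entry(hash)
		// Re-delivery of the same update only appends claim info we have not yet recorded.
		.and_modify(|(_, infos)| {
			if let Some(info) = info {
				if !infos.contains(info) { infos.push(info.clone()); }
			}
		})
		// First delivery stores the preimage plus zero or one claim records.
		.or_insert_with(|| (preimage, info.clone().into_iter().collect()));
}

fn main() {
	let mut map = HashMap::new();
	let (hash, preimage) = ([1u8; 32], [2u8; 32]);
	provide_preimage(&mut map, hash, preimage, &Some("claim".to_string()));
	provide_preimage(&mut map, hash, preimage, &Some("claim".to_string())); // idempotent
	assert_eq!(map[&hash].1.len(), 1);
}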
@ -3139,9 +3176,9 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
|
||||
log_trace!(logger, "Updating ChannelMonitor with latest counterparty commitment transaction info");
|
||||
self.provide_latest_counterparty_commitment_tx(*commitment_txid, htlc_outputs.clone(), *commitment_number, *their_per_commitment_point, logger)
|
||||
},
|
||||
ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage } => {
|
||||
ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage, payment_info } => {
|
||||
log_trace!(logger, "Updating ChannelMonitor with payment preimage");
|
||||
self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array()), &payment_preimage, broadcaster, &bounded_fee_estimator, logger)
|
||||
self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array()), &payment_preimage, payment_info, broadcaster, &bounded_fee_estimator, logger)
|
||||
},
|
||||
ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } => {
|
||||
log_trace!(logger, "Updating ChannelMonitor with commitment secret");
|
||||
@ -3593,7 +3630,7 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
|
||||
return (claimable_outpoints, to_counterparty_output_info);
|
||||
}
|
||||
}
|
||||
let preimage = if htlc.offered { if let Some(p) = self.payment_preimages.get(&htlc.payment_hash) { Some(*p) } else { None } } else { None };
|
||||
let preimage = if htlc.offered { if let Some((p, _)) = self.payment_preimages.get(&htlc.payment_hash) { Some(*p) } else { None } } else { None };
|
||||
if preimage.is_some() || !htlc.offered {
|
||||
let counterparty_htlc_outp = if htlc.offered {
|
||||
PackageSolvingData::CounterpartyOfferedHTLCOutput(
|
||||
@ -3681,7 +3718,7 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
|
||||
);
|
||||
(htlc_output, conf_height)
|
||||
} else {
|
||||
let payment_preimage = if let Some(preimage) = self.payment_preimages.get(&htlc.payment_hash) {
|
||||
let payment_preimage = if let Some((preimage, _)) = self.payment_preimages.get(&htlc.payment_hash) {
|
||||
preimage.clone()
|
||||
} else {
|
||||
// We can't build an HTLC-Success transaction without the preimage
|
||||
@ -3835,7 +3872,7 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
|
||||
for htlc in self.current_holder_commitment_tx.htlc_outputs.iter() {
|
||||
if let Some(vout) = htlc.0.transaction_output_index {
|
||||
let preimage = if !htlc.0.offered {
|
||||
if let Some(preimage) = self.payment_preimages.get(&htlc.0.payment_hash) { Some(preimage.clone()) } else {
|
||||
if let Some((preimage, _)) = self.payment_preimages.get(&htlc.0.payment_hash) { Some(preimage.clone()) } else {
|
||||
// We can't build an HTLC-Success transaction without the preimage
|
||||
continue;
|
||||
}
|
||||
@ -4808,7 +4845,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
|
||||
for _ in 0..payment_preimages_len {
|
||||
let preimage: PaymentPreimage = Readable::read(reader)?;
|
||||
let hash = PaymentHash(Sha256::hash(&preimage.0[..]).to_byte_array());
|
||||
if let Some(_) = payment_preimages.insert(hash, preimage) {
|
||||
if let Some(_) = payment_preimages.insert(hash, (preimage, Vec::new())) {
|
||||
return Err(DecodeError::InvalidValue);
|
||||
}
|
||||
}
|
||||
@ -4891,6 +4928,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
|
||||
let mut balances_empty_height = None;
|
||||
let mut channel_id = None;
|
||||
let mut holder_pays_commitment_tx_fee = None;
|
||||
let mut payment_preimages_with_info: Option<HashMap<_, _>> = None;
|
||||
read_tlv_fields!(reader, {
|
||||
(1, funding_spend_confirmed, option),
|
||||
(3, htlcs_resolved_on_chain, optional_vec),
|
||||
@@ -4904,7 +4942,24 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
(19, channel_id, option),
(21, balances_empty_height, option),
(23, holder_pays_commitment_tx_fee, option),
(25, payment_preimages_with_info, option),
});
if let Some(payment_preimages_with_info) = payment_preimages_with_info {
if payment_preimages_with_info.len() != payment_preimages.len() {
return Err(DecodeError::InvalidValue);
}
for (payment_hash, (payment_preimage, _)) in payment_preimages.iter() {
// Note that because `payment_preimages` is built back from preimages directly,
// checking that the two maps have the same hash -> preimage pairs also checks that
// the payment hashes in `payment_preimages_with_info`'s preimages match its
// hashes.
let new_preimage = payment_preimages_with_info.get(payment_hash).map(|(p, _)| p);
if new_preimage != Some(payment_preimage) {
return Err(DecodeError::InvalidValue);
}
}
payment_preimages = payment_preimages_with_info;
}

// `HolderForceClosedWithInfo` replaced `HolderForceClosed` in v0.0.122. If we have both
// events, we can remove the `HolderForceClosed` event and just keep the `HolderForceClosedWithInfo`.
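The check above only accepts the richer TLV map if it agrees, hash for hash, with the legacy preimage list. A compact stand-alone model of that validation, with stand-in types and `DecodeError::InvalidValue` reduced to a unit error:

use std::collections::HashMap;

type PaymentHash = [u8; 32];
type PaymentPreimage = [u8; 32];
type ClaimInfo = String;
type Preimages = HashMap<PaymentHash, (PaymentPreimage, Vec<ClaimInfo>)>;

fn merge_preimage_maps(mut legacy: Preimages, with_info: Option<Preimages>) -> Result<Preimages, ()> {
	if let Some(with_info) = with_info {
		// Both encodings describe the same set of claims, so any mismatch means corruption.
		if with_info.len() != legacy.len() { return Err(()); }
		for (hash, (preimage, _)) in legacy.iter() {
			if with_info.get(hash).map(|(p, _)| p) != Some(preimage) { return Err(()); }
		}
		legacy = with_info;
	}
	Ok(legacy)
}

fn main() {
	let mut legacy: Preimages = HashMap::new();
	legacy.insert([1; 32], ([2; 32], Vec::new()));
	let mut rich: Preimages = HashMap::new();
	rich.insert([1; 32], ([2; 32], vec!["claim".to_string()]));
	assert!(merge_preimage_maps(legacy, Some(rich)).is_ok());
}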
@ -5097,8 +5152,12 @@ mod tests {
|
||||
assert_eq!(replay_update.updates.len(), 1);
|
||||
if let ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { .. } = replay_update.updates[0] {
|
||||
} else { panic!(); }
|
||||
replay_update.updates.push(ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage: payment_preimage_1 });
|
||||
replay_update.updates.push(ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage: payment_preimage_2 });
|
||||
replay_update.updates.push(ChannelMonitorUpdateStep::PaymentPreimage {
|
||||
payment_preimage: payment_preimage_1, payment_info: None,
|
||||
});
|
||||
replay_update.updates.push(ChannelMonitorUpdateStep::PaymentPreimage {
|
||||
payment_preimage: payment_preimage_2, payment_info: None,
|
||||
});
|
||||
|
||||
let broadcaster = TestBroadcaster::with_blocks(Arc::clone(&nodes[1].blocks));
|
||||
assert!(
|
||||
@ -5228,7 +5287,9 @@ mod tests {
|
||||
preimages_slice_to_htlc_outputs!(preimages[15..20]), 281474976710654, dummy_key, &logger);
|
||||
for &(ref preimage, ref hash) in preimages.iter() {
|
||||
let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_estimator);
|
||||
monitor.provide_payment_preimage(hash, preimage, &broadcaster, &bounded_fee_estimator, &logger);
|
||||
monitor.provide_payment_preimage_unsafe_legacy(
|
||||
hash, preimage, &broadcaster, &bounded_fee_estimator, &logger
|
||||
);
|
||||
}
|
||||
|
||||
// Now provide a secret, pruning preimages 10-15
|
||||
|
@ -32,7 +32,7 @@ use crate::ln::msgs;
|
||||
use crate::ln::msgs::{ClosingSigned, ClosingSignedFeeRange, DecodeError};
|
||||
use crate::ln::script::{self, ShutdownScript};
|
||||
use crate::ln::channel_state::{ChannelShutdownState, CounterpartyForwardingInfo, InboundHTLCDetails, InboundHTLCStateDetails, OutboundHTLCDetails, OutboundHTLCStateDetails};
|
||||
use crate::ln::channelmanager::{self, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
|
||||
use crate::ln::channelmanager::{self, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, PaymentClaimDetails, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
|
||||
use crate::ln::chan_utils::{
|
||||
CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight,
|
||||
htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction,
|
||||
@ -1290,7 +1290,7 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
|
||||
// further `send_update_fee` calls, dropping the previous holding cell update entirely.
|
||||
holding_cell_update_fee: Option<u32>,
|
||||
next_holder_htlc_id: u64,
|
||||
next_counterparty_htlc_id: u64,
|
||||
pub(super) next_counterparty_htlc_id: u64,
|
||||
feerate_per_kw: u32,
|
||||
|
||||
/// The timestamp set on our latest `channel_update` message for this channel. It is updated
|
||||
@ -4027,26 +4027,31 @@ impl<SP: Deref> Channel<SP> where
|
||||
/// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
|
||||
/// entirely.
|
||||
///
|
||||
/// This is only used for payments received prior to LDK 0.1.
|
||||
///
|
||||
/// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
|
||||
/// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
|
||||
///
|
||||
/// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
|
||||
/// disconnected).
|
||||
pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
|
||||
pub fn claim_htlc_while_disconnected_dropping_mon_update_legacy<L: Deref>
|
||||
(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
|
||||
where L::Target: Logger {
|
||||
// Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
|
||||
// (see equivalent if condition there).
|
||||
assert!(!self.context.channel_state.can_generate_new_commitment());
|
||||
let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
|
||||
let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
|
||||
let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, None, logger);
|
||||
self.context.latest_monitor_update_id = mon_update_id;
|
||||
if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
|
||||
assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
|
||||
}
|
||||
}
|
||||
|
||||
fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
|
||||
fn get_update_fulfill_htlc<L: Deref>(
|
||||
&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage,
|
||||
payment_info: Option<PaymentClaimDetails>, logger: &L,
|
||||
) -> UpdateFulfillFetch where L::Target: Logger {
|
||||
// Either ChannelReady got set (which means it won't be unset) or there is no way any
|
||||
// caller thought we could have something claimed (cause we wouldn't have accepted in an
|
||||
// incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
|
||||
@@ -4104,6 +4109,7 @@ impl<SP: Deref> Channel<SP> where
counterparty_node_id: Some(self.context.counterparty_node_id),
updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
payment_preimage: payment_preimage_arg.clone(),
payment_info,
}],
channel_id: Some(self.context.channel_id()),
};
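For reference, this is roughly the shape of the resulting update once the claim details ride along with the preimage; the types below are simplified placeholders for `ChannelMonitorUpdate`/`ChannelMonitorUpdateStep`, not the real definitions:

#[derive(Debug)]
enum UpdateStep {
	PaymentPreimage { payment_preimage: [u8; 32], payment_info: Option<String> },
}

#[derive(Debug)]
struct MonitorUpdate { update_id: u64, updates: Vec<UpdateStep> }

fn preimage_update(preimage: [u8; 32], payment_info: Option<String>) -> MonitorUpdate {
	MonitorUpdate {
		update_id: 42,
		// The claim details ride along with the preimage so a restarted node can rebuild the
		// claim from the monitor alone.
		updates: vec![UpdateStep::PaymentPreimage { payment_preimage: preimage, payment_info }],
	}
}

fn main() {
	println!("{:?}", preimage_update([7u8; 32], Some("mpp claim".to_string())));
}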
@ -4171,9 +4177,12 @@ impl<SP: Deref> Channel<SP> where
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
|
||||
pub fn get_update_fulfill_htlc_and_commit<L: Deref>(
|
||||
&mut self, htlc_id: u64, payment_preimage: PaymentPreimage,
|
||||
payment_info: Option<PaymentClaimDetails>, logger: &L,
|
||||
) -> UpdateFulfillCommitFetch where L::Target: Logger {
|
||||
let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
|
||||
match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
|
||||
match self.get_update_fulfill_htlc(htlc_id, payment_preimage, payment_info, logger) {
|
||||
UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
|
||||
// Even if we aren't supposed to let new monitor updates with commitment state
|
||||
// updates run, we still need to push the preimage ChannelMonitorUpdateStep no
|
||||
@ -4934,9 +4943,14 @@ impl<SP: Deref> Channel<SP> where
|
||||
// not fail - any in between attempts to claim the HTLC will have resulted
|
||||
// in it hitting the holding cell again and we cannot change the state of a
|
||||
// holding cell HTLC from fulfill to anything else.
|
||||
//
|
||||
// Note that we should have already provided a preimage-containing
|
||||
// `ChannelMonitorUpdate` to the user, making this one redundant, however
|
||||
// there's no harm in including the extra `ChannelMonitorUpdateStep` here.
|
||||
// We do not bother to track and include `payment_info` here, however.
|
||||
let mut additional_monitor_update =
|
||||
if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
|
||||
self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
|
||||
self.get_update_fulfill_htlc(htlc_id, *payment_preimage, None, logger)
|
||||
{ monitor_update } else { unreachable!() };
|
||||
update_fulfill_count += 1;
|
||||
monitor_update.updates.append(&mut additional_monitor_update.updates);
|
||||
|
@ -801,6 +801,7 @@ pub(super) enum RAACommitmentOrder {
|
||||
}
|
||||
|
||||
/// Information about a payment which is currently being claimed.
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
struct ClaimingPayment {
|
||||
amount_msat: u64,
|
||||
payment_purpose: events::PaymentPurpose,
|
||||
@@ -898,6 +899,73 @@ struct ClaimablePayments {
pending_claiming_payments: HashMap<PaymentHash, ClaimingPayment>,
}

impl ClaimablePayments {
/// Moves a payment from [`Self::claimable_payments`] to [`Self::pending_claiming_payments`].
///
/// If `custom_tlvs_known` is false and custom even TLVs are set by the sender, the set of
/// pending HTLCs will be returned in the `Err` variant of this method. They MUST then be
/// failed by the caller as they will not be in either [`Self::claimable_payments`] or
/// [`Self::pending_claiming_payments`].
///
/// If `custom_tlvs_known` is true, and a matching payment is found, it will always be moved.
///
/// If no payment is found, `Err(Vec::new())` is returned.
fn begin_claiming_payment<L: Deref, S: Deref>(
&mut self, payment_hash: PaymentHash, node_signer: &S, logger: &L,
inbound_payment_id_secret: &[u8; 32], custom_tlvs_known: bool,
) -> Result<(Vec<ClaimableHTLC>, ClaimingPayment), Vec<ClaimableHTLC>>
where L::Target: Logger, S::Target: NodeSigner,
{
match self.claimable_payments.remove(&payment_hash) {
Some(payment) => {
let mut receiver_node_id = node_signer.get_node_id(Recipient::Node)
.expect("Failed to get node_id for node recipient");
for htlc in payment.htlcs.iter() {
if htlc.prev_hop.phantom_shared_secret.is_some() {
let phantom_pubkey = node_signer.get_node_id(Recipient::PhantomNode)
.expect("Failed to get node_id for phantom node recipient");
receiver_node_id = phantom_pubkey;
break;
}
}

if let Some(RecipientOnionFields { custom_tlvs, .. }) = &payment.onion_fields {
if !custom_tlvs_known && custom_tlvs.iter().any(|(typ, _)| typ % 2 == 0) {
log_info!(logger, "Rejecting payment with payment hash {} as we cannot accept payment with unknown even TLVs: {}",
&payment_hash, log_iter!(custom_tlvs.iter().map(|(typ, _)| typ).filter(|typ| *typ % 2 == 0)));
return Err(payment.htlcs);
}
}

let payment_id = payment.inbound_payment_id(inbound_payment_id_secret);
let claiming_payment = self.pending_claiming_payments
.entry(payment_hash)
.and_modify(|_| {
debug_assert!(false, "Shouldn't get a duplicate pending claim event ever");
log_error!(logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug",
&payment_hash);
})
.or_insert_with(|| {
let htlcs = payment.htlcs.iter().map(events::ClaimedHTLC::from).collect();
let sender_intended_value = payment.htlcs.first().map(|htlc| htlc.total_msat);
ClaimingPayment {
amount_msat: payment.htlcs.iter().map(|source| source.value).sum(),
payment_purpose: payment.purpose,
receiver_node_id,
htlcs,
sender_intended_value,
onion_fields: payment.onion_fields,
payment_id: Some(payment_id),
}
}).clone();

Ok((payment.htlcs, claiming_payment))
},
None => Err(Vec::new())
}
}
}

/// Events which we process internally but cannot be processed immediately at the generation site
/// usually because we're running pre-full-init. They are handled immediately once we detect we are
/// running normally, and specifically must be processed before any other non-background
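Callers of a `begin_claiming_payment`-style API are expected to treat the two result arms very differently: `Ok` hands back the HTLCs to claim plus the `ClaimingPayment` snapshot, while `Err` returns HTLCs that must be failed backwards. A toy sketch of that contract, using stand-in types rather than the real `ChannelManager` internals:

struct ClaimableHtlc { id: u64 }
struct ClaimingPayment { amount_msat: u64 }

fn begin_claiming_payment(found: bool) -> Result<(Vec<ClaimableHtlc>, ClaimingPayment), Vec<ClaimableHtlc>> {
	if found { Ok((vec![ClaimableHtlc { id: 0 }], ClaimingPayment { amount_msat: 1_000 })) }
	else { Err(Vec::new()) }
}

fn fail_htlc_backwards(htlc: &ClaimableHtlc) { println!("failing HTLC {}", htlc.id); }

fn main() {
	match begin_claiming_payment(false) {
		Ok((htlcs, claiming)) => {
			// Proceed to claim each part; `claiming` carries what PaymentClaimed needs later.
			println!("claiming {} msat over {} HTLCs", claiming.amount_msat, htlcs.len());
		},
		Err(htlcs) => {
			// Not claimable (unknown payment, or rejected even custom TLVs): fail everything back.
			for htlc in htlcs.iter() { fail_htlc_backwards(htlc); }
		},
	}
}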
@@ -1062,12 +1130,71 @@ impl_writeable_tlv_based_enum!(EventCompletionAction,
}
);

/// The source argument which is passed to [`ChannelManager::claim_mpp_part`].
///
/// This is identical to [`MPPClaimHTLCSource`] except that [`Self::counterparty_node_id`] is an
/// `Option`, whereas it is required in [`MPPClaimHTLCSource`]. In the future, we should ideally
/// drop this and merge the two, however doing so may break upgrades for nodes which have pending
/// forwarded payments.
struct HTLCClaimSource {
counterparty_node_id: Option<PublicKey>,
funding_txo: OutPoint,
channel_id: ChannelId,
htlc_id: u64,
}

impl From<&MPPClaimHTLCSource> for HTLCClaimSource {
fn from(o: &MPPClaimHTLCSource) -> HTLCClaimSource {
HTLCClaimSource {
counterparty_node_id: Some(o.counterparty_node_id),
funding_txo: o.funding_txo,
channel_id: o.channel_id,
htlc_id: o.htlc_id,
}
}
}

#[derive(Clone, Debug, PartialEq, Eq)]
/// The source of an HTLC which is being claimed as a part of an incoming payment. Each part is
/// tracked in [`PendingMPPClaim`] as well as in [`ChannelMonitor`]s, so that it can be converted
/// to an [`HTLCClaimSource`] for claim replays on startup.
struct MPPClaimHTLCSource {
counterparty_node_id: PublicKey,
funding_txo: OutPoint,
channel_id: ChannelId,
htlc_id: u64,
}

impl_writeable_tlv_based!(MPPClaimHTLCSource, {
(0, counterparty_node_id, required),
(2, funding_txo, required),
(4, channel_id, required),
(6, htlc_id, required),
});

#[derive(Debug)]
pub(crate) struct PendingMPPClaim {
channels_without_preimage: Vec<(PublicKey, OutPoint, ChannelId, u64)>,
channels_with_preimage: Vec<(PublicKey, OutPoint, ChannelId)>,
channels_without_preimage: Vec<MPPClaimHTLCSource>,
channels_with_preimage: Vec<MPPClaimHTLCSource>,
}

#[derive(Clone, Debug, PartialEq, Eq)]
/// When we're claiming a(n MPP) payment, we want to store information about that payment in the
/// [`ChannelMonitor`] so that we can replay the claim without any information from the
/// [`ChannelManager`] at all. This struct stores that information with enough to replay claims
/// against all MPP parts as well as generate an [`Event::PaymentClaimed`].
pub(crate) struct PaymentClaimDetails {
mpp_parts: Vec<MPPClaimHTLCSource>,
/// Use [`ClaimingPayment`] as a stable source of all the fields we need to generate the
/// [`Event::PaymentClaimed`].
claiming_payment: ClaimingPayment,
}

impl_writeable_tlv_based!(PaymentClaimDetails, {
(0, mpp_parts, required_vec),
(2, claiming_payment, required),
});

#[derive(Clone)]
pub(crate) struct PendingMPPClaimPointer(Arc<Mutex<PendingMPPClaim>>);

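A compact stand-in model of the types introduced above and how they fit together on replay: `PaymentClaimDetails` keeps the MPP parts, and each stored `MPPClaimHTLCSource` converts into the claim-time `HTLCClaimSource`, whose counterparty id is merely optional. Field types here are simplified placeholders, not the real LDK definitions:

#[derive(Clone, Debug)]
struct MPPClaimHTLCSource { counterparty_node_id: [u8; 33], channel_id: [u8; 32], htlc_id: u64 }

#[derive(Debug)]
struct HTLCClaimSource { counterparty_node_id: Option<[u8; 33]>, channel_id: [u8; 32], htlc_id: u64 }

impl From<&MPPClaimHTLCSource> for HTLCClaimSource {
	fn from(o: &MPPClaimHTLCSource) -> HTLCClaimSource {
		HTLCClaimSource {
			counterparty_node_id: Some(o.counterparty_node_id),
			channel_id: o.channel_id,
			htlc_id: o.htlc_id,
		}
	}
}

#[derive(Debug)]
struct PaymentClaimDetails { mpp_parts: Vec<MPPClaimHTLCSource>, amount_msat: u64 }

fn main() {
	let part = MPPClaimHTLCSource { counterparty_node_id: [2; 33], channel_id: [9; 32], htlc_id: 3 };
	let details = PaymentClaimDetails { mpp_parts: vec![part.clone()], amount_msat: 5_000 };
	// Replaying on startup converts each stored part into the claim-time source type.
	let replay: Vec<HTLCClaimSource> = details.mpp_parts.iter().map(HTLCClaimSource::from).collect();
	println!("{:?}", replay);
}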
@ -1502,7 +1629,7 @@ where
|
||||
/// let mut channel_monitors = read_channel_monitors();
|
||||
/// let args = ChannelManagerReadArgs::new(
|
||||
/// entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster,
|
||||
/// router, message_router, logger, default_config, channel_monitors.iter_mut().collect(),
|
||||
/// router, message_router, logger, default_config, channel_monitors.iter().collect(),
|
||||
/// );
|
||||
/// let (block_hash, channel_manager) =
|
||||
/// <(BlockHash, ChannelManager<_, _, _, _, _, _, _, _, _>)>::read(&mut reader, args)?;
|
||||
@ -6665,59 +6792,24 @@ where
|
||||
|
||||
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
|
||||
|
||||
let sources = {
|
||||
let mut claimable_payments = self.claimable_payments.lock().unwrap();
|
||||
if let Some(payment) = claimable_payments.claimable_payments.remove(&payment_hash) {
|
||||
let mut receiver_node_id = self.our_network_pubkey;
|
||||
for htlc in payment.htlcs.iter() {
|
||||
if htlc.prev_hop.phantom_shared_secret.is_some() {
|
||||
let phantom_pubkey = self.node_signer.get_node_id(Recipient::PhantomNode)
|
||||
.expect("Failed to get node_id for phantom node recipient");
|
||||
receiver_node_id = phantom_pubkey;
|
||||
break;
|
||||
let (sources, claiming_payment) = {
|
||||
let res = self.claimable_payments.lock().unwrap().begin_claiming_payment(
|
||||
payment_hash, &self.node_signer, &self.logger, &self.inbound_payment_id_secret,
|
||||
custom_tlvs_known,
|
||||
);
|
||||
|
||||
match res {
|
||||
Ok((htlcs, payment_info)) => (htlcs, payment_info),
|
||||
Err(htlcs) => {
|
||||
for htlc in htlcs {
|
||||
let reason = self.get_htlc_fail_reason_from_failure_code(FailureCode::InvalidOnionPayload(None), &htlc);
|
||||
let source = HTLCSource::PreviousHopData(htlc.prev_hop);
|
||||
let receiver = HTLCDestination::FailedPayment { payment_hash };
|
||||
self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
let payment_id = payment.inbound_payment_id(&self.inbound_payment_id_secret);
|
||||
let claiming_payment = claimable_payments.pending_claiming_payments
|
||||
.entry(payment_hash)
|
||||
.and_modify(|_| {
|
||||
debug_assert!(false, "Shouldn't get a duplicate pending claim event ever");
|
||||
log_error!(self.logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug",
|
||||
&payment_hash);
|
||||
})
|
||||
.or_insert_with(|| {
|
||||
let htlcs = payment.htlcs.iter().map(events::ClaimedHTLC::from).collect();
|
||||
let sender_intended_value = payment.htlcs.first().map(|htlc| htlc.total_msat);
|
||||
ClaimingPayment {
|
||||
amount_msat: payment.htlcs.iter().map(|source| source.value).sum(),
|
||||
payment_purpose: payment.purpose,
|
||||
receiver_node_id,
|
||||
htlcs,
|
||||
sender_intended_value,
|
||||
onion_fields: payment.onion_fields,
|
||||
payment_id: Some(payment_id),
|
||||
}
|
||||
});
|
||||
|
||||
if let Some(RecipientOnionFields { ref custom_tlvs, .. }) = claiming_payment.onion_fields {
|
||||
if !custom_tlvs_known && custom_tlvs.iter().any(|(typ, _)| typ % 2 == 0) {
|
||||
log_info!(self.logger, "Rejecting payment with payment hash {} as we cannot accept payment with unknown even TLVs: {}",
|
||||
&payment_hash, log_iter!(custom_tlvs.iter().map(|(typ, _)| typ).filter(|typ| *typ % 2 == 0)));
|
||||
claimable_payments.pending_claiming_payments.remove(&payment_hash);
|
||||
mem::drop(claimable_payments);
|
||||
for htlc in payment.htlcs {
|
||||
let reason = self.get_htlc_fail_reason_from_failure_code(FailureCode::InvalidOnionPayload(None), &htlc);
|
||||
let source = HTLCSource::PreviousHopData(htlc.prev_hop);
|
||||
let receiver = HTLCDestination::FailedPayment { payment_hash };
|
||||
self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
payment.htlcs
|
||||
} else { return; }
|
||||
}
|
||||
};
|
||||
debug_assert!(!sources.is_empty());
|
||||
|
||||
@@ -6762,22 +6854,27 @@ where
return;
}
if valid_mpp {
let mpp_parts: Vec<_> = sources.iter().filter_map(|htlc| {
if let Some(cp_id) = htlc.prev_hop.counterparty_node_id {
Some(MPPClaimHTLCSource {
counterparty_node_id: cp_id,
funding_txo: htlc.prev_hop.outpoint,
channel_id: htlc.prev_hop.channel_id,
htlc_id: htlc.prev_hop.htlc_id,
})
} else {
None
}
}).collect();
let pending_mpp_claim_ptr_opt = if sources.len() > 1 {
let channels_without_preimage = sources.iter().filter_map(|htlc| {
if let Some(cp_id) = htlc.prev_hop.counterparty_node_id {
let prev_hop = &htlc.prev_hop;
Some((cp_id, prev_hop.outpoint, prev_hop.channel_id, prev_hop.htlc_id))
} else {
None
}
}).collect();
Some(Arc::new(Mutex::new(PendingMPPClaim {
channels_without_preimage,
channels_without_preimage: mpp_parts.clone(),
channels_with_preimage: Vec::new(),
})))
} else {
None
};
let payment_info = Some(PaymentClaimDetails { mpp_parts, claiming_payment });
for htlc in sources {
let this_mpp_claim = pending_mpp_claim_ptr_opt.as_ref().and_then(|pending_mpp_claim|
if let Some(cp_id) = htlc.prev_hop.counterparty_node_id {
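The `filter_map` above drops any HTLC whose previous hop has no known counterparty (such parts cannot be replayed), and the surviving parts seed both the pending-claim tracker and the monitor-persisted claim details. A small stand-alone sketch of that shape, with simplified placeholder types:

#[derive(Clone, Debug)]
struct Part { counterparty_node_id: [u8; 33], htlc_id: u64 }
struct PrevHop { counterparty_node_id: Option<[u8; 33]>, htlc_id: u64 }
struct Htlc { prev_hop: PrevHop }

fn main() {
	let sources = vec![
		Htlc { prev_hop: PrevHop { counterparty_node_id: Some([2; 33]), htlc_id: 1 } },
		Htlc { prev_hop: PrevHop { counterparty_node_id: None, htlc_id: 2 } },
	];
	// Parts without a counterparty id simply cannot be replayed and are skipped.
	let mpp_parts: Vec<Part> = sources.iter().filter_map(|htlc| {
		htlc.prev_hop.counterparty_node_id.map(|cp_id| Part {
			counterparty_node_id: cp_id, htlc_id: htlc.prev_hop.htlc_id,
		})
	}).collect();
	// For a multi-part payment the pending-claim tracker starts with every part outstanding.
	let channels_without_preimage = mpp_parts.clone();
	let channels_with_preimage: Vec<Part> = Vec::new();
	println!("{} pending, {} done", channels_without_preimage.len(), channels_with_preimage.len());
}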
@ -6793,7 +6890,7 @@ where
|
||||
}
|
||||
});
|
||||
self.claim_funds_from_hop(
|
||||
htlc.prev_hop, payment_preimage,
|
||||
htlc.prev_hop, payment_preimage, payment_info.clone(),
|
||||
|_, definitely_duplicate| {
|
||||
debug_assert!(!definitely_duplicate, "We shouldn't claim duplicatively from a payment");
|
||||
(Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim: this_mpp_claim }), raa_blocker)
|
||||
@ -6823,7 +6920,28 @@ where
|
||||
ComplFunc: FnOnce(Option<u64>, bool) -> (Option<MonitorUpdateCompletionAction>, Option<RAAMonitorUpdateBlockingAction>)
|
||||
>(
|
||||
&self, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage,
|
||||
completion_action: ComplFunc,
|
||||
payment_info: Option<PaymentClaimDetails>, completion_action: ComplFunc,
|
||||
) {
|
||||
let counterparty_node_id =
|
||||
match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
|
||||
Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
|
||||
None => None
|
||||
};
|
||||
|
||||
let htlc_source = HTLCClaimSource {
|
||||
counterparty_node_id,
|
||||
funding_txo: prev_hop.outpoint,
|
||||
channel_id: prev_hop.channel_id,
|
||||
htlc_id: prev_hop.htlc_id,
|
||||
};
|
||||
self.claim_mpp_part(htlc_source, payment_preimage, payment_info, completion_action)
|
||||
}
|
||||
|
||||
fn claim_mpp_part<
|
||||
ComplFunc: FnOnce(Option<u64>, bool) -> (Option<MonitorUpdateCompletionAction>, Option<RAAMonitorUpdateBlockingAction>)
|
||||
>(
|
||||
&self, prev_hop: HTLCClaimSource, payment_preimage: PaymentPreimage,
|
||||
payment_info: Option<PaymentClaimDetails>, completion_action: ComplFunc,
|
||||
) {
|
||||
//TODO: Delay the claimed_funds relaying just like we do outbound relay!
|
||||
|
||||
@ -6840,12 +6958,8 @@ where
|
||||
{
|
||||
let per_peer_state = self.per_peer_state.read().unwrap();
|
||||
let chan_id = prev_hop.channel_id;
|
||||
let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
|
||||
Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
|
||||
None => None
|
||||
};
|
||||
|
||||
let peer_state_opt = counterparty_node_id_opt.as_ref().map(
|
||||
let peer_state_opt = prev_hop.counterparty_node_id.as_ref().map(
|
||||
|counterparty_node_id| per_peer_state.get(counterparty_node_id)
|
||||
.map(|peer_mutex| peer_mutex.lock().unwrap())
|
||||
).unwrap_or(None);
|
||||
@ -6857,7 +6971,8 @@ where
|
||||
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
|
||||
let counterparty_node_id = chan.context.get_counterparty_node_id();
|
||||
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
|
||||
let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &&logger);
|
||||
let fulfill_res =
|
||||
chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, payment_info, &&logger);
|
||||
|
||||
match fulfill_res {
|
||||
UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } => {
|
||||
@ -6871,7 +6986,7 @@ where
|
||||
peer_state.actions_blocking_raa_monitor_updates.entry(chan_id).or_insert_with(Vec::new).push(raa_blocker);
|
||||
}
|
||||
if !during_init {
|
||||
handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
|
||||
handle_new_monitor_update!(self, prev_hop.funding_txo, monitor_update, peer_state_lock,
|
||||
peer_state, per_peer_state, chan);
|
||||
} else {
|
||||
// If we're running during init we cannot update a monitor directly -
|
||||
@ -6880,7 +6995,7 @@ where
|
||||
self.pending_background_events.lock().unwrap().push(
|
||||
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
|
||||
counterparty_node_id,
|
||||
funding_txo: prev_hop.outpoint,
|
||||
funding_txo: prev_hop.funding_txo,
|
||||
channel_id: prev_hop.channel_id,
|
||||
update: monitor_update.clone(),
|
||||
});
|
||||
@ -6889,7 +7004,16 @@ where
|
||||
UpdateFulfillCommitFetch::DuplicateClaim {} => {
|
||||
let (action_opt, raa_blocker_opt) = completion_action(None, true);
|
||||
if let Some(raa_blocker) = raa_blocker_opt {
|
||||
debug_assert!(peer_state.actions_blocking_raa_monitor_updates.get(&chan_id).unwrap().contains(&raa_blocker));
|
||||
// If we're making a claim during startup, its a replay of a
|
||||
// payment claim from a `ChannelMonitor`. In some cases (MPP or
|
||||
// if the HTLC was only recently removed) we make such claims
|
||||
// after an HTLC has been removed from a channel entirely, and
|
||||
// thus the RAA blocker has long since completed.
|
||||
//
|
||||
// In any other case, the RAA blocker must still be present and
|
||||
// blocking RAAs.
|
||||
debug_assert!(during_init ||
|
||||
peer_state.actions_blocking_raa_monitor_updates.get(&chan_id).unwrap().contains(&raa_blocker));
|
||||
}
|
||||
let action = if let Some(action) = action_opt {
|
||||
action
|
||||
@ -6901,38 +7025,41 @@ where
|
||||
|
||||
log_trace!(logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}",
|
||||
chan_id, action);
|
||||
let (node_id, _funding_outpoint, channel_id, blocker) =
|
||||
if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
|
||||
downstream_counterparty_node_id: node_id,
|
||||
downstream_funding_outpoint: funding_outpoint,
|
||||
downstream_funding_outpoint: _,
|
||||
blocking_action: blocker, downstream_channel_id: channel_id,
|
||||
} = action {
|
||||
(node_id, funding_outpoint, channel_id, blocker)
|
||||
if let Some(peer_state_mtx) = per_peer_state.get(&node_id) {
|
||||
let mut peer_state = peer_state_mtx.lock().unwrap();
|
||||
if let Some(blockers) = peer_state
|
||||
.actions_blocking_raa_monitor_updates
|
||||
.get_mut(&channel_id)
|
||||
{
|
||||
let mut found_blocker = false;
|
||||
blockers.retain(|iter| {
|
||||
// Note that we could actually be blocked, in
|
||||
// which case we need to only remove the one
|
||||
// blocker which was added duplicatively.
|
||||
let first_blocker = !found_blocker;
|
||||
if *iter == blocker { found_blocker = true; }
|
||||
*iter != blocker || !first_blocker
|
||||
});
|
||||
debug_assert!(found_blocker);
|
||||
}
|
||||
} else {
|
||||
debug_assert!(false);
|
||||
}
|
||||
} else if matches!(action, MonitorUpdateCompletionAction::PaymentClaimed { .. }) {
|
||||
debug_assert!(during_init,
|
||||
"Duplicate claims should always either be for forwarded payments(freeing another channel immediately) or during init (for claim replay)");
|
||||
mem::drop(per_peer_state);
|
||||
self.handle_monitor_update_completion_actions([action]);
|
||||
} else {
|
||||
debug_assert!(false,
|
||||
"Duplicate claims should always free another channel immediately");
|
||||
"Duplicate claims should always either be for forwarded payments(freeing another channel immediately) or during init (for claim replay)");
|
||||
return;
|
||||
};
|
||||
if let Some(peer_state_mtx) = per_peer_state.get(&node_id) {
|
||||
let mut peer_state = peer_state_mtx.lock().unwrap();
|
||||
if let Some(blockers) = peer_state
|
||||
.actions_blocking_raa_monitor_updates
|
||||
.get_mut(&channel_id)
|
||||
{
|
||||
let mut found_blocker = false;
|
||||
blockers.retain(|iter| {
|
||||
// Note that we could actually be blocked, in
|
||||
// which case we need to only remove the one
|
||||
// blocker which was added duplicatively.
|
||||
let first_blocker = !found_blocker;
|
||||
if *iter == blocker { found_blocker = true; }
|
||||
*iter != blocker || !first_blocker
|
||||
});
|
||||
debug_assert!(found_blocker);
|
||||
}
|
||||
} else {
|
||||
debug_assert!(false);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -6942,9 +7069,10 @@ where
|
||||
}
|
||||
let preimage_update = ChannelMonitorUpdate {
|
||||
update_id: CLOSED_CHANNEL_UPDATE_ID,
|
||||
counterparty_node_id: None,
|
||||
counterparty_node_id: prev_hop.counterparty_node_id,
|
||||
updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
|
||||
payment_preimage,
|
||||
payment_info,
|
||||
}],
|
||||
channel_id: Some(prev_hop.channel_id),
|
||||
};
|
||||
@ -6952,7 +7080,7 @@ where
|
||||
if !during_init {
|
||||
// We update the ChannelMonitor on the backward link, after
|
||||
// receiving an `update_fulfill_htlc` from the forward link.
|
||||
let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update);
|
||||
let update_res = self.chain_monitor.update_channel(prev_hop.funding_txo, &preimage_update);
|
||||
if update_res != ChannelMonitorUpdateStatus::Completed {
|
||||
// TODO: This needs to be handled somehow - if we receive a monitor update
|
||||
// with a preimage we *must* somehow manage to propagate it to the upstream
|
||||
@ -6975,7 +7103,7 @@ where
|
||||
// complete the monitor update completion action from `completion_action`.
|
||||
self.pending_background_events.lock().unwrap().push(
|
||||
BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((
|
||||
prev_hop.outpoint, prev_hop.channel_id, preimage_update,
|
||||
prev_hop.funding_txo, prev_hop.channel_id, preimage_update,
|
||||
)));
|
||||
}
|
||||
// Note that we do process the completion action here. This totally could be a
|
||||
@ -7055,7 +7183,7 @@ where
|
||||
let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
|
||||
#[cfg(debug_assertions)]
|
||||
let claiming_chan_funding_outpoint = hop_data.outpoint;
|
||||
self.claim_funds_from_hop(hop_data, payment_preimage,
|
||||
self.claim_funds_from_hop(hop_data, payment_preimage, None,
|
||||
|htlc_claim_value_msat, definitely_duplicate| {
|
||||
let chan_to_release =
|
||||
if let Some(node_id) = next_channel_counterparty_node_id {
|
||||
@ -7091,7 +7219,7 @@ where
|
||||
if *funding_txo == claiming_chan_funding_outpoint {
|
||||
assert!(update.updates.iter().any(|upd|
|
||||
if let ChannelMonitorUpdateStep::PaymentPreimage {
|
||||
payment_preimage: update_preimage
|
||||
payment_preimage: update_preimage, ..
|
||||
} = upd {
|
||||
payment_preimage == *update_preimage
|
||||
} else { false }
|
||||
@@ -7184,15 +7312,25 @@ where
if *pending_claim == claim_ptr {
let mut pending_claim_state_lock = pending_claim.0.lock().unwrap();
let pending_claim_state = &mut *pending_claim_state_lock;
pending_claim_state.channels_without_preimage.retain(|(cp, outp, cid, hid)| {
if *cp == counterparty_node_id && *cid == chan_id && *hid == htlc_id {
pending_claim_state.channels_with_preimage.push((*cp, *outp, *cid));
pending_claim_state.channels_without_preimage.retain(|htlc_info| {
let this_claim =
htlc_info.counterparty_node_id == counterparty_node_id
&& htlc_info.channel_id == chan_id
&& htlc_info.htlc_id == htlc_id;
if this_claim {
pending_claim_state.channels_with_preimage.push(htlc_info.clone());
false
} else { true }
});
if pending_claim_state.channels_without_preimage.is_empty() {
for (cp, outp, cid) in pending_claim_state.channels_with_preimage.iter() {
freed_channels.push((*cp, *outp, *cid, blocker.clone()));
for htlc_info in pending_claim_state.channels_with_preimage.iter() {
let freed_chan = (
htlc_info.counterparty_node_id,
htlc_info.funding_txo,
htlc_info.channel_id,
blocker.clone()
);
freed_channels.push(freed_chan);
}
}
!pending_claim_state.channels_without_preimage.is_empty()
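The retain-based bookkeeping above moves a part from `channels_without_preimage` to `channels_with_preimage` once its monitor update completes, and the claim is finished when nothing is left outstanding. The same pattern in isolation, with a simplified `Part` type:

#[derive(Clone, Debug, PartialEq)]
struct Part { channel_id: u32, htlc_id: u64 }

fn main() {
	let mut without = vec![Part { channel_id: 1, htlc_id: 7 }, Part { channel_id: 2, htlc_id: 9 }];
	let mut with: Vec<Part> = Vec::new();
	let (claimed_chan, claimed_htlc) = (1u32, 7u64);
	without.retain(|part| {
		let this_claim = part.channel_id == claimed_chan && part.htlc_id == claimed_htlc;
		if this_claim {
			// Keep the finished part around so its RAA blocker can be released at the end.
			with.push(part.clone());
			false
		} else { true }
	});
	if without.is_empty() {
		println!("all parts claimed; free blockers for {:?}", with);
	}
}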
@@ -7216,7 +7354,7 @@ where
onion_fields,
payment_id,
}) = payment {
self.pending_events.lock().unwrap().push_back((events::Event::PaymentClaimed {
let event = events::Event::PaymentClaimed {
payment_hash,
purpose,
amount_msat,
@@ -7225,7 +7363,16 @@ where
sender_intended_total_msat,
onion_fields,
payment_id,
}, None));
};
let event_action = (event, None);
let mut pending_events = self.pending_events.lock().unwrap();
// If we're replaying a claim on startup we may end up duplicating an event
// that's already in our queue, so check before we push another one. The
// `payment_id` should suffice to ensure we never spuriously drop a second
// event for a duplicate payment.
if !pending_events.contains(&event_action) {
pending_events.push_back(event_action);
}
}
},
MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
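Because a claim replayed from a `ChannelMonitor` on startup may race with an event that is already queued, the code now checks for the duplicate before pushing. A stripped-down version of that guard, with `Event` cut down to the fields needed for equality:

use std::collections::VecDeque;

#[derive(Debug, PartialEq)]
enum Event { PaymentClaimed { payment_hash: [u8; 32], payment_id: Option<u64> } }

fn main() {
	let mut pending_events: VecDeque<(Event, Option<()>)> = VecDeque::new();
	let event_action = (Event::PaymentClaimed { payment_hash: [3; 32], payment_id: Some(1) }, None);
	if !pending_events.contains(&event_action) {
		pending_events.push_back(event_action);
	}
	// A second replay of the same claim is a no-op thanks to the contains() check.
	let replay = (Event::PaymentClaimed { payment_hash: [3; 32], payment_id: Some(1) }, None);
	if !pending_events.contains(&replay) {
		pending_events.push_back(replay);
	}
	assert_eq!(pending_events.len(), 1);
}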
@@ -12024,6 +12171,7 @@ where
(11, self.probing_cookie_secret, required),
(13, htlc_onion_fields, optional_vec),
(14, decode_update_add_htlcs_opt, option),
(15, self.inbound_payment_id_secret, required),
});

Ok(())
@@ -12083,9 +12231,12 @@ impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
/// 3) If you are not fetching full blocks, register all relevant [`ChannelMonitor`] outpoints the
/// same way you would handle a [`chain::Filter`] call using
/// [`ChannelMonitor::get_outputs_to_watch`] and [`ChannelMonitor::get_funding_txo`].
/// 4) Reconnect blocks on your [`ChannelMonitor`]s.
/// 5) Disconnect/connect blocks on the [`ChannelManager`].
/// 6) Re-persist the [`ChannelMonitor`]s to ensure the latest state is on disk.
/// 4) Disconnect/connect blocks on your [`ChannelMonitor`]s to get them in sync with the chain.
/// 5) Disconnect/connect blocks on the [`ChannelManager`] to get it in sync with the chain.
/// 6) Optionally re-persist the [`ChannelMonitor`]s to ensure the latest state is on disk.
/// This is important if you have replayed a nontrivial number of blocks in step (4), allowing
/// you to avoid having to replay the same blocks if you shut down quickly after startup. It is
/// otherwise not required.
/// Note that if you're using a [`ChainMonitor`] for your [`chain::Watch`] implementation, you
/// will likely accomplish this as a side-effect of calling [`chain::Watch::watch_channel`] in
/// the next step.
@@ -12168,7 +12319,7 @@ where
/// this struct.
///
/// This is not exported to bindings users because we have no HashMap bindings
pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
pub channel_monitors: HashMap<OutPoint, &'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
}

impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
@@ -12191,7 +12342,7 @@ where
entropy_source: ES, node_signer: NS, signer_provider: SP, fee_estimator: F,
chain_monitor: M, tx_broadcaster: T, router: R, message_router: MR, logger: L,
default_config: UserConfig,
mut channel_monitors: Vec<&'a mut ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
mut channel_monitors: Vec<&'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
) -> Self {
Self {
entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor,
@ -12932,65 +13083,6 @@ where
|
||||
|
||||
let bounded_fee_estimator = LowerBoundedFeeEstimator::new(args.fee_estimator);
|
||||
|
||||
for (_, monitor) in args.channel_monitors.iter() {
|
||||
for (payment_hash, payment_preimage) in monitor.get_stored_preimages() {
|
||||
if let Some(payment) = claimable_payments.remove(&payment_hash) {
|
||||
log_info!(args.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", &payment_hash);
|
||||
let mut claimable_amt_msat = 0;
|
||||
let mut receiver_node_id = Some(our_network_pubkey);
|
||||
let phantom_shared_secret = payment.htlcs[0].prev_hop.phantom_shared_secret;
|
||||
if phantom_shared_secret.is_some() {
|
||||
let phantom_pubkey = args.node_signer.get_node_id(Recipient::PhantomNode)
|
||||
.expect("Failed to get node_id for phantom node recipient");
|
||||
receiver_node_id = Some(phantom_pubkey)
|
||||
}
|
||||
for claimable_htlc in &payment.htlcs {
|
||||
claimable_amt_msat += claimable_htlc.value;
|
||||
|
||||
// Add a holding-cell claim of the payment to the Channel, which should be
|
||||
// applied ~immediately on peer reconnection. Because it won't generate a
|
||||
// new commitment transaction we can just provide the payment preimage to
|
||||
// the corresponding ChannelMonitor and nothing else.
|
||||
//
|
||||
// We do so directly instead of via the normal ChannelMonitor update
|
||||
// procedure as the ChainMonitor hasn't yet been initialized, implying
|
||||
// we're not allowed to call it directly yet. Further, we do the update
|
||||
// without incrementing the ChannelMonitor update ID as there isn't any
|
||||
// reason to.
|
||||
// If we were to generate a new ChannelMonitor update ID here and then
|
||||
// crash before the user finishes block connect we'd end up force-closing
|
||||
// this channel as well. On the flip side, there's no harm in restarting
|
||||
// without the new monitor persisted - we'll end up right back here on
|
||||
// restart.
|
||||
let previous_channel_id = claimable_htlc.prev_hop.channel_id;
|
||||
if let Some(peer_node_id) = outpoint_to_peer.get(&claimable_htlc.prev_hop.outpoint) {
|
||||
let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap();
|
||||
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
|
||||
let peer_state = &mut *peer_state_lock;
|
||||
if let Some(ChannelPhase::Funded(channel)) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
|
||||
let logger = WithChannelContext::from(&args.logger, &channel.context, Some(payment_hash));
|
||||
channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &&logger);
|
||||
}
|
||||
}
|
||||
if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) {
|
||||
previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &bounded_fee_estimator, &args.logger);
|
||||
}
|
||||
}
|
||||
let payment_id = payment.inbound_payment_id(&inbound_payment_id_secret.unwrap());
|
||||
pending_events_read.push_back((events::Event::PaymentClaimed {
|
||||
receiver_node_id,
|
||||
payment_hash,
|
||||
purpose: payment.purpose,
|
||||
amount_msat: claimable_amt_msat,
|
||||
htlcs: payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(),
|
||||
sender_intended_total_msat: payment.htlcs.first().map(|htlc| htlc.total_msat),
|
||||
onion_fields: payment.onion_fields,
|
||||
payment_id: Some(payment_id),
|
||||
}, None));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() {
|
||||
if let Some(peer_state) = per_peer_state.get(&node_id) {
|
||||
for (channel_id, actions) in monitor_update_blocked_actions.iter() {
|
||||
@ -13091,6 +13183,148 @@ where
|
||||
default_configuration: args.default_config,
|
||||
};
|
||||
|
||||
for (_, monitor) in args.channel_monitors.iter() {
|
||||
for (payment_hash, (payment_preimage, payment_claims)) in monitor.get_stored_preimages() {
|
||||
if !payment_claims.is_empty() {
|
||||
for payment_claim in payment_claims {
|
||||
if payment_claim.mpp_parts.is_empty() {
|
||||
return Err(DecodeError::InvalidValue);
|
||||
}
|
||||
let pending_claims = PendingMPPClaim {
|
||||
channels_without_preimage: payment_claim.mpp_parts.clone(),
|
||||
channels_with_preimage: Vec::new(),
|
||||
};
|
||||
let pending_claim_ptr_opt = Some(Arc::new(Mutex::new(pending_claims)));
|
||||
|
||||
// While it may be duplicative to generate a PaymentClaimed here, trying to
|
||||
// figure out if the user definitely saw it before shutdown would require some
|
||||
// nontrivial logic and may break as we move away from regularly persisting
|
||||
// ChannelManager. Instead, we rely on the users' event handler being
|
||||
// idempotent and just blindly generate one no matter what, letting the
|
||||
// preimages eventually timing out from ChannelMonitors to prevent us from
|
||||
// doing so forever.
|
||||
|
||||
let claim_found =
|
||||
channel_manager.claimable_payments.lock().unwrap().begin_claiming_payment(
|
||||
payment_hash, &channel_manager.node_signer, &channel_manager.logger,
|
||||
&channel_manager.inbound_payment_id_secret, true,
|
||||
);
|
||||
if claim_found.is_err() {
|
||||
let mut claimable_payments = channel_manager.claimable_payments.lock().unwrap();
|
||||
match claimable_payments.pending_claiming_payments.entry(payment_hash) {
|
||||
hash_map::Entry::Occupied(_) => {
|
||||
debug_assert!(false, "Entry was added in begin_claiming_payment");
|
||||
return Err(DecodeError::InvalidValue);
|
||||
},
|
||||
hash_map::Entry::Vacant(entry) => {
|
||||
entry.insert(payment_claim.claiming_payment);
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
for part in payment_claim.mpp_parts.iter() {
|
||||
let pending_mpp_claim = pending_claim_ptr_opt.as_ref().map(|ptr| (
|
||||
part.counterparty_node_id, part.channel_id, part.htlc_id,
|
||||
PendingMPPClaimPointer(Arc::clone(&ptr))
|
||||
));
|
||||
let pending_claim_ptr = pending_claim_ptr_opt.as_ref().map(|ptr|
|
||||
RAAMonitorUpdateBlockingAction::ClaimedMPPPayment {
|
||||
pending_claim: PendingMPPClaimPointer(Arc::clone(&ptr)),
|
||||
}
|
||||
);
|
||||
// Note that we don't need to pass the `payment_info` here - its
|
||||
// already (clearly) durably on disk in the `ChannelMonitor` so there's
|
||||
// no need to worry about getting it into others.
|
||||
channel_manager.claim_mpp_part(
part.into(), payment_preimage, None,
|_, _|
(Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim }), pending_claim_ptr)
);
}
}
} else {
let per_peer_state = channel_manager.per_peer_state.read().unwrap();
let mut claimable_payments = channel_manager.claimable_payments.lock().unwrap();
let payment = claimable_payments.claimable_payments.remove(&payment_hash);
mem::drop(claimable_payments);
if let Some(payment) = payment {
log_info!(channel_manager.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", &payment_hash);
let mut claimable_amt_msat = 0;
let mut receiver_node_id = Some(our_network_pubkey);
let phantom_shared_secret = payment.htlcs[0].prev_hop.phantom_shared_secret;
if phantom_shared_secret.is_some() {
let phantom_pubkey = channel_manager.node_signer.get_node_id(Recipient::PhantomNode)
.expect("Failed to get node_id for phantom node recipient");
receiver_node_id = Some(phantom_pubkey)
}
for claimable_htlc in &payment.htlcs {
claimable_amt_msat += claimable_htlc.value;

// Add a holding-cell claim of the payment to the Channel, which should be
// applied ~immediately on peer reconnection. Because it won't generate a
// new commitment transaction we can just provide the payment preimage to
// the corresponding ChannelMonitor and nothing else.
//
// We do so directly instead of via the normal ChannelMonitor update
// procedure as the ChainMonitor hasn't yet been initialized, implying
// we're not allowed to call it directly yet. Further, we do the update
// without incrementing the ChannelMonitor update ID as there isn't any
// reason to.
// If we were to generate a new ChannelMonitor update ID here and then
// crash before the user finishes block connect we'd end up force-closing
// this channel as well. On the flip side, there's no harm in restarting
// without the new monitor persisted - we'll end up right back here on
// restart.
let previous_channel_id = claimable_htlc.prev_hop.channel_id;
let peer_node_id_opt = channel_manager.outpoint_to_peer.lock().unwrap()
.get(&claimable_htlc.prev_hop.outpoint).cloned();
if let Some(peer_node_id) = peer_node_id_opt {
let peer_state_mutex = per_peer_state.get(&peer_node_id).unwrap();
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
if let Some(ChannelPhase::Funded(channel)) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
let logger = WithChannelContext::from(&channel_manager.logger, &channel.context, Some(payment_hash));
channel.claim_htlc_while_disconnected_dropping_mon_update_legacy(
claimable_htlc.prev_hop.htlc_id, payment_preimage, &&logger
);
}
}
if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) {
// Note that this is unsafe as we no longer require the
// `ChannelMonitor`s to be re-persisted prior to this
// `ChannelManager` being persisted after we get started running.
// If this `ChannelManager` gets persisted first then we crash, we
// won't have the `claimable_payments` entry we need to re-enter
// this code block, causing us to not re-apply the preimage to this
// `ChannelMonitor`.
//
// We should never be here with modern payment claims, however, as
// they should always include the HTLC list. Instead, this is only
// for nodes during upgrade, and we explicitly require the old
// persistence semantics on upgrade in the release notes.
previous_hop_monitor.provide_payment_preimage_unsafe_legacy(
&payment_hash, &payment_preimage, &channel_manager.tx_broadcaster,
&channel_manager.fee_estimator, &channel_manager.logger
);
}
}
let mut pending_events = channel_manager.pending_events.lock().unwrap();
let payment_id = payment.inbound_payment_id(&inbound_payment_id_secret.unwrap());
pending_events.push_back((events::Event::PaymentClaimed {
receiver_node_id,
payment_hash,
purpose: payment.purpose,
amount_msat: claimable_amt_msat,
htlcs: payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(),
sender_intended_total_msat: payment.htlcs.first().map(|htlc| htlc.total_msat),
onion_fields: payment.onion_fields,
payment_id: Some(payment_id),
}, None));
}
}
}
}

for htlc_source in failed_htlcs.drain(..) {
let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
@ -686,7 +686,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
// them to ensure we can write and reload our ChannelManager.
{
let mut channel_monitors = new_hash_map();
for monitor in deserialized_monitors.iter_mut() {
for monitor in deserialized_monitors.iter() {
channel_monitors.insert(monitor.get_funding_txo().0, monitor);
}

@ -1128,7 +1128,7 @@ pub fn _reload_node<'a, 'b, 'c>(node: &'a Node<'a, 'b, 'c>, default_config: User
let mut node_read = &chanman_encoded[..];
let (_, node_deserialized) = {
let mut channel_monitors = new_hash_map();
for monitor in monitors_read.iter_mut() {
for monitor in monitors_read.iter() {
assert!(channel_monitors.insert(monitor.get_funding_txo().0, monitor).is_none());
}
<(BlockHash, TestChannelManager<'b, 'c>)>::read(&mut node_read, ChannelManagerReadArgs {
@ -3702,7 +3702,10 @@ fn test_force_close_fail_back() {
// Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
{
get_monitor!(nodes[2], payment_event.commitment_msg.channel_id)
.provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator), &node_cfgs[2].logger);
.provide_payment_preimage_unsafe_legacy(
&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster,
&LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator), &node_cfgs[2].logger
);
}
mine_transaction(&nodes[2], &commitment_tx);
let mut node_txn = nodes[2].tx_broadcaster.txn_broadcast();
@ -1883,8 +1883,10 @@ fn do_test_revoked_counterparty_aggregated_claims(anchors: bool) {

// Cheat by giving A's ChannelMonitor the preimage to the to-be-claimed HTLC so that we have an
// HTLC-claim transaction on the to-be-revoked state.
get_monitor!(nodes[0], chan_id).provide_payment_preimage(&claimed_payment_hash, &claimed_payment_preimage,
&node_cfgs[0].tx_broadcaster, &LowerBoundedFeeEstimator::new(node_cfgs[0].fee_estimator), &nodes[0].logger);
get_monitor!(nodes[0], chan_id).provide_payment_preimage_unsafe_legacy(
&claimed_payment_hash, &claimed_payment_preimage, &node_cfgs[0].tx_broadcaster,
&LowerBoundedFeeEstimator::new(node_cfgs[0].fee_estimator), &nodes[0].logger
);

// Now get the latest commitment transaction from A and then update the fee to revoke it
let as_revoked_txn = get_local_commitment_txn!(nodes[0], chan_id);

@ -2507,11 +2509,11 @@ fn do_test_yield_anchors_events(have_htlcs: bool) {
}

if have_htlcs {
get_monitor!(nodes[0], chan_id).provide_payment_preimage(
get_monitor!(nodes[0], chan_id).provide_payment_preimage_unsafe_legacy(
&payment_hash_2.unwrap(), &payment_preimage_2.unwrap(), &node_cfgs[0].tx_broadcaster,
&LowerBoundedFeeEstimator::new(node_cfgs[0].fee_estimator), &nodes[0].logger
);
get_monitor!(nodes[1], chan_id).provide_payment_preimage(
get_monitor!(nodes[1], chan_id).provide_payment_preimage_unsafe_legacy(
&payment_hash_1.unwrap(), &payment_preimage_1.unwrap(), &node_cfgs[1].tx_broadcaster,
&LowerBoundedFeeEstimator::new(node_cfgs[1].fee_estimator), &nodes[1].logger
);

@ -2706,7 +2708,7 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
for chan_id in [chan_a.2, chan_b.2].iter() {
let monitor = get_monitor!(nodes[1], chan_id);
for payment in [payment_a, payment_b, payment_c, payment_d].iter() {
monitor.provide_payment_preimage(
monitor.provide_payment_preimage_unsafe_legacy(
&payment.1, &payment.0, &node_cfgs[1].tx_broadcaster,
&LowerBoundedFeeEstimator::new(node_cfgs[1].fee_estimator), &nodes[1].logger
);
@ -426,7 +426,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
chain_monitor: nodes[0].chain_monitor,
tx_broadcaster: nodes[0].tx_broadcaster,
logger: &logger,
channel_monitors: node_0_stale_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
channel_monitors: node_0_stale_monitors.iter().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
}) { } else {
panic!("If the monitor(s) are stale, this indicates a bug and we should get an Err return");
};

@ -444,7 +444,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
chain_monitor: nodes[0].chain_monitor,
tx_broadcaster: nodes[0].tx_broadcaster,
logger: &logger,
channel_monitors: node_0_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
channel_monitors: node_0_monitors.iter().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
}).unwrap();
nodes_0_deserialized = nodes_0_deserialized_tmp;
assert!(nodes_0_read.is_empty());
@ -878,27 +878,39 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
// Now restart nodes[3].
reload_node!(nodes[3], original_manager, &[&updated_monitor.0, &original_monitor.0], persister, new_chain_monitor, nodes_3_deserialized);

// On startup the preimage should have been copied into the non-persisted monitor:
// Until the startup background events are processed (in `get_and_clear_pending_events`,
// below), the preimage is not copied to the non-persisted monitor...
assert!(get_monitor!(nodes[3], chan_id_persisted).get_stored_preimages().contains_key(&payment_hash));
assert!(get_monitor!(nodes[3], chan_id_not_persisted).get_stored_preimages().contains_key(&payment_hash));
assert_eq!(
get_monitor!(nodes[3], chan_id_not_persisted).get_stored_preimages().contains_key(&payment_hash),
persist_both_monitors,
);

nodes[1].node.peer_disconnected(nodes[3].node.get_our_node_id());
nodes[2].node.peer_disconnected(nodes[3].node.get_our_node_id());

// During deserialization, we should have closed one channel and broadcast its latest
// commitment transaction. We should also still have the original PaymentClaimable event we
// never finished processing.
// never finished processing as well as a PaymentClaimed event regenerated when we replayed the
// preimage onto the non-persisted monitor.
let events = nodes[3].node.get_and_clear_pending_events();
assert_eq!(events.len(), if persist_both_monitors { 4 } else { 3 });
if let Event::PaymentClaimable { amount_msat: 15_000_000, .. } = events[0] { } else { panic!(); }
if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[1] { } else { panic!(); }
if persist_both_monitors {
if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[2] { } else { panic!(); }
check_added_monitors(&nodes[3], 2);
if let Event::PaymentClaimed { amount_msat: 15_000_000, .. } = events[3] { } else { panic!(); }
check_added_monitors(&nodes[3], 6);
} else {
check_added_monitors(&nodes[3], 1);
if let Event::PaymentClaimed { amount_msat: 15_000_000, .. } = events[2] { } else { panic!(); }
check_added_monitors(&nodes[3], 3);
}

// Now that we've processed background events, the preimage should have been copied into the
// non-persisted monitor:
assert!(get_monitor!(nodes[3], chan_id_persisted).get_stored_preimages().contains_key(&payment_hash));
assert!(get_monitor!(nodes[3], chan_id_not_persisted).get_stored_preimages().contains_key(&payment_hash));

// On restart, we should also get a duplicate PaymentClaimed event as we persisted the
// ChannelManager prior to handling the original one.
if let Event::PaymentClaimed { payment_hash: our_payment_hash, amount_msat: 15_000_000, .. } =
@ -948,6 +960,11 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
nodes[0].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
commitment_signed_dance!(nodes[0], nodes[2], cs_updates.commitment_signed, false, true);
expect_payment_sent!(nodes[0], payment_preimage);

// Ensure that the remaining channel is fully operational and not blocked (and that after a
// cycle of commitment updates the payment preimage is ultimately pruned).
send_payment(&nodes[0], &[&nodes[2], &nodes[3]], 100_000);
assert!(!get_monitor!(nodes[3], chan_id_not_persisted).get_stored_preimages().contains_key(&payment_hash));
}
}
@ -1024,8 +1041,10 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
check_added_monitors!(nodes[2], 1);

if claim_htlc {
get_monitor!(nodes[2], chan_id_2).provide_payment_preimage(&payment_hash, &payment_preimage,
&nodes[2].tx_broadcaster, &LowerBoundedFeeEstimator(nodes[2].fee_estimator), &nodes[2].logger);
get_monitor!(nodes[2], chan_id_2).provide_payment_preimage_unsafe_legacy(
&payment_hash, &payment_preimage, &nodes[2].tx_broadcaster,
&LowerBoundedFeeEstimator(nodes[2].fee_estimator), &nodes[2].logger
);
}
assert!(nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
@ -674,7 +674,7 @@ fn test_htlc_preimage_claim_holder_commitment_after_counterparty_commitment_reor

// Provide the preimage now, such that we only claim from the holder commitment (since it's
// currently confirmed) and not the counterparty's.
get_monitor!(nodes[1], chan_id).provide_payment_preimage(
get_monitor!(nodes[1], chan_id).provide_payment_preimage_unsafe_legacy(
&payment_hash, &payment_preimage, &nodes[1].tx_broadcaster,
&LowerBoundedFeeEstimator(nodes[1].fee_estimator), &nodes[1].logger
);

@ -749,7 +749,7 @@ fn test_htlc_preimage_claim_prev_counterparty_commitment_after_current_counterpa

// Provide the preimage now, such that we only claim from the previous commitment (since it's
// currently confirmed) and not the latest.
get_monitor!(nodes[1], chan_id).provide_payment_preimage(
get_monitor!(nodes[1], chan_id).provide_payment_preimage_unsafe_legacy(
&payment_hash, &payment_preimage, &nodes[1].tx_broadcaster,
&LowerBoundedFeeEstimator(nodes[1].fee_estimator), &nodes[1].logger
);
@ -1001,6 +1001,7 @@ impl Readable for Vec<u8> {
impl_for_vec!(ecdsa::Signature);
impl_for_vec!(crate::chain::channelmonitor::ChannelMonitorUpdate);
impl_for_vec!(crate::ln::channelmanager::MonitorUpdateCompletionAction);
impl_for_vec!(crate::ln::channelmanager::PaymentClaimDetails);
impl_for_vec!(crate::ln::msgs::SocketAddress);
impl_for_vec!((A, B), A, B);
impl_writeable_for_vec!(&crate::routing::router::BlindedTail);

pending_changelog/3322-a.txt (new file)
@ -0,0 +1,6 @@
API Changes
===========

Additional information is now stored in `ChannelMonitorUpdate`s which may increase the size of
`ChannelMonitorUpdate`s claiming inbound payments substantially. The expected maximum size of
`ChannelMonitorUpdate`s shouldn't change materially.
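For context on the size note above, the sketch below is a minimal, hypothetical helper (the name `serialized_len` is illustrative, not an LDK API) that reports the encoded size of any LDK `Writeable` value, such as a `ChannelMonitorUpdate`, before it is handed to storage.

```rust
use lightning::util::ser::Writeable;

/// Hypothetical helper: report the serialized size of any LDK `Writeable`
/// (for example a `ChannelMonitorUpdate`) before persisting it.
fn serialized_len<W: Writeable>(value: &W) -> usize {
    // `Writeable::encode` serializes the value into a byte vector.
    value.encode().len()
}
```

A persister could log this value per update to observe how much larger the preimage-claim updates become in practice.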
pending_changelog/3322-b.txt (new file)
@ -0,0 +1,7 @@
API Updates
===========

As a part of adding robustness against several unlikely scenarios, redundant `PaymentClaimed`
`Event`s will be generated more frequently on startup for payments received on LDK 0.1 and
newer. A new `Event::PaymentClaimed::payment_id` field may be used to better differentiate
between redundant payments.
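Because the same claim can now surface as more than one `PaymentClaimed` event across restarts, event handlers should treat claims idempotently. The following is a minimal sketch of one way to do that, keyed on the payment hash plus the new `payment_id`; the `ClaimLedger` type and `note_claim` method are illustrative names, and the import path is assumed from recent LDK releases and may differ.

```rust
use std::collections::HashSet;

use lightning::events::Event;

/// Hypothetical idempotent handler state: remembers which claims have already
/// been recorded so a redundant `PaymentClaimed` replayed on startup is a no-op.
#[derive(Default)]
struct ClaimLedger {
    // Keyed on (payment_hash bytes, payment_id bytes if present).
    seen: HashSet<([u8; 32], Option<[u8; 32]>)>,
}

impl ClaimLedger {
    fn note_claim(&mut self, event: &Event) {
        if let Event::PaymentClaimed { payment_hash, payment_id, amount_msat, .. } = event {
            let key = (payment_hash.0, payment_id.as_ref().map(|id| id.0));
            // `insert` returns false if this claim was already recorded, so a
            // duplicate event regenerated on restart does nothing.
            if self.seen.insert(key) {
                println!("claimed {} msat for payment {:?}", amount_msat, payment_hash);
            }
        }
    }
}
```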
pending_changelog/matt-persist-preimage-on-upgrade.txt (new file)
@ -0,0 +1,8 @@
# Backwards Compatibility
* The `ChannelManager` deserialization semantics no longer require that
  `ChannelMonitor`s be re-persisted after `(BlockHash, ChannelManager)::read`
  is called prior to normal node operation. This applies to upgraded nodes
  only *after* a startup with the old semantics completes at least once. In
  other words, you must deserialize the `ChannelManager` with upgraded LDK,
  persist the `ChannelMonitor`s, then continue to normal startup once, and
  thereafter you may skip the `ChannelMonitor` persistence step.
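The one-time upgrade ordering above (read the `ChannelManager` with the new LDK, re-persist every `ChannelMonitor` once, and only then rely on the relaxed semantics) can be sketched as follows; every name here is an illustrative placeholder standing in for the real LDK types and your persister, not an LDK API.

```rust
/// Illustrative only: closures stand in for `(BlockHash, ChannelManager)::read`
/// and for whatever persister writes `ChannelMonitor`s in a real node.
fn startup_after_upgrade<M, E>(
    read_channel_manager: impl FnOnce() -> Result<(), E>,
    monitors: &[M],
    persist_monitor: impl Fn(&M) -> Result<(), E>,
    first_run_on_new_ldk: bool,
) -> Result<(), E> {
    // Step 1: deserialize the ChannelManager.
    read_channel_manager()?;

    // Step 2: on the first startup with the upgraded LDK, keep the old semantics
    // and re-persist every ChannelMonitor before normal operation begins.
    if first_run_on_new_ldk {
        for monitor in monitors {
            persist_monitor(monitor)?;
        }
    }

    // Step 3: on later startups the re-persistence step may be skipped.
    Ok(())
}
```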