Merge pull request #3355 from TheBlueMatt/2024-10-mon-ids-after-close
Commit: 1c5b4c129b
Mirror of https://github.com/lightningdevkit/rust-lightning.git
@@ -1,4 +1,3 @@
-use lightning::chain::channelmonitor::CLOSED_CHANNEL_UPDATE_ID;
 use lightning::events::ClosureReason;
 use lightning::ln::functional_test_utils::{
 	connect_block, create_announced_chan_between_nodes, create_chanmon_cfgs, create_dummy_block,
@@ -168,5 +167,5 @@ pub(crate) fn do_test_store<K: KVStore>(store_0: &K, store_1: &K) {
 	check_added_monitors!(nodes[1], 1);
 
 	// Make sure everything is persisted as expected after close.
-	check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);
+	check_persisted_data!(11);
 }
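A rough sketch of the new numbering (hypothetical values, not taken from this test): after this change the force-close update simply continues the per-channel counter instead of jumping to the old sentinel.

fn main() {
	// Hypothetical: the last update applied while the channel was still open...
	let pre_close_update_id: u64 = 10;
	// ...and the ChannelForceClosed update that follows it under the new scheme.
	let force_close_update_id = pre_close_update_id + 1;
	assert_eq!(force_close_update_id, 11);
	// The old CLOSED_CHANNEL_UPDATE_ID sentinel is no longer used for new updates.
	assert_ne!(force_close_update_id, u64::MAX);
}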
@@ -89,11 +89,9 @@ pub struct ChannelMonitorUpdate {
 	/// [`ChannelMonitorUpdateStatus::InProgress`] have been applied to all copies of a given
 	/// ChannelMonitor when ChannelManager::channel_monitor_updated is called.
 	///
-	/// The only instances we allow where update_id values are not strictly increasing have a
-	/// special update ID of [`CLOSED_CHANNEL_UPDATE_ID`]. This update ID is used for updates that
-	/// will force close the channel by broadcasting the latest commitment transaction or
-	/// special post-force-close updates, like providing preimages necessary to claim outputs on the
-	/// broadcast commitment transaction. See its docs for more details.
+	/// Note that for [`ChannelMonitorUpdate`]s generated on LDK versions prior to 0.1 after the
+	/// channel was closed, this value may be [`u64::MAX`]. In that case, multiple updates may
+	/// appear with the same ID, and all should be replayed.
 	///
 	/// [`ChannelMonitorUpdateStatus::InProgress`]: super::ChannelMonitorUpdateStatus::InProgress
 	pub update_id: u64,
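A rough sketch, not LDK API, of how a consumer of these updates might check the ordering rule described above while tolerating the pre-0.1 legacy value:

/// Returns whether `next_id` is acceptable given the last-applied `prev_id`:
/// IDs strictly increase, except that pre-0.1 post-close updates may all carry
/// u64::MAX and must all be replayed.
fn update_order_ok(prev_id: u64, next_id: u64) -> bool {
	const LEGACY_CLOSED_CHANNEL_UPDATE_ID: u64 = u64::MAX;
	next_id == LEGACY_CLOSED_CHANNEL_UPDATE_ID || next_id > prev_id
}

fn main() {
	assert!(update_order_ok(7, 8));
	assert!(!update_order_ok(8, 8));
	assert!(update_order_ok(u64::MAX, u64::MAX)); // legacy post-close replay
}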
@@ -104,15 +102,9 @@ pub struct ChannelMonitorUpdate {
 	pub channel_id: Option<ChannelId>,
 }
 
-/// The update ID used for a [`ChannelMonitorUpdate`] that is either:
-///
-/// (1) attempting to force close the channel by broadcasting our latest commitment transaction or
-/// (2) providing a preimage (after the channel has been force closed) from a forward link that
-///     allows us to spend an HTLC output on this channel's (the backward link's) broadcasted
-///     commitment transaction.
-///
-/// No other [`ChannelMonitorUpdate`]s are allowed after force-close.
-pub const CLOSED_CHANNEL_UPDATE_ID: u64 = core::u64::MAX;
+/// LDK prior to 0.1 used this constant as the [`ChannelMonitorUpdate::update_id`] for any
+/// [`ChannelMonitorUpdate`]s which were generated after the channel was closed.
+const LEGACY_CLOSED_CHANNEL_UPDATE_ID: u64 = core::u64::MAX;
 
 impl Writeable for ChannelMonitorUpdate {
 	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
@@ -1553,6 +1545,8 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
 
 	/// Gets the update_id from the latest ChannelMonitorUpdate which was applied to this
 	/// ChannelMonitor.
+	///
+	/// Note that for channels closed prior to LDK 0.1, this may return [`u64::MAX`].
 	pub fn get_latest_update_id(&self) -> u64 {
 		self.inner.lock().unwrap().get_latest_update_id()
 	}
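A rough usage sketch (hypothetical helper, e.g. called as describe_update_id(monitor.get_latest_update_id())): persistence code keying storage by this ID should treat u64::MAX as the legacy closed sentinel rather than as a counter value.

fn describe_update_id(latest_update_id: u64) -> &'static str {
	if latest_update_id == u64::MAX {
		// Channel was closed by LDK prior to 0.1; the ID is a sentinel, not a counter.
		"legacy-closed"
	} else {
		// Otherwise IDs are strictly increasing across applied ChannelMonitorUpdates.
		"monotonic"
	}
}

fn main() {
	assert_eq!(describe_update_id(42), "monotonic");
	assert_eq!(describe_update_id(u64::MAX), "legacy-closed");
}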
@@ -1717,6 +1711,12 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
 		self.inner.lock().unwrap().get_cur_holder_commitment_number()
 	}
 
+	/// Gets whether we've been notified that this channel is closed by the `ChannelManager` (i.e.
+	/// via a [`ChannelMonitorUpdateStep::ChannelForceClosed`]).
+	pub(crate) fn offchain_closed(&self) -> bool {
+		self.inner.lock().unwrap().lockdown_from_offchain
+	}
+
 	/// Gets the `node_id` of the counterparty for this channel.
 	///
 	/// Will be `None` for channels constructed on LDK versions prior to 0.0.110 and always `Some`
@@ -3110,11 +3110,11 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 		F::Target: FeeEstimator,
 		L::Target: Logger,
 	{
-		if self.latest_update_id == CLOSED_CHANNEL_UPDATE_ID && updates.update_id == CLOSED_CHANNEL_UPDATE_ID {
-			log_info!(logger, "Applying post-force-closed update to monitor {} with {} change(s).",
+		if self.latest_update_id == LEGACY_CLOSED_CHANNEL_UPDATE_ID && updates.update_id == LEGACY_CLOSED_CHANNEL_UPDATE_ID {
+			log_info!(logger, "Applying pre-0.1 post-force-closed update to monitor {} with {} change(s).",
 				log_funding_info!(self), updates.updates.len());
-		} else if updates.update_id == CLOSED_CHANNEL_UPDATE_ID {
-			log_info!(logger, "Applying force close update to monitor {} with {} change(s).",
+		} else if updates.update_id == LEGACY_CLOSED_CHANNEL_UPDATE_ID {
+			log_info!(logger, "Applying pre-0.1 force close update to monitor {} with {} change(s).",
 				log_funding_info!(self), updates.updates.len());
 		} else {
 			log_info!(logger, "Applying update to monitor {}, bringing update_id from {} to {} with {} change(s).",
@@ -3137,14 +3137,14 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 		// The `ChannelManager` may also queue redundant `ChannelForceClosed` updates if it still
 		// thinks the channel needs to have its commitment transaction broadcast, so we'll allow
 		// them as well.
-		if updates.update_id == CLOSED_CHANNEL_UPDATE_ID {
+		if updates.update_id == LEGACY_CLOSED_CHANNEL_UPDATE_ID || self.lockdown_from_offchain {
 			assert_eq!(updates.updates.len(), 1);
 			match updates.updates[0] {
 				ChannelMonitorUpdateStep::ChannelForceClosed { .. } => {},
 				// We should have already seen a `ChannelForceClosed` update if we're trying to
 				// provide a preimage at this point.
 				ChannelMonitorUpdateStep::PaymentPreimage { .. } =>
-					debug_assert_eq!(self.latest_update_id, CLOSED_CHANNEL_UPDATE_ID),
+					debug_assert!(self.lockdown_from_offchain),
 				_ => {
 					log_error!(logger, "Attempted to apply post-force-close ChannelMonitorUpdate of type {}", updates.updates[0].variant_name());
 					panic!("Attempted to apply post-force-close ChannelMonitorUpdate that wasn't providing a payment preimage");
@@ -3224,17 +3224,29 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 			self.counterparty_commitment_txs_from_update(updates);
 		}
 
-		// If the updates succeeded and we were in an already closed channel state, then there's no
-		// need to refuse any updates we expect to receive afer seeing a confirmed commitment.
-		if ret.is_ok() && updates.update_id == CLOSED_CHANNEL_UPDATE_ID && self.latest_update_id == updates.update_id {
-			return Ok(());
-		}
-
 		self.latest_update_id = updates.update_id;
 
-		// Refuse updates after we've detected a spend onchain, but only if we haven't processed a
-		// force closed monitor update yet.
-		if ret.is_ok() && self.funding_spend_seen && self.latest_update_id != CLOSED_CHANNEL_UPDATE_ID {
+		// Refuse updates after we've detected a spend onchain (or if the channel was otherwise
+		// closed), but only if the update isn't the kind of update we expect to see after channel
+		// closure.
+		let mut is_pre_close_update = false;
+		for update in updates.updates.iter() {
+			match update {
+				ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { .. }
+					|ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { .. }
+					|ChannelMonitorUpdateStep::ShutdownScript { .. }
+					|ChannelMonitorUpdateStep::CommitmentSecret { .. } =>
+					is_pre_close_update = true,
+				// After a channel is closed, we don't communicate with our peer about it, so the
+				// only things we will update is getting a new preimage (from a different channel)
+				// or being told that the channel is closed. All other updates are generated while
+				// talking to our peer.
+				ChannelMonitorUpdateStep::PaymentPreimage { .. } => {},
+				ChannelMonitorUpdateStep::ChannelForceClosed { .. } => {},
+			}
+		}
+
+		if ret.is_ok() && (self.funding_spend_seen || self.lockdown_from_offchain) && is_pre_close_update {
 			log_error!(logger, "Refusing Channel Monitor Update as counterparty attempted to update commitment after funding was spent");
 			Err(())
 		} else { ret }
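A standalone sketch of the gating added above, using a toy enum in place of LDK's ChannelMonitorUpdateStep: only pre-close style steps are refused once the funding output is spent or the channel was closed off-chain, while preimage and force-close steps stay allowed.

enum ToyStep {
	LatestHolderCommitment,
	LatestCounterpartyCommitment,
	ShutdownScript,
	CommitmentSecret,
	PaymentPreimage,
	ChannelForceClosed,
}

fn refuse_update(funding_spend_seen: bool, lockdown_from_offchain: bool, steps: &[ToyStep]) -> bool {
	// Mirrors the intent of `is_pre_close_update` above, on a toy type.
	let is_pre_close_update = steps.iter().any(|s| matches!(
		s,
		ToyStep::LatestHolderCommitment
			| ToyStep::LatestCounterpartyCommitment
			| ToyStep::ShutdownScript
			| ToyStep::CommitmentSecret
	));
	(funding_spend_seen || lockdown_from_offchain) && is_pre_close_update
}

fn main() {
	// A preimage claim after close is still accepted...
	assert!(!refuse_update(true, true, &[ToyStep::PaymentPreimage]));
	// ...but a new commitment after the funding output was spent is refused.
	assert!(refuse_update(true, false, &[ToyStep::LatestCounterpartyCommitment]));
	let _ = ToyStep::ChannelForceClosed;
}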
@@ -45,7 +45,7 @@ use crate::ln::chan_utils;
 use crate::ln::onion_utils::HTLCFailReason;
 use crate::chain::BestBlock;
 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
-use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
+use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS};
 use crate::chain::transaction::{OutPoint, TransactionData};
 use crate::sign::ecdsa::EcdsaChannelSigner;
 use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
@@ -3656,7 +3656,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 		// monitor update to the user, even if we return one).
 		// See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
 		if !self.channel_state.is_pre_funded_state() {
-			self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
+			self.latest_monitor_update_id += 1;
 			Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), ChannelMonitorUpdate {
 				update_id: self.latest_monitor_update_id,
 				counterparty_node_id: Some(self.counterparty_node_id),

File diff suppressed because it is too large.
@@ -15,7 +15,7 @@ use crate::chain;
 use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
 use crate::chain::chaininterface::LowerBoundedFeeEstimator;
 use crate::chain::channelmonitor;
-use crate::chain::channelmonitor::{Balance, CLOSED_CHANNEL_UPDATE_ID, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
+use crate::chain::channelmonitor::{Balance, ChannelMonitorUpdateStep, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use crate::chain::transaction::OutPoint;
 use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, OutputSpender, SignerProvider};
 use crate::events::{Event, FundingInfo, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
@@ -11005,7 +11005,8 @@ fn test_close_in_funding_batch() {
 		let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
 		let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
 		assert_eq!(monitor_updates_1.len(), 1);
-		assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
+		assert_eq!(monitor_updates_1[0].updates.len(), 1);
+		assert!(matches!(monitor_updates_1[0].updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. }));
 	}
 
 	let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
@@ -11092,10 +11093,12 @@ fn test_batch_funding_close_after_funding_signed() {
 		let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
 		let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
 		assert_eq!(monitor_updates_1.len(), 1);
-		assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
+		assert_eq!(monitor_updates_1[0].updates.len(), 1);
+		assert!(matches!(monitor_updates_1[0].updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. }));
 		let monitor_updates_2 = monitor_updates.get(&channel_id_2).unwrap();
 		assert_eq!(monitor_updates_2.len(), 1);
-		assert_eq!(monitor_updates_2[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
+		assert_eq!(monitor_updates_2[0].updates.len(), 1);
+		assert!(matches!(monitor_updates_2[0].updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. }));
 	}
 	let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
 	match msg_events[0] {
@@ -2302,9 +2302,6 @@ fn do_test_restored_packages_retry(check_old_monitor_retries_after_upgrade: bool
 
 	// Connecting more blocks should result in the HTLC transactions being rebroadcast.
 	connect_blocks(&nodes[0], crate::chain::package::LOW_FREQUENCY_BUMP_INTERVAL);
-	if check_old_monitor_retries_after_upgrade {
-		check_added_monitors(&nodes[0], 1);
-	}
 	{
 		let txn = nodes[0].tx_broadcaster.txn_broadcast();
 		assert_eq!(txn.len(), 1);
@@ -3014,7 +3011,6 @@ fn do_test_anchors_monitor_fixes_counterparty_payment_script_on_reload(confirm_c
 	// If we saw the commitment before our `counterparty_payment_script` was fixed, we'll never
 	// get the spendable output event for the `to_remote` output, so we'll need to get it
 	// manually via `get_spendable_outputs`.
-	check_added_monitors(&nodes[1], 1);
 	let outputs = get_monitor!(nodes[1], chan_id).get_spendable_outputs(&commitment_tx, commitment_tx_conf_height);
 	assert_eq!(outputs.len(), 1);
 	let spend_tx = nodes[1].keys_manager.backing.spend_spendable_outputs(
@@ -993,7 +993,6 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 	nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
 
 	nodes[0].node.test_process_background_events();
-	check_added_monitors(&nodes[0], 1);
 
 	let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
 	reconnect_args.send_channel_ready = (true, true);
@@ -1023,7 +1022,6 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 	nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
 
 	nodes[0].node.test_process_background_events();
-	check_added_monitors(&nodes[0], 1);
 
 	reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 
@@ -1162,7 +1160,6 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload(persist_manager_post_event: bo
 	let height = nodes[0].blocks.lock().unwrap().len() as u32 - 1;
 	nodes[0].chain_monitor.chain_monitor.block_connected(&claim_block, height);
 	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
-	check_added_monitors(&nodes[0], 1);
 }
 
 #[test]
@@ -3522,7 +3519,6 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint:
 	reload_node!(nodes[0], test_default_channel_config(), &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister_c, chain_monitor_c, nodes_0_deserialized_c);
 	let events = nodes[0].node.get_and_clear_pending_events();
 	assert!(events.is_empty());
-	check_added_monitors(&nodes[0], 1);
 }
 
 #[test]
@@ -11,7 +11,7 @@
 
 use crate::chain::{ChannelMonitorUpdateStatus, Watch};
 use crate::chain::chaininterface::LowerBoundedFeeEstimator;
-use crate::chain::channelmonitor::{CLOSED_CHANNEL_UPDATE_ID, ChannelMonitor};
+use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateStep};
 use crate::sign::EntropySource;
 use crate::chain::transaction::OutPoint;
 use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
@@ -1264,7 +1264,8 @@ fn test_reload_partial_funding_batch() {
 		let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
 		let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
 		assert_eq!(monitor_updates_1.len(), 1);
-		assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
+		assert_eq!(monitor_updates_1[0].updates.len(), 1);
+		assert!(matches!(monitor_updates_1[0].updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. }));
 	}
 
 	// The funding transaction should not have been broadcast, but we broadcast the force-close
@@ -311,6 +311,10 @@ impl<T> Mutex<T> {
 		}
 		res
 	}
+
+	pub fn get_mut<'a>(&'a mut self) -> LockResult<&'a mut T> {
+		self.inner.get_mut().map_err(|_| ())
+	}
 }
 
 impl<'a, T: 'a> LockTestExt<'a> for Mutex<T> {
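The get_mut added above mirrors std::sync::Mutex::get_mut; a small sketch with the std type showing the behavior these test wrappers follow: exclusive (&mut) access reaches the inner value without taking the lock.

use std::sync::Mutex;

fn main() {
	let mut counter = Mutex::new(0u32);
	// No lock needed: &mut proves exclusive access to the Mutex.
	*counter.get_mut().unwrap() += 1;
	assert_eq!(*counter.lock().unwrap(), 1);
}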
@@ -40,6 +40,10 @@ impl<T> Mutex<T> {
 	pub fn into_inner(self) -> LockResult<T> {
 		Ok(self.inner.into_inner())
 	}
+
+	pub fn get_mut<'a>(&'a mut self) -> LockResult<&'a mut T> {
+		Ok(self.inner.get_mut())
+	}
 }
 
 impl<'a, T: 'a> LockTestExt<'a> for Mutex<T> {
@@ -21,9 +21,7 @@ use crate::{io, log_error};
 use crate::chain;
 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
 use crate::chain::chainmonitor::Persist;
-use crate::chain::channelmonitor::{
-	ChannelMonitor, ChannelMonitorUpdate, CLOSED_CHANNEL_UPDATE_ID,
-};
+use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate};
 use crate::chain::transaction::OutPoint;
 use crate::ln::channelmanager::AChannelManager;
 use crate::routing::gossip::NetworkGraph;
@@ -728,16 +726,17 @@ where
 	/// - No full monitor is found in [`KVStore`]
 	/// - The number of pending updates exceeds `maximum_pending_updates` as given to [`Self::new`]
 	/// - LDK commands re-persisting the entire monitor through this function, specifically when
-	///   `update` is `None`.
-	/// - The update is at [`CLOSED_CHANNEL_UPDATE_ID`]
+	///   `update` is `None`.
+	/// - The update is at [`u64::MAX`], indicating an update generated by pre-0.1 LDK.
 	fn update_persisted_channel(
 		&self, funding_txo: OutPoint, update: Option<&ChannelMonitorUpdate>,
 		monitor: &ChannelMonitor<ChannelSigner>,
 	) -> chain::ChannelMonitorUpdateStatus {
+		const LEGACY_CLOSED_CHANNEL_UPDATE_ID: u64 = u64::MAX;
 		if let Some(update) = update {
-			if update.update_id != CLOSED_CHANNEL_UPDATE_ID
-				&& update.update_id % self.maximum_pending_updates != 0
-			{
+			let persist_update = update.update_id != LEGACY_CLOSED_CHANNEL_UPDATE_ID
+				&& update.update_id % self.maximum_pending_updates != 0;
+			if persist_update {
 				let monitor_name = MonitorName::from(funding_txo);
 				let update_name = UpdateName::from(update.update_id);
 				match self.kv_store.write(
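A hedged restatement of the branch condition above as a hypothetical standalone helper: a differential update is written only when its ID is neither the legacy closed sentinel nor on a consolidation boundary; otherwise the full monitor is re-persisted.

fn writes_differential_update(update_id: u64, maximum_pending_updates: u64) -> bool {
	const LEGACY_CLOSED_CHANNEL_UPDATE_ID: u64 = u64::MAX;
	update_id != LEGACY_CLOSED_CHANNEL_UPDATE_ID
		&& update_id % maximum_pending_updates != 0
}

fn main() {
	// With maximum_pending_updates = 5: IDs 1-4 store per-update files, ID 5 re-persists the monitor.
	assert!(writes_differential_update(3, 5));
	assert!(!writes_differential_update(5, 5));
	// Legacy pre-0.1 closed updates always trigger a full monitor persist.
	assert!(!writes_differential_update(u64::MAX, 5));
}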
@@ -764,7 +763,7 @@ where
 			// In case of channel-close monitor update, we need to read old monitor before persisting
 			// the new one in order to determine the cleanup range.
 			let maybe_old_monitor = match monitor.get_latest_update_id() {
-				CLOSED_CHANNEL_UPDATE_ID => self.read_monitor(&monitor_name).ok(),
+				LEGACY_CLOSED_CHANNEL_UPDATE_ID => self.read_monitor(&monitor_name).ok(),
 				_ => None,
 			};
 
@@ -772,23 +771,24 @@ where
 			let monitor_update_status = self.persist_new_channel(funding_txo, monitor);
 
 			if let chain::ChannelMonitorUpdateStatus::Completed = monitor_update_status {
-				let cleanup_range =
-					if monitor.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
-						// If there is an error while reading old monitor, we skip clean up.
-						maybe_old_monitor.map(|(_, ref old_monitor)| {
-							let start = old_monitor.get_latest_update_id();
-							// We never persist an update with update_id = CLOSED_CHANNEL_UPDATE_ID
-							let end = cmp::min(
-								start.saturating_add(self.maximum_pending_updates),
-								CLOSED_CHANNEL_UPDATE_ID - 1,
-							);
-							(start, end)
-						})
-					} else {
-						let end = monitor.get_latest_update_id();
-						let start = end.saturating_sub(self.maximum_pending_updates);
-						Some((start, end))
-					};
+				let channel_closed_legacy =
+					monitor.get_latest_update_id() == LEGACY_CLOSED_CHANNEL_UPDATE_ID;
+				let cleanup_range = if channel_closed_legacy {
+					// If there is an error while reading old monitor, we skip clean up.
+					maybe_old_monitor.map(|(_, ref old_monitor)| {
+						let start = old_monitor.get_latest_update_id();
+						// We never persist an update with the legacy closed update_id
+						let end = cmp::min(
+							start.saturating_add(self.maximum_pending_updates),
+							LEGACY_CLOSED_CHANNEL_UPDATE_ID - 1,
+						);
+						(start, end)
+					})
+				} else {
+					let end = monitor.get_latest_update_id();
+					let start = end.saturating_sub(self.maximum_pending_updates);
+					Some((start, end))
+				};
 
 				if let Some((start, end)) = cleanup_range {
 					self.cleanup_in_range(monitor_name, start, end);
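A standalone sketch (hypothetical helper) of the cleanup-range computation above; the legacy-closed branch falls back to the previously persisted monitor's ID when it could be read, and skips cleanup otherwise.

fn stale_update_cleanup_range(
	latest_update_id: u64, old_monitor_latest_id: Option<u64>, maximum_pending_updates: u64,
) -> Option<(u64, u64)> {
	const LEGACY_CLOSED_CHANNEL_UPDATE_ID: u64 = u64::MAX;
	if latest_update_id == LEGACY_CLOSED_CHANNEL_UPDATE_ID {
		// Pre-0.1 close: clean forward from the previously persisted monitor's ID.
		// If the old monitor could not be read, skip cleanup entirely.
		old_monitor_latest_id.map(|start| {
			let end = core::cmp::min(
				start.saturating_add(maximum_pending_updates),
				LEGACY_CLOSED_CHANNEL_UPDATE_ID - 1,
			);
			(start, end)
		})
	} else {
		// Monotonic IDs: at most the last `maximum_pending_updates` update files can be stale.
		let end = latest_update_id;
		Some((end.saturating_sub(maximum_pending_updates), end))
	}
}

fn main() {
	assert_eq!(stale_update_cleanup_range(10, None, 5), Some((5, 10)));
	assert_eq!(stale_update_cleanup_range(u64::MAX, Some(100), 5), Some((100, 105)));
	assert_eq!(stale_update_cleanup_range(u64::MAX, None, 5), None);
}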
@@ -1185,24 +1185,19 @@ mod tests {
 				// check that when we read it, we got the right update id
 				assert_eq!(mon.get_latest_update_id(), $expected_update_id);
 
-				// if the CM is at consolidation threshold, ensure no updates are stored.
 				let monitor_name = MonitorName::from(mon.get_funding_txo().0);
-				if mon.get_latest_update_id() % persister_0_max_pending_updates == 0
-					|| mon.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID
-				{
-					assert_eq!(
-						persister_0
-							.kv_store
-							.list(
-								CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
-								monitor_name.as_str()
-							)
-							.unwrap()
-							.len(),
-						0,
-						"updates stored when they shouldn't be in persister 0"
-					);
-				}
+				assert_eq!(
+					persister_0
+						.kv_store
+						.list(
+							CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
+							monitor_name.as_str()
+						)
+						.unwrap()
+						.len() as u64,
+					mon.get_latest_update_id() % persister_0_max_pending_updates,
+					"Wrong number of updates stored in persister 0",
+				);
 			}
 			persisted_chan_data_1 =
 				persister_1.read_all_channel_monitors_with_updates().unwrap();
@@ -1210,23 +1205,18 @@ mod tests {
 			for (_, mon) in persisted_chan_data_1.iter() {
 				assert_eq!(mon.get_latest_update_id(), $expected_update_id);
 				let monitor_name = MonitorName::from(mon.get_funding_txo().0);
-				// if the CM is at consolidation threshold, ensure no updates are stored.
-				if mon.get_latest_update_id() % persister_1_max_pending_updates == 0
-					|| mon.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID
-				{
-					assert_eq!(
-						persister_1
-							.kv_store
-							.list(
-								CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
-								monitor_name.as_str()
-							)
-							.unwrap()
-							.len(),
-						0,
-						"updates stored when they shouldn't be in persister 1"
-					);
-				}
+				assert_eq!(
+					persister_1
+						.kv_store
+						.list(
+							CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
+							monitor_name.as_str()
+						)
+						.unwrap()
+						.len() as u64,
+					mon.get_latest_update_id() % persister_1_max_pending_updates,
+					"Wrong number of updates stored in persister 1",
+				);
 			}
 		};
 	}
@@ -1283,28 +1273,8 @@ mod tests {
 		check_added_monitors!(nodes[1], 1);
 
 		// Make sure everything is persisted as expected after close.
-		check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);
-
-		// Make sure the expected number of stale updates is present.
-		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates().unwrap();
-		let (_, monitor) = &persisted_chan_data[0];
-		let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
-		// The channel should have 0 updates, as it wrote a full monitor and consolidated.
-		assert_eq!(
-			persister_0
-				.kv_store
-				.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str())
-				.unwrap()
-				.len(),
-			0
-		);
-		assert_eq!(
-			persister_1
-				.kv_store
-				.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str())
-				.unwrap()
-				.len(),
-			0
+		check_persisted_data!(
+			persister_0_max_pending_updates * 2 * EXPECTED_UPDATES_PER_PAYMENT + 1
 		);
 	}
 
@@ -1452,40 +1422,6 @@ mod tests {
 				UpdateName::from(1).as_str()
 			)
 			.is_err());
-
-		// Force close.
-		let chan_id = nodes[0].node.list_channels()[0].channel_id;
-		let node_id_1 = nodes[1].node.get_our_node_id();
-		let err_msg = "Channel force-closed".to_string();
-		nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &node_id_1, err_msg).unwrap();
-		let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) };
-		check_closed_event(&nodes[0], 1, reason, false, &[node_id_1], 100000);
-		check_closed_broadcast!(nodes[0], true);
-		check_added_monitors!(nodes[0], 1);
-
-		// Write an update near u64::MAX
-		persister_0
-			.kv_store
-			.write(
-				CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
-				monitor_name.as_str(),
-				UpdateName::from(u64::MAX - 1).as_str(),
-				&[0u8; 1],
-			)
-			.unwrap();
-
-		// Do the stale update cleanup
-		persister_0.cleanup_stale_updates(false).unwrap();
-
-		// Confirm the stale update is unreadable/gone
-		assert!(persister_0
-			.kv_store
-			.read(
-				CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
-				monitor_name.as_str(),
-				UpdateName::from(u64::MAX - 1).as_str()
-			)
-			.is_err());
 	}
 
 	fn persist_fn<P: Deref, ChannelSigner: EcdsaChannelSigner>(_persist: P) -> bool
pending_changelog/matt-no-upgrade-skip.txt (new file, 6 lines)
@@ -0,0 +1,6 @@
+## Backwards Compatibility
+ * Nodes with pending forwarded HTLCs or unclaimed payments cannot be
+   upgraded directly from 0.0.123 or earlier to 0.1. Instead, they must
+   first either resolve all pending HTLCs (including those pending
+   resolution on-chain), or run 0.0.124 and resolve any HTLCs that were
+   originally forwarded or received running 0.0.123 or earlier.