Mirror of https://github.com/lightningdevkit/rust-lightning.git, synced 2025-01-18 21:34:48 +01:00
Avoid startup `PeerState` entries for peers with unfunded channels
If a peer creates a channel with us which never reaches the funding stage (or never gets any commitment updates after creation), we'll avoid inserting the `update_id` into `closed_channel_monitor_update_ids` at runtime, to avoid keeping a `PeerState` entry around for no reason.

However, on startup we still create a `ChannelMonitorUpdate` with a `ChannelForceClosed` update step to ensure the `ChannelMonitor` is locked and shut down. This is redundant, and results in a bunch of on-startup `ChannelMonitorUpdate`s for any old but non-archived `ChannelMonitor`s.

Instead, here, we check whether a `ChannelMonitor` already saw a `ChannelForceClosed` update step before we generate the on-startup `ChannelMonitorUpdate`. This also allows us to skip the `closed_channel_monitor_update_ids` insertion, as we can be confident we'll never have a `ChannelMonitorUpdate` for this channel at all.
This commit is contained in:
parent
c99d3d785d
commit
4582b201ea
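The core of the change is a guard on the startup path: only queue a force-close `ChannelMonitorUpdate` (and only track the monitor's latest `update_id` in `closed_channel_monitor_update_ids`) when the monitor hasn't already processed a `ChannelForceClosed` step. A minimal standalone sketch of that decision logic, with a hypothetical `MonitorInfo` struct standing in for the real `ChannelMonitor` accessors:

```rust
/// Hypothetical stand-in for the two `ChannelMonitor` accessors the real
/// startup path consults (`offchain_closed()` and `get_latest_update_id()`).
struct MonitorInfo {
    offchain_closed: bool,
    latest_update_id: u64,
}

/// Decide what the startup path needs to do for a leftover monitor whose
/// channel is gone from the `ChannelManager`.
fn startup_actions(monitor: &MonitorInfo) -> (bool, bool) {
    // Track the update_id in `closed_channel_monitor_update_ids` only if the
    // monitor may still need further updates: either it was never told the
    // channel closed, or it saw updates beyond the initial closure (a fresh
    // monitor starts at update_id 0 and moves to 1 on its closure update).
    let track_update_id = !monitor.offchain_closed || monitor.latest_update_id > 1;
    // Queue a ChannelForceClosed update only if the monitor never saw one.
    let queue_force_close = !monitor.offchain_closed;
    (track_update_id, queue_force_close)
}

fn main() {
    // An old, already-closed monitor with no later updates: nothing to do,
    // so no `PeerState` entry is kept around for it.
    let stale = MonitorInfo { offchain_closed: true, latest_update_id: 1 };
    assert_eq!(startup_actions(&stale), (false, false));

    // A monitor that never saw the closure: queue the force-close update.
    let open = MonitorInfo { offchain_closed: false, latest_update_id: 5 };
    assert_eq!(startup_actions(&open), (true, true));
}
```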
@@ -1711,6 +1711,12 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
        self.inner.lock().unwrap().get_cur_holder_commitment_number()
    }

    /// Gets whether we've been notified that this channel is closed by the `ChannelManager` (i.e.
    /// via a [`ChannelMonitorUpdateStep::ChannelForceClosed`]).
    pub(crate) fn offchain_closed(&self) -> bool {
        self.inner.lock().unwrap().lockdown_from_offchain
    }

    /// Gets the `node_id` of the counterparty for this channel.
    ///
    /// Will be `None` for channels constructed on LDK versions prior to 0.0.110 and always `Some`
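For context, `lockdown_from_offchain` is the flag the monitor records once it applies a `ChannelForceClosed` update step, so the new accessor answers "has the `ChannelManager` already told this monitor the channel is dead?". A self-contained sketch of the pattern, using hypothetical `Inner`/`Monitor` types rather than the real LDK structures:

```rust
use std::sync::Mutex;

// Hypothetical miniature of the ChannelMonitor/inner split: state lives
// behind a Mutex, public accessors lock and read.
struct Inner {
    lockdown_from_offchain: bool,
}

struct Monitor {
    inner: Mutex<Inner>,
}

impl Monitor {
    // Applying a force-close style update flips the flag.
    fn apply_force_close(&self) {
        self.inner.lock().unwrap().lockdown_from_offchain = true;
    }

    // The new accessor: has an off-chain (ChannelManager-driven) close
    // already been recorded?
    fn offchain_closed(&self) -> bool {
        self.inner.lock().unwrap().lockdown_from_offchain
    }
}

fn main() {
    let m = Monitor { inner: Mutex::new(Inner { lockdown_from_offchain: false }) };
    assert!(!m.offchain_closed());
    m.apply_force_close();
    assert!(m.offchain_closed()); // startup path can now skip re-closing
}
```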
@@ -7253,8 +7253,6 @@ where
        let prev_channel_id = hop_data.channel_id;
        let prev_user_channel_id = hop_data.user_channel_id;
        let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
        #[cfg(debug_assertions)]
        let claiming_chan_funding_outpoint = hop_data.outpoint;
        self.claim_funds_from_hop(hop_data, payment_preimage, None,
            |htlc_claim_value_msat, definitely_duplicate| {
                let chan_to_release =
@@ -7279,61 +7277,6 @@ where
                    // monitor updates still in flight. In that case, we shouldn't
                    // immediately free, but instead let that monitor update complete
                    // in the background.
                    #[cfg(debug_assertions)] {
                        let background_events = self.pending_background_events.lock().unwrap();
                        // There should be a `BackgroundEvent` pending...
                        assert!(background_events.iter().any(|ev| {
                            match ev {
                                BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
                                    funding_txo, update, ..
                                } => {
                                    if *funding_txo == claiming_chan_funding_outpoint {
                                        // to apply a monitor update that blocked the claiming channel,
                                        assert!(update.updates.iter().any(|upd|
                                            if let ChannelMonitorUpdateStep::PaymentPreimage {
                                                payment_preimage: update_preimage, ..
                                            } = upd {
                                                payment_preimage == *update_preimage
                                            } else {
                                                false
                                            }
                                        ), "{:?}", update);
                                        true
                                    } else if *funding_txo == next_channel_outpoint {
                                        // or the channel we'd unblock is already closed,
                                        assert!(update.updates.iter().any(|upd|
                                            if let ChannelMonitorUpdateStep::ChannelForceClosed { .. } = upd {
                                                true
                                            } else {
                                                false
                                            }
                                        ), "{:?}", update);
                                        true
                                    } else { false }
                                },
                                // or the channel we'd unblock is already closed (for an
                                // old channel),
                                BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup(
                                    (funding_txo, _channel_id, monitor_update)
                                ) => {
                                    if *funding_txo == next_channel_outpoint {
                                        assert_eq!(monitor_update.updates.len(), 1);
                                        assert!(matches!(
                                            monitor_update.updates[0],
                                            ChannelMonitorUpdateStep::ChannelForceClosed { .. }
                                        ));
                                        true
                                    } else { false }
                                },
                                // or the monitor update has completed and will unblock
                                // immediately once we get going.
                                BackgroundEvent::MonitorUpdatesComplete {
                                    channel_id, ..
                                } =>
                                    *channel_id == prev_channel_id,
                            }
                        }), "{:?}", *background_events);
                    }
                    (None, None)
                } else if definitely_duplicate {
                    if let Some(other_chan) = chan_to_release {
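The deleted assertion encodes a three-way invariant: when a claim turns out to be a definite duplicate with no blocking action to release, some pending background event must explain why. A generic standalone sketch of that "at least one event satisfies one of several predicates" pattern, with a hypothetical event type rather than LDK's real `BackgroundEvent`:

```rust
// Hypothetical miniature of the invariant the removed debug assertion
// checked: among pending events, at least one must account for the
// duplicate claim, in one of three ways.
#[derive(Debug)]
enum Event {
    UpdateRegenerated { channel: u32, has_preimage: bool },
    ClosedMonitorRegenerated { channel: u32 },
    UpdatesComplete { channel: u32 },
}

fn explains_duplicate(events: &[Event], claiming: u32, next: u32) -> bool {
    events.iter().any(|ev| match ev {
        // ...an update replaying the preimage on the claiming channel,
        Event::UpdateRegenerated { channel, has_preimage } =>
            *channel == claiming && *has_preimage,
        // ...or a force-close of the channel we'd unblock,
        Event::ClosedMonitorRegenerated { channel } => *channel == next,
        // ...or the claiming channel's updates have already completed.
        Event::UpdatesComplete { channel } => *channel == claiming,
    })
}

fn main() {
    let events = vec![Event::ClosedMonitorRegenerated { channel: 7 }];
    assert!(explains_duplicate(&events, 3, 7), "{:?}", events);
}
```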
@@ -12632,6 +12575,28 @@ where

    for (funding_txo, monitor) in args.channel_monitors.iter() {
        if !funding_txo_set.contains(funding_txo) {
            if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
                // If the ChannelMonitor had any updates, we may need to update it further and
                // thus track it in `closed_channel_monitor_update_ids`. If the channel never
                // had any updates at all, there can't be any HTLCs pending which we need to
                // claim.
                // Note that a `ChannelMonitor` is created with `update_id` 0, and after we
                // provide it with a closure update its `update_id` will be at 1.
                if !monitor.offchain_closed() || monitor.get_latest_update_id() > 1 {
                    per_peer_state.entry(counterparty_node_id)
                        .or_insert_with(|| Mutex::new(empty_peer_state()))
                        .lock().unwrap()
                        .closed_channel_monitor_update_ids.entry(monitor.channel_id())
                        .and_modify(|v| *v = cmp::max(monitor.get_latest_update_id(), *v))
                        .or_insert(monitor.get_latest_update_id());
                }
            }

            if monitor.offchain_closed() {
                // We already applied a ChannelForceClosed update.
                continue;
            }

            let logger = WithChannelMonitor::from(&args.logger, monitor, None);
            let channel_id = monitor.channel_id();
            log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
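The `closed_channel_monitor_update_ids` bookkeeping above uses the `HashMap` entry API to record the highest `update_id` seen per channel, whether or not the key already exists. A self-contained sketch of that max-tracking idiom, with generic names in place of the LDK types:

```rust
use std::cmp;
use std::collections::HashMap;

fn main() {
    // channel_id -> highest monitor update_id we've seen for it
    let mut closed_update_ids: HashMap<u32, u64> = HashMap::new();

    // First sighting inserts the id; later sightings can only raise it.
    for (channel_id, update_id) in [(1u32, 4u64), (2, 1), (1, 2), (2, 9)] {
        closed_update_ids.entry(channel_id)
            .and_modify(|v| *v = cmp::max(update_id, *v))
            .or_insert(update_id);
    }

    assert_eq!(closed_update_ids[&1], 4); // the later 2 did not lower the recorded 4
    assert_eq!(closed_update_ids[&2], 9);
}
```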
@@ -12650,13 +12615,6 @@ where
                update: monitor_update,
            };
            close_background_events.push(update);

            per_peer_state.entry(counterparty_node_id)
                .or_insert_with(|| Mutex::new(empty_peer_state()))
                .lock().unwrap()
                .closed_channel_monitor_update_ids.entry(monitor.channel_id())
                .and_modify(|v| *v = cmp::max(monitor.get_latest_update_id(), *v))
                .or_insert(monitor.get_latest_update_id());
        } else {
            // This is a fairly old `ChannelMonitor` that hasn't seen an update to its
            // off-chain state since LDK 0.0.118 (as in LDK 0.0.119 any off-chain
@@ -2302,9 +2302,6 @@ fn do_test_restored_packages_retry(check_old_monitor_retries_after_upgrade: bool

    // Connecting more blocks should result in the HTLC transactions being rebroadcast.
    connect_blocks(&nodes[0], crate::chain::package::LOW_FREQUENCY_BUMP_INTERVAL);
    if check_old_monitor_retries_after_upgrade {
        check_added_monitors(&nodes[0], 1);
    }
    {
        let txn = nodes[0].tx_broadcaster.txn_broadcast();
        assert_eq!(txn.len(), 1);
@@ -3014,7 +3011,6 @@ fn do_test_anchors_monitor_fixes_counterparty_payment_script_on_reload(confirm_c
    // If we saw the commitment before our `counterparty_payment_script` was fixed, we'll never
    // get the spendable output event for the `to_remote` output, so we'll need to get it
    // manually via `get_spendable_outputs`.
    check_added_monitors(&nodes[1], 1);
    let outputs = get_monitor!(nodes[1], chan_id).get_spendable_outputs(&commitment_tx, commitment_tx_conf_height);
    assert_eq!(outputs.len(), 1);
    let spend_tx = nodes[1].keys_manager.backing.spend_spendable_outputs(
@@ -993,7 +993,6 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
    nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());

    nodes[0].node.test_process_background_events();
    check_added_monitors(&nodes[0], 1);

    let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
    reconnect_args.send_channel_ready = (true, true);
@@ -1023,7 +1022,6 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
    nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());

    nodes[0].node.test_process_background_events();
    check_added_monitors(&nodes[0], 1);

    reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
@@ -1162,7 +1160,6 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload(persist_manager_post_event: bool
    let height = nodes[0].blocks.lock().unwrap().len() as u32 - 1;
    nodes[0].chain_monitor.chain_monitor.block_connected(&claim_block, height);
    assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
    check_added_monitors(&nodes[0], 1);
}

#[test]
@@ -3522,7 +3519,6 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint: bool) {
    reload_node!(nodes[0], test_default_channel_config(), &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister_c, chain_monitor_c, nodes_0_deserialized_c);
    let events = nodes[0].node.get_and_clear_pending_events();
    assert!(events.is_empty());
    check_added_monitors(&nodes[0], 1);
}

#[test]