Merge pull request #2059 from wpaulino/broadcast-missing-anchors-event

Queue BackgroundEvent to force close channels upon ChannelManager::read
Matt Corallo 2023-03-29 21:54:58 +00:00 committed by GitHub
commit a9534fe6b5
10 changed files with 161 additions and 114 deletions
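At a high level, this change makes `ChannelManager::read` stop broadcasting the latest holder commitment transaction for stale or orphaned channels directly (the `ChannelMonitor::broadcast_latest_holder_commitment_txn` helper is removed below). Instead, deserialization queues a `BackgroundEvent::ClosingMonitorUpdate` carrying a `ChannelForceClosed` monitor update with the `CLOSED_CHANNEL_UPDATE_ID` sentinel, and the broadcast happens once background events are processed (the updated tests drive this with `timer_tick_occurred`). The following sketch of that queue-then-apply flow uses simplified stand-in types rather than LDK's crate-internal ones:

```rust
// Stand-in types; the real `BackgroundEvent`, `ChannelMonitorUpdate` and
// `ChannelMonitorUpdateStep` are crate-internal LDK types with more variants.
const CLOSED_CHANNEL_UPDATE_ID: u64 = u64::MAX;

#[derive(Clone, PartialEq, Eq, Debug)]
enum ChannelMonitorUpdateStep {
    ChannelForceClosed { should_broadcast: bool },
}

#[derive(Clone, PartialEq, Eq, Debug)]
struct ChannelMonitorUpdate {
    update_id: u64,
    updates: Vec<ChannelMonitorUpdateStep>,
}

#[derive(Debug)]
enum BackgroundEvent {
    // The real variant wraps `(OutPoint, ChannelMonitorUpdate)`; a txid/index
    // pair stands in for `OutPoint` here.
    ClosingMonitorUpdate(([u8; 32], u16), ChannelMonitorUpdate),
}

fn main() {
    let mut pending_background_events: Vec<BackgroundEvent> = Vec::new();

    // During `ChannelManager::read`: a stale channel (or a monitor with no
    // corresponding channel) results in a queued force-close update rather
    // than an immediate commitment-transaction broadcast.
    let funding_txo = ([0u8; 32], 0u16);
    pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate(
        funding_txo,
        ChannelMonitorUpdate {
            update_id: CLOSED_CHANNEL_UPDATE_ID,
            updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
        },
    ));

    // Later, when background events are processed (the updated tests drive
    // this via `timer_tick_occurred`), each queued update is applied to its
    // monitor, which is when the commitment transaction is actually broadcast.
    for event in pending_background_events.drain(..) {
        let BackgroundEvent::ClosingMonitorUpdate(txo, update) = event;
        println!("applying {:?} to the monitor for {:?}", update, txo);
    }
}
```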

View file

@@ -141,6 +141,7 @@ mod tests {
use bitcoin::{Txid, TxMerkleNode};
use lightning::chain::ChannelMonitorUpdateStatus;
use lightning::chain::chainmonitor::Persist;
use lightning::chain::channelmonitor::CLOSED_CHANNEL_UPDATE_ID;
use lightning::chain::transaction::OutPoint;
use lightning::{check_closed_broadcast, check_closed_event, check_added_monitors};
use lightning::events::{ClosureReason, MessageSendEventsProvider};
@@ -253,7 +254,7 @@ mod tests {
check_added_monitors!(nodes[1], 1);
// Make sure everything is persisted as expected after close.
check_persisted_data!(11);
check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);
}
// Test that if the persister's path to channel data is read-only, writing a

View file

@@ -69,34 +69,36 @@ use crate::sync::{Mutex, LockTestExt};
/// much smaller than a full [`ChannelMonitor`]. However, for large single commitment transaction
/// updates (e.g. ones during which there are hundreds of HTLCs pending on the commitment
/// transaction), a single update may reach upwards of 1 MiB in serialized size.
#[cfg_attr(any(test, fuzzing, feature = "_test_utils"), derive(PartialEq, Eq))]
#[derive(Clone)]
#[derive(Clone, PartialEq, Eq)]
#[must_use]
pub struct ChannelMonitorUpdate {
pub(crate) updates: Vec<ChannelMonitorUpdateStep>,
/// The sequence number of this update. Updates *must* be replayed in-order according to this
/// sequence number (and updates may panic if they are not). The update_id values are strictly
/// increasing and increase by one for each new update, with one exception specified below.
/// increasing and increase by one for each new update, with two exceptions specified below.
///
/// This sequence number is also used to track up to which points updates which returned
/// [`ChannelMonitorUpdateStatus::InProgress`] have been applied to all copies of a given
/// ChannelMonitor when ChannelManager::channel_monitor_updated is called.
///
/// The only instance where update_id values are not strictly increasing is the case where we
/// allow post-force-close updates with a special update ID of [`CLOSED_CHANNEL_UPDATE_ID`]. See
/// its docs for more details.
/// The only instances we allow where update_id values are not strictly increasing have a
/// special update ID of [`CLOSED_CHANNEL_UPDATE_ID`]. This update ID is used for updates that
/// either force close the channel by broadcasting the latest commitment transaction, or for
/// special post-force-close updates, like providing preimages necessary to claim outputs on the
/// broadcast commitment transaction. See its docs for more details.
///
/// [`ChannelMonitorUpdateStatus::InProgress`]: super::ChannelMonitorUpdateStatus::InProgress
pub update_id: u64,
}
/// If:
/// (1) a channel has been force closed and
/// (2) we receive a preimage from a forward link that allows us to spend an HTLC output on
/// this channel's (the backward link's) broadcasted commitment transaction
/// then we allow the `ChannelManager` to send a `ChannelMonitorUpdate` with this update ID,
/// with the update providing said payment preimage. No other update types are allowed after
/// force-close.
/// The update ID used for a [`ChannelMonitorUpdate`] that is either:
///
/// (1) attempting to force close the channel by broadcasting our latest commitment transaction or
/// (2) providing a preimage (after the channel has been force closed) from a forward link that
/// allows us to spend an HTLC output on this channel's (the backward link's) broadcasted
/// commitment transaction.
///
/// No other [`ChannelMonitorUpdate`]s are allowed after force-close.
pub const CLOSED_CHANNEL_UPDATE_ID: u64 = core::u64::MAX;
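To make the ordering rule described in the doc comments above concrete, here is a small self-contained sketch; `update_is_acceptable` and `StepKind` are hypothetical names used only for illustration, not LDK API:

```rust
const CLOSED_CHANNEL_UPDATE_ID: u64 = u64::MAX;

/// Hypothetical stand-ins for the two `ChannelMonitorUpdateStep` kinds allowed
/// once a channel has been force closed.
enum StepKind {
    ChannelForceClosed,
    PaymentPreimage,
    Other,
}

/// Mirrors the documented invariant: IDs normally advance by exactly one, and
/// only force-close or preimage steps may use the post-force-close sentinel
/// (redundant force-close updates may repeat it).
fn update_is_acceptable(latest_applied: u64, incoming_id: u64, step: &StepKind) -> bool {
    if incoming_id == CLOSED_CHANNEL_UPDATE_ID {
        matches!(step, StepKind::ChannelForceClosed | StepKind::PaymentPreimage)
    } else {
        incoming_id == latest_applied + 1
    }
}

fn main() {
    assert!(update_is_acceptable(7, 8, &StepKind::Other));
    assert!(update_is_acceptable(7, CLOSED_CHANNEL_UPDATE_ID, &StepKind::ChannelForceClosed));
    assert!(update_is_acceptable(7, CLOSED_CHANNEL_UPDATE_ID, &StepKind::PaymentPreimage));
    assert!(!update_is_acceptable(7, 9, &StepKind::Other));
    assert!(!update_is_acceptable(7, CLOSED_CHANNEL_UPDATE_ID, &StepKind::Other));
}
```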
impl Writeable for ChannelMonitorUpdate {
@@ -488,8 +490,7 @@ impl_writeable_tlv_based_enum_upgradable!(OnchainEvent,
);
#[cfg_attr(any(test, fuzzing, feature = "_test_utils"), derive(PartialEq, Eq))]
#[derive(Clone)]
#[derive(Clone, PartialEq, Eq)]
pub(crate) enum ChannelMonitorUpdateStep {
LatestHolderCommitmentTXInfo {
commitment_tx: HolderCommitmentTransaction,
@@ -1201,17 +1202,6 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
payment_hash, payment_preimage, broadcaster, fee_estimator, logger)
}
pub(crate) fn broadcast_latest_holder_commitment_txn<B: Deref, L: Deref>(
&self,
broadcaster: &B,
logger: &L,
) where
B::Target: BroadcasterInterface,
L::Target: Logger,
{
self.inner.lock().unwrap().broadcast_latest_holder_commitment_txn(broadcaster, logger);
}
/// Updates a ChannelMonitor on the basis of some new information provided by the Channel
/// itself.
///
@@ -2265,14 +2255,22 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
{
log_info!(logger, "Applying update to monitor {}, bringing update_id from {} to {} with {} changes.",
log_funding_info!(self), self.latest_update_id, updates.update_id, updates.updates.len());
// ChannelMonitor updates may be applied after force close if we receive a
// preimage for a broadcasted commitment transaction HTLC output that we'd
// like to claim on-chain. If this is the case, we no longer have guaranteed
// access to the monitor's update ID, so we use a sentinel value instead.
// ChannelMonitor updates may be applied after force close if we receive a preimage for a
// broadcasted commitment transaction HTLC output that we'd like to claim on-chain. If this
// is the case, we no longer have guaranteed access to the monitor's update ID, so we use a
// sentinel value instead.
//
// The `ChannelManager` may also queue redundant `ChannelForceClosed` updates if it still
// thinks the channel needs to have its commitment transaction broadcast, so we'll allow
// them as well.
if updates.update_id == CLOSED_CHANNEL_UPDATE_ID {
assert_eq!(updates.updates.len(), 1);
match updates.updates[0] {
ChannelMonitorUpdateStep::PaymentPreimage { .. } => {},
ChannelMonitorUpdateStep::ChannelForceClosed { .. } => {},
// We should have already seen a `ChannelForceClosed` update if we're trying to
// provide a preimage at this point.
ChannelMonitorUpdateStep::PaymentPreimage { .. } =>
debug_assert_eq!(self.latest_update_id, CLOSED_CHANNEL_UPDATE_ID),
_ => {
log_error!(logger, "Attempted to apply post-force-close ChannelMonitorUpdate of type {}", updates.updates[0].variant_name());
panic!("Attempted to apply post-force-close ChannelMonitorUpdate that wasn't providing a payment preimage");
@@ -2364,6 +2362,13 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
},
}
}
// If the updates succeeded and we were in an already closed channel state, then there's no
// need to refuse any updates we expect to receive after seeing a confirmed commitment.
if ret.is_ok() && updates.update_id == CLOSED_CHANNEL_UPDATE_ID && self.latest_update_id == updates.update_id {
return Ok(());
}
self.latest_update_id = updates.update_id;
if ret.is_ok() && self.funding_spend_seen {

View file

@@ -33,7 +33,7 @@ use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::chain::keysinterface::{WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
@@ -6081,7 +6081,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
// monitor update to the user, even if we return one).
// See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
self.latest_monitor_update_id += 1;
self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
Some((funding_txo, ChannelMonitorUpdate {
update_id: self.latest_monitor_update_id,
updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],

View file

@@ -7354,6 +7354,7 @@ where
let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut channel_closures = Vec::new();
let mut pending_background_events = Vec::new();
for _ in 0..channel_count {
let mut channel: Channel<<SP::Target as SignerProvider>::Signer> = Channel::read(reader, (
&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
@@ -7383,9 +7384,11 @@ where
log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
if let Some(monitor_update) = monitor_update {
pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate(monitor_update));
}
failed_htlcs.append(&mut new_failed_htlcs);
monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
channel_closures.push(events::Event::ChannelClosed {
channel_id: channel.channel_id(),
user_channel_id: channel.get_user_id(),
@@ -7450,10 +7453,13 @@ where
}
}
for (funding_txo, monitor) in args.channel_monitors.iter_mut() {
for (funding_txo, _) in args.channel_monitors.iter() {
if !funding_txo_set.contains(funding_txo) {
log_info!(args.logger, "Broadcasting latest holder commitment transaction for closed channel {}", log_bytes!(funding_txo.to_channel_id()));
monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
let monitor_update = ChannelMonitorUpdate {
update_id: CLOSED_CHANNEL_UPDATE_ID,
updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
};
pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((*funding_txo, monitor_update)));
}
}
@@ -7506,10 +7512,17 @@ where
}
let background_event_count: u64 = Readable::read(reader)?;
let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
for _ in 0..background_event_count {
match <u8 as Readable>::read(reader)? {
0 => pending_background_events_read.push(BackgroundEvent::ClosingMonitorUpdate((Readable::read(reader)?, Readable::read(reader)?))),
0 => {
let (funding_txo, monitor_update): (OutPoint, ChannelMonitorUpdate) = (Readable::read(reader)?, Readable::read(reader)?);
if pending_background_events.iter().find(|e| {
let BackgroundEvent::ClosingMonitorUpdate((pending_funding_txo, pending_monitor_update)) = e;
*pending_funding_txo == funding_txo && *pending_monitor_update == monitor_update
}).is_none() {
pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)));
}
}
_ => return Err(DecodeError::InvalidValue),
}
}
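The loop above only appends a deserialized `ClosingMonitorUpdate` when an equal event was not already regenerated earlier during `read` (for a stale channel or an orphaned monitor). A generic, self-contained sketch of that de-duplication, with `push_unique` as a hypothetical helper name:

```rust
/// Push `event` only if an equal one is not already queued.
fn push_unique<T: PartialEq>(queue: &mut Vec<T>, event: T) {
    if !queue.iter().any(|existing| *existing == event) {
        queue.push(event);
    }
}

fn main() {
    let mut queue = vec![(1u64, "close")];
    push_unique(&mut queue, (1, "close")); // duplicate, ignored
    push_unique(&mut queue, (2, "close")); // new, appended
    assert_eq!(queue.len(), 2);
}
```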
@@ -7884,7 +7897,7 @@ where
per_peer_state: FairRwLock::new(per_peer_state),
pending_events: Mutex::new(pending_events_read),
pending_background_events: Mutex::new(pending_background_events_read),
pending_background_events: Mutex::new(pending_background_events),
total_consistency_lock: RwLock::new(()),
persistence_notifier: Notifier::new(),

View file

@@ -1855,15 +1855,18 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
let chan_a = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 20_000_000);
let chan_b = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 20_000_000);
// Serialize Bob with the initial state of both channels, which we'll use later.
let bob_serialized = nodes[1].node.encode();
// Route two payments for each channel from Alice to Bob to lock in the HTLCs.
let payment_a = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
let payment_b = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
let payment_c = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
let payment_d = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
// Serialize Bob with the HTLCs locked in. We'll restart Bob later on with the state at this
// point such that he broadcasts a revoked commitment transaction.
let bob_serialized = nodes[1].node.encode();
// Serialize Bob's monitors with the HTLCs locked in. We'll restart Bob later on with the state
// at this point such that he broadcasts a revoked commitment transaction with the HTLCs
// present.
let bob_serialized_monitor_a = get_monitor!(nodes[1], chan_a.2).encode();
let bob_serialized_monitor_b = get_monitor!(nodes[1], chan_b.2).encode();
@@ -1893,30 +1896,26 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
}
}
// Bob force closes by broadcasting his revoked state for each channel.
nodes[1].node.force_close_broadcasting_latest_txn(&chan_a.2, &nodes[0].node.get_our_node_id()).unwrap();
check_added_monitors(&nodes[1], 1);
check_closed_broadcast(&nodes[1], 1, true);
check_closed_event!(&nodes[1], 1, ClosureReason::HolderForceClosed);
let revoked_commitment_a = {
let mut txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(txn.len(), 1);
let revoked_commitment = txn.pop().unwrap();
assert_eq!(revoked_commitment.output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
check_spends!(revoked_commitment, chan_a.3);
revoked_commitment
};
nodes[1].node.force_close_broadcasting_latest_txn(&chan_b.2, &nodes[0].node.get_our_node_id()).unwrap();
check_added_monitors(&nodes[1], 1);
check_closed_broadcast(&nodes[1], 1, true);
check_closed_event!(&nodes[1], 1, ClosureReason::HolderForceClosed);
let revoked_commitment_b = {
let mut txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(txn.len(), 1);
let revoked_commitment = txn.pop().unwrap();
assert_eq!(revoked_commitment.output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
check_spends!(revoked_commitment, chan_b.3);
revoked_commitment
// Bob force closes by restarting with the outdated state, prompting the ChannelMonitors to
// broadcast the latest commitment transaction known to them, which in our case is the one with
// the HTLCs still pending.
nodes[1].node.timer_tick_occurred();
check_added_monitors(&nodes[1], 2);
check_closed_event!(&nodes[1], 2, ClosureReason::OutdatedChannelManager);
let (revoked_commitment_a, revoked_commitment_b) = {
let txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(txn.len(), 2);
assert_eq!(txn[0].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
assert_eq!(txn[1].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
if txn[0].input[0].previous_output.txid == chan_a.3.txid() {
check_spends!(&txn[0], &chan_a.3);
check_spends!(&txn[1], &chan_b.3);
(txn[0].clone(), txn[1].clone())
} else {
check_spends!(&txn[1], &chan_a.3);
check_spends!(&txn[0], &chan_b.3);
(txn[1].clone(), txn[0].clone())
}
};
// Bob should now receive two events to bump his revoked commitment transaction fees.

View file

@@ -335,9 +335,15 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
assert!(nodes[0].node.list_channels().is_empty());
assert!(nodes[0].node.has_pending_payments());
let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(as_broadcasted_txn.len(), 1);
assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
nodes[0].node.timer_tick_occurred();
if !confirm_before_reload {
let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(as_broadcasted_txn.len(), 1);
assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
} else {
assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
}
check_added_monitors!(nodes[0], 1);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
@@ -500,9 +506,11 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
// On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
// force-close the channel.
check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
nodes[0].node.timer_tick_occurred();
assert!(nodes[0].node.list_channels().is_empty());
assert!(nodes[0].node.has_pending_payments());
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
check_added_monitors!(nodes[0], 1);
nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
@@ -2795,6 +2803,7 @@ fn do_no_missing_sent_on_midpoint_reload(persist_manager_with_payment: bool) {
if let Event::PaymentSent { payment_preimage, .. } = events[1] { assert_eq!(payment_preimage, our_payment_preimage); } else { panic!(); }
// Note that we don't get a PaymentPathSuccessful here as we leave the HTLC pending to avoid
// the double-claim that would otherwise appear at the end of this test.
nodes[0].node.timer_tick_occurred();
let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(as_broadcasted_txn.len(), 1);

View file

@@ -423,20 +423,22 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
nodes_0_deserialized = nodes_0_deserialized_tmp;
assert!(nodes_0_read.is_empty());
{ // Channel close should result in a commitment tx
let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(txn.len(), 1);
check_spends!(txn[0], funding_tx);
assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
}
for monitor in node_0_monitors.drain(..) {
assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
ChannelMonitorUpdateStatus::Completed);
check_added_monitors!(nodes[0], 1);
}
nodes[0].node = &nodes_0_deserialized;
check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
{ // Channel close should result in a commitment tx
nodes[0].node.timer_tick_occurred();
let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(txn.len(), 1);
check_spends!(txn[0], funding_tx);
assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
}
check_added_monitors!(nodes[0], 1);
// nodes[1] and nodes[2] have no lost state with nodes[0]...
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
@@ -921,8 +923,10 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
});
}
nodes[1].node.timer_tick_occurred();
let bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(bs_commitment_tx.len(), 1);
check_added_monitors!(nodes[1], 1);
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));

View file

@@ -321,12 +321,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
let chan_0_monitor_serialized = get_monitor!(nodes[0], chan.2).encode();
reload_node!(nodes[0], *nodes[0].node.get_current_default_configuration(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
if !reorg_after_reload {
// If the channel is already closed when we reload the node, we'll broadcast a closing
// transaction via the ChannelMonitor which is missing a corresponding channel.
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
}
assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
}
if reorg_after_reload {

View file

@@ -74,24 +74,31 @@ struct LockDep {
_lockdep_trace: Backtrace,
}
// Locates the frame preceding the earliest `debug_sync` frame in the call stack. This ensures we
// can properly detect a lock's construction and acquisition callsites, since the latter may contain
// multiple `debug_sync` frames.
#[cfg(feature = "backtrace")]
fn get_construction_location(backtrace: &Backtrace) -> (String, Option<u32>) {
// Find the first frame that is after `debug_sync` (or that is in our tests) and use
// that as the mutex construction site. Note that the first few frames may be in
// the `backtrace` crate, so we have to ignore those.
fn locate_call_symbol(backtrace: &Backtrace) -> (String, Option<u32>) {
// Find the earliest `debug_sync` frame (or that is in our tests) and use the frame preceding it
// as the callsite. Note that the first few frames may be in the `backtrace` crate, so we have
// to ignore those.
let sync_mutex_constr_regex = regex::Regex::new(r"lightning.*debug_sync").unwrap();
let mut found_debug_sync = false;
for frame in backtrace.frames() {
for symbol in frame.symbols() {
let symbol_name = symbol.name().unwrap().as_str().unwrap();
if !sync_mutex_constr_regex.is_match(symbol_name) {
if found_debug_sync {
return (format!("{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap()), symbol.colno());
}
} else { found_debug_sync = true; }
let mut symbol_after_latest_debug_sync = None;
for frame in backtrace.frames().iter() {
for symbol in frame.symbols().iter() {
if let Some(symbol_name) = symbol.name().map(|name| name.as_str()).flatten() {
if !sync_mutex_constr_regex.is_match(symbol_name) {
if found_debug_sync {
symbol_after_latest_debug_sync = Some(symbol);
found_debug_sync = false;
}
} else { found_debug_sync = true; }
}
}
}
panic!("Couldn't find mutex construction callsite");
let symbol = symbol_after_latest_debug_sync.expect("Couldn't find lock call symbol");
(format!("{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap()), symbol.colno())
}
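For context on the `locate_call_symbol` rewrite above: it scans the backtrace for runs of frames whose symbol names match the `lightning.*debug_sync` regex and keeps the first symbol after the last such run, i.e. the callsite outside the locking machinery, rather than returning at the first opportunity as `get_construction_location` did. A self-contained sketch of the same frame-walking pattern, assuming the `backtrace` (0.3) and `regex` (1.x) crates; `callsite_after` is a hypothetical helper and file/line results require debug symbols:

```rust
use backtrace::Backtrace;
use regex::Regex;

/// Returns "file:line" of the symbol immediately following the last run of
/// frames whose names match `pattern`, walking from the innermost frame out.
fn callsite_after(pattern: &str) -> Option<String> {
    let re = Regex::new(pattern).ok()?;
    let bt = Backtrace::new();
    let mut in_matching_run = false;
    let mut result = None;
    for frame in bt.frames() {
        for symbol in frame.symbols() {
            let name = match symbol.name().and_then(|n| n.as_str().map(str::to_owned)) {
                Some(name) => name,
                None => continue,
            };
            if re.is_match(&name) {
                in_matching_run = true;
            } else if in_matching_run {
                // First symbol after a matching run; keep overwriting so the
                // final value corresponds to the last run, as above.
                result = symbol.filename().and_then(|file| {
                    symbol.lineno().map(|line| format!("{}:{}", file.display(), line))
                });
                in_matching_run = false;
            }
        }
    }
    result
}

fn main() {
    // With a pattern matching this binary's own `callsite_after` frame, the
    // result (when debug info is available) points at the call below in `main`.
    println!("{:?}", callsite_after("callsite_after"));
}
```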
impl LockMetadata {
@@ -108,13 +115,13 @@ impl LockMetadata {
#[cfg(feature = "backtrace")]
{
let (lock_constr_location, lock_constr_colno) =
get_construction_location(&res._lock_construction_bt);
locate_call_symbol(&res._lock_construction_bt);
LOCKS_INIT.call_once(|| { unsafe { LOCKS = Some(StdMutex::new(HashMap::new())); } });
let mut locks = unsafe { LOCKS.as_ref() }.unwrap().lock().unwrap();
match locks.entry(lock_constr_location) {
hash_map::Entry::Occupied(e) => {
assert_eq!(lock_constr_colno,
get_construction_location(&e.get()._lock_construction_bt).1,
locate_call_symbol(&e.get()._lock_construction_bt).1,
"Because Windows doesn't support column number results in backtraces, we cannot construct two mutexes on the same line or we risk lockorder detection false positives.");
return Arc::clone(e.get())
},
@@ -126,9 +133,8 @@ impl LockMetadata {
fn pre_lock(this: &Arc<LockMetadata>, _double_lock_self_allowed: bool) {
LOCKS_HELD.with(|held| {
// For each lock which is currently locked, check that no lock's locked-before
// set includes the lock we're about to lock, which would imply a lockorder
// inversion.
// For each lock that is currently held, check that no lock's `locked_before` set
// includes the lock we're about to hold, which would imply a lockorder inversion.
for (locked_idx, _locked) in held.borrow().iter() {
if *locked_idx == this.lock_idx {
// Note that with `feature = "backtrace"` set, we may be looking at different
@@ -138,23 +144,34 @@ impl LockMetadata {
#[cfg(feature = "backtrace")]
debug_assert!(_double_lock_self_allowed,
"Tried to acquire a lock while it was held!\nLock constructed at {}",
get_construction_location(&this._lock_construction_bt).0);
locate_call_symbol(&this._lock_construction_bt).0);
#[cfg(not(feature = "backtrace"))]
panic!("Tried to acquire a lock while it was held!");
}
}
for (_locked_idx, locked) in held.borrow().iter() {
for (locked_dep_idx, _locked_dep) in locked.locked_before.lock().unwrap().iter() {
if *locked_dep_idx == this.lock_idx && *locked_dep_idx != locked.lock_idx {
#[cfg(feature = "backtrace")]
panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\nLock being taken constructed at: {} ({}):\n{:?}\nLock constructed at: {} ({})\n{:?}\n\nLock dep created at:\n{:?}\n\n",
get_construction_location(&this._lock_construction_bt).0,
this.lock_idx, this._lock_construction_bt,
get_construction_location(&locked._lock_construction_bt).0,
locked.lock_idx, locked._lock_construction_bt,
_locked_dep._lockdep_trace);
#[cfg(not(feature = "backtrace"))]
panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
let is_dep_this_lock = *locked_dep_idx == this.lock_idx;
let has_same_construction = *locked_dep_idx == locked.lock_idx;
if is_dep_this_lock && !has_same_construction {
#[allow(unused_mut, unused_assignments)]
let mut has_same_callsite = false;
#[cfg(feature = "backtrace")] {
has_same_callsite = _double_lock_self_allowed &&
locate_call_symbol(&_locked_dep._lockdep_trace) ==
locate_call_symbol(&Backtrace::new());
}
if !has_same_callsite {
#[cfg(feature = "backtrace")]
panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\nLock being taken constructed at: {} ({}):\n{:?}\nLock constructed at: {} ({})\n{:?}\n\nLock dep created at:\n{:?}\n\n",
locate_call_symbol(&this._lock_construction_bt).0,
this.lock_idx, this._lock_construction_bt,
locate_call_symbol(&locked._lock_construction_bt).0,
locked.lock_idx, locked._lock_construction_bt,
_locked_dep._lockdep_trace);
#[cfg(not(feature = "backtrace"))]
panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
}
}
}
// Insert any already-held locks in our locked-before set.

View file

@@ -0,0 +1,4 @@
## Backwards Compatibility
- Providing `ChannelMonitorUpdate`s generated by LDK 0.0.115 to a
  `ChannelMonitor` on 0.0.114 or before may panic, as those versions only
  accept `PaymentPreimage` updates at the `CLOSED_CHANNEL_UPDATE_ID` sentinel
  now also used for `ChannelForceClosed` updates.