Simplify and fix `AtomicCounter`

`AtomicCounter` was slightly race-y on 32-bit platforms because it
increments the high `AtomicUsize` independently of the low
`AtomicUsize`, leading to a potential race where another thread
could observe the low increment but not the high increment and see
a value of 0 twice.
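
For illustration, a standalone sketch of that interleaving (a
hypothetical `SplitCounter`, with `AtomicU32` standing in for a
32-bit `AtomicUsize`; this is not LDK code):

    use std::sync::atomic::{AtomicU32, Ordering};

    // Stand-in for the old two-word counter on a 32-bit target.
    struct SplitCounter {
        low: AtomicU32,
        high: AtomicU32,
    }

    fn main() {
        // State after the first 2^32 - 1 increments: `low` is about to wrap,
        // and the very first increment already bumped `high` to 1.
        let c = SplitCounter { low: AtomicU32::new(u32::MAX), high: AtomicU32::new(1) };

        // Thread A: `low` wraps 0xffff_ffff -> 0 (fetch_add returns the old value).
        assert_eq!(c.low.fetch_add(1, Ordering::AcqRel), u32::MAX);

        // Thread A again: fetch_add returns 0, so A must now bump `high`;
        // imagine it is preempted right here, between the two atomic ops.
        assert_eq!(c.low.fetch_add(1, Ordering::AcqRel), 0);

        // Thread B runs in that window, pairing its fresh `low` with the
        // stale `high` and repeating a value handed out ~2^32 calls earlier.
        let low = c.low.fetch_add(1, Ordering::AcqRel) as u64;
        let high = c.high.load(Ordering::Acquire) as u64;
        assert_eq!((high << 32) | low, (1u64 << 32) | 1); // duplicate!

        // Only now does thread A finish its `high` increment.
        c.high.fetch_add(1, Ordering::AcqRel);
    }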

This isn't a big deal because (a) most platforms are 64-bit these
days, (b) 32-bit platforms aren't super likely to have their
counter overflow 32 bits anyway, and (c) the two writes are
back-to-back so having another thread read during that window is
very unlikely.

However, we can also simplify and optimize the counter using the
`target_has_atomic = "64"` cfg flag, which we do here: it lets us
use a single `AtomicU64` wherever 64-bit atomics exist (including
on many 32-bit platforms), falling back to a `Mutex<u64>` elsewhere,
which removes the race in both cases.
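
A compact sketch of the cfg pattern (plain std types here; the real
change, in the final hunk below, uses LDK's own `Mutex` wrapper):

    #[cfg(target_has_atomic = "64")]
    use std::sync::atomic::{AtomicU64, Ordering};

    // `target_has_atomic = "64"` is set whenever the target has native 64-bit
    // atomics, even with 32-bit pointers (e.g. many 32-bit ARM targets).
    #[cfg(target_has_atomic = "64")]
    type Counter = AtomicU64;
    #[cfg(not(target_has_atomic = "64"))]
    type Counter = std::sync::Mutex<u64>;

    #[cfg(target_has_atomic = "64")]
    fn next_id(counter: &Counter) -> u64 {
        // A single atomic read-modify-write: no split halves to race on.
        counter.fetch_add(1, Ordering::AcqRel)
    }

    #[cfg(not(target_has_atomic = "64"))]
    fn next_id(counter: &Counter) -> u64 {
        // Fallback when 64-bit atomics are unavailable: the lock makes the
        // increment atomic, at the cost of possible blocking.
        let mut guard = counter.lock().unwrap();
        *guard += 1;
        *guard - 1
    }

    fn main() {
        let counter = Counter::default(); // both types default to 0
        assert_eq!(next_id(&counter), 0);
        assert_eq!(next_id(&counter), 1);
    }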

This changes the RNG output consumed in some tests, which requires
adapting them.
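
(For the curious: the 0x1_0000_0004 constant in the test hunk below
follows, on my reading of the old code, from the old counter bumping
its high word on the very first call, so every later value carried a
1 << 32 offset:)

    // Assumed old sequence: call 0 returned 0 and bumped `high` to 1, so call
    // n (n >= 1) returned (1 << 32) | n. The new counter returns 0, 1, 2, ...
    fn old_counter_value(n: u64) -> u64 {
        if n == 0 { 0 } else { (1u64 << 32) | n }
    }

    fn main() {
        // A test written against the old RNG stream, four values in, resumes
        // that stream on the new counter by jumping to 0x1_0000_0004.
        assert_eq!(old_counter_value(4), 0x1_0000_0004);
    }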

Fixes #3000

Matt Corallo, 2024-09-08 19:05:28 +00:00
commit 2ab133d432 (parent 6e340c43de)
4 changed files with 50 additions and 21 deletions

@@ -7668,8 +7668,8 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
 	assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output);
 	assert_ne!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output);
-	assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[1].input[0].previous_output);
-	assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
+	assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[1].input[0].previous_output);
+	assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
 	// node_txn[3] spends the revoked outputs from the revoked_htlc_txn (which only have one
 	// output, checked above).

@@ -2251,6 +2251,10 @@ fn do_test_restored_packages_retry(check_old_monitor_retries_after_upgrade: bool
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
+	// Reset our RNG counters to mirror the RNG output from when this test was written.
+	nodes[0].keys_manager.backing.inner.entropy_source.set_counter(0x1_0000_0004);
+	nodes[1].keys_manager.backing.inner.entropy_source.set_counter(0x1_0000_0004);
+
 	// Open a channel, lock in an HTLC, and immediately broadcast the commitment transaction. This
 	// ensures that the HTLC timeout package is held until we reach its expiration height.
 	let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 50_000_000);

@@ -1853,6 +1853,9 @@ pub struct KeysManager {
 	channel_master_key: Xpriv,
 	channel_child_index: AtomicUsize,
+	#[cfg(test)]
+	pub(crate) entropy_source: RandomBytes,
+	#[cfg(not(test))]
 	entropy_source: RandomBytes,
 	seed: [u8; 32],
@@ -2309,6 +2312,9 @@ impl SignerProvider for KeysManager {
 /// Switching between this struct and [`KeysManager`] will invalidate any previously issued
 /// invoices and attempts to pay previous invoices will fail.
 pub struct PhantomKeysManager {
+	#[cfg(test)]
+	pub(crate) inner: KeysManager,
+	#[cfg(not(test))]
 	inner: KeysManager,
 	inbound_payment_key: KeyMaterial,
 	phantom_secret: SecretKey,
@@ -2487,6 +2493,13 @@ impl RandomBytes {
 	pub fn new(seed: [u8; 32]) -> Self {
 		Self { seed, index: AtomicCounter::new() }
 	}
+
+	#[cfg(test)]
+	/// Force the counter to a value to produce the same output again. Mostly useful in tests where
+	/// we need to maintain behavior with a previous version which didn't use as much RNG output.
+	pub(crate) fn set_counter(&self, count: u64) {
+		self.index.set_counter(count);
+	}
 }
 
 impl EntropySource for RandomBytes {

@@ -1,32 +1,44 @@
-//! A simple atomic counter that uses AtomicUsize to give a u64 counter.
+//! A simple atomic counter that uses mutexes if the platform doesn't support atomic u64s.
 
-#[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
-compile_error!("We need at least 32-bit pointers for atomic counter (and to have enough memory to run LDK)");
-
-use core::sync::atomic::{AtomicUsize, Ordering};
+#[cfg(target_has_atomic = "64")]
+use core::sync::atomic::{AtomicU64, Ordering};
+#[cfg(not(target_has_atomic = "64"))]
+use crate::sync::Mutex;
 
 #[derive(Debug)]
 pub(crate) struct AtomicCounter {
-	// Usize needs to be at least 32 bits to avoid overflowing both low and high. If usize is 64
-	// bits we will never realistically count into high:
-	counter_low: AtomicUsize,
-	counter_high: AtomicUsize,
+	#[cfg(target_has_atomic = "64")]
+	counter: AtomicU64,
+	#[cfg(not(target_has_atomic = "64"))]
+	counter: Mutex<u64>,
 }
 
 impl AtomicCounter {
 	pub(crate) fn new() -> Self {
 		Self {
-			counter_low: AtomicUsize::new(0),
-			counter_high: AtomicUsize::new(0),
+			#[cfg(target_has_atomic = "64")]
+			counter: AtomicU64::new(0),
+			#[cfg(not(target_has_atomic = "64"))]
+			counter: Mutex::new(0),
 		}
 	}
 
 	pub(crate) fn get_increment(&self) -> u64 {
-		let low = self.counter_low.fetch_add(1, Ordering::AcqRel) as u64;
-		let high = if low == 0 {
-			self.counter_high.fetch_add(1, Ordering::AcqRel) as u64
-		} else {
-			self.counter_high.load(Ordering::Acquire) as u64
-		};
-		(high << 32) | low
+		#[cfg(target_has_atomic = "64")] {
+			self.counter.fetch_add(1, Ordering::AcqRel)
+		}
+		#[cfg(not(target_has_atomic = "64"))] {
+			let mut mtx = self.counter.lock().unwrap();
+			*mtx += 1;
+			*mtx - 1
+		}
+	}
+
+	#[cfg(test)]
+	pub(crate) fn set_counter(&self, count: u64) {
+		#[cfg(target_has_atomic = "64")] {
+			self.counter.store(count, Ordering::Release);
+		}
+		#[cfg(not(target_has_atomic = "64"))] {
+			let mut mtx = self.counter.lock().unwrap();
+			*mtx = count;
+		}
+	}
 }
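
Both arms of the new get_increment are a fetch-and-increment over a
u64; a quick standalone check of that equivalence (std types here,
not LDK's sync wrappers):

    use std::sync::atomic::{AtomicU64, Ordering};
    use std::sync::Mutex;

    fn main() {
        let atomic = AtomicU64::new(0);
        let mutexed = Mutex::new(0u64);
        for expected in 0..3u64 {
            // Atomic arm: fetch_add returns the pre-increment value.
            let a = atomic.fetch_add(1, Ordering::AcqRel);
            // Mutex arm: bump under the lock, return the pre-increment value.
            let m = {
                let mut guard = mutexed.lock().unwrap();
                *guard += 1;
                *guard - 1
            };
            assert_eq!((a, m), (expected, expected));
        }
    }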