pub use ::alloc::sync::Arc;
use core::ops::{Deref, DerefMut};
use core::time::Duration;

use std::cell::RefCell;

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex as StdMutex;
use std::sync::MutexGuard as StdMutexGuard;
use std::sync::RwLock as StdRwLock;
use std::sync::RwLockReadGuard as StdRwLockReadGuard;
use std::sync::RwLockWriteGuard as StdRwLockWriteGuard;
use std::sync::Condvar as StdCondvar;

use prelude::HashMap;

#[cfg(feature = "backtrace")]
use {prelude::hash_map, backtrace::Backtrace, std::sync::Once};

#[cfg(not(feature = "backtrace"))]
struct Backtrace{}
#[cfg(not(feature = "backtrace"))]
impl Backtrace { fn new() -> Backtrace { Backtrace {} } }

pub type LockResult<Guard> = Result<Guard, ()>;

pub struct Condvar {
	inner: StdCondvar,
}

impl Condvar {
	pub fn new() -> Condvar {
		Condvar { inner: StdCondvar::new() }
	}

	pub fn wait<'a, T>(&'a self, guard: MutexGuard<'a, T>) -> LockResult<MutexGuard<'a, T>> {
		let mutex: &'a Mutex<T> = guard.mutex;
		self.inner.wait(guard.into_inner()).map(|lock| MutexGuard { mutex, lock }).map_err(|_| ())
	}

	#[allow(unused)]
	pub fn wait_timeout<'a, T>(&'a self, guard: MutexGuard<'a, T>, dur: Duration) -> LockResult<(MutexGuard<'a, T>, ())> {
		let mutex = guard.mutex;
		self.inner.wait_timeout(guard.into_inner(), dur).map(|(lock, _)| (MutexGuard { mutex, lock }, ())).map_err(|_| ())
	}

	pub fn notify_all(&self) { self.inner.notify_all(); }
}
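
// A minimal usage sketch we added (not part of the original test suite): waiting
// re-wraps the std guard in our `MutexGuard`, so lockorder tracking stays intact
// across the wait. With no notifier, the wait below simply times out and hands
// the still-valid guard back.
#[cfg(test)]
#[test]
fn condvar_wait_timeout_returns_guard() {
	let cv = Condvar::new();
	let mutex = Mutex::new(false);
	let guard = mutex.lock().unwrap();
	let (guard, _) = cv.wait_timeout(guard, Duration::from_millis(1)).unwrap();
	assert!(!*guard);
}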

thread_local! {
	/// We track the set of locks currently held by a reference to their `LockMetadata`
	static LOCKS_HELD: RefCell<HashMap<u64, Arc<LockMetadata>>> = RefCell::new(HashMap::new());
}
static LOCK_IDX: AtomicUsize = AtomicUsize::new(0);

#[cfg(feature = "backtrace")]
static mut LOCKS: Option<StdMutex<HashMap<String, Arc<LockMetadata>>>> = None;
#[cfg(feature = "backtrace")]
static LOCKS_INIT: Once = Once::new();

/// Metadata about a single lock: its id, the set of things locked before it, and the backtrace
/// of when the lock itself was constructed.
struct LockMetadata {
	lock_idx: u64,
	locked_before: StdMutex<HashMap<u64, LockDep>>,
	_lock_construction_bt: Backtrace,
}

struct LockDep {
	lock: Arc<LockMetadata>,
	/// lockdep_trace is unused unless we're building with `backtrace`, so we mark it _
	_lockdep_trace: Backtrace,
}

#[cfg(feature = "backtrace")]
fn get_construction_location(backtrace: &Backtrace) -> String {
	// Find the first frame that is after `debug_sync` (or that is in our tests) and use
	// that as the mutex construction site. Note that the first few frames may be in
	// the `backtrace` crate, so we have to ignore those.
	let sync_mutex_constr_regex = regex::Regex::new(r"lightning.*debug_sync.*new").unwrap();
	let mut found_debug_sync = false;
	for frame in backtrace.frames() {
		for symbol in frame.symbols() {
			let symbol_name = symbol.name().unwrap().as_str().unwrap();
			if !sync_mutex_constr_regex.is_match(symbol_name) {
				if found_debug_sync {
					if let Some(col) = symbol.colno() {
						return format!("{}:{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap(), col);
					} else {
						// Windows debug symbols don't support column numbers, so fall back to
						// line numbers only if no `colno` is available
						return format!("{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap());
					}
				}
			} else { found_debug_sync = true; }
		}
	}
	panic!("Couldn't find mutex construction callsite");
}

impl LockMetadata {
	fn new() -> Arc<LockMetadata> {
		let backtrace = Backtrace::new();
		let lock_idx = LOCK_IDX.fetch_add(1, Ordering::Relaxed) as u64;

		let res = Arc::new(LockMetadata {
			locked_before: StdMutex::new(HashMap::new()),
			lock_idx,
			_lock_construction_bt: backtrace,
		});

		#[cfg(feature = "backtrace")]
		{
			// With `backtrace` enabled, de-duplicate metadata by construction site so that
			// multiple lock instances constructed at the same line share one lockorder entry.
			let lock_constr_location = get_construction_location(&res._lock_construction_bt);
			LOCKS_INIT.call_once(|| { unsafe { LOCKS = Some(StdMutex::new(HashMap::new())); } });
			let mut locks = unsafe { LOCKS.as_ref() }.unwrap().lock().unwrap();
			match locks.entry(lock_constr_location) {
				hash_map::Entry::Occupied(e) => return Arc::clone(e.get()),
				hash_map::Entry::Vacant(e) => { e.insert(Arc::clone(&res)); },
			}
		}
		res
	}

	// Returns whether we were a recursive lock (only relevant for read)
	fn _pre_lock(this: &Arc<LockMetadata>, read: bool) -> bool {
		let mut inserted = false;
		LOCKS_HELD.with(|held| {
			// For each lock which is currently locked, check that no lock's locked-before
			// set includes the lock we're about to lock, which would imply a lockorder
			// inversion.
			for (locked_idx, _locked) in held.borrow().iter() {
				if read && *locked_idx == this.lock_idx {
					// Recursive read locks are explicitly allowed
					return;
				}
			}
			for (locked_idx, locked) in held.borrow().iter() {
				if !read && *locked_idx == this.lock_idx {
					// With `feature = "backtrace"` set, we may be looking at different instances
					// of the same lock.
					debug_assert!(cfg!(feature = "backtrace"), "Tried to acquire a lock while it was held!");
				}
				for (locked_dep_idx, _locked_dep) in locked.locked_before.lock().unwrap().iter() {
					if *locked_dep_idx == this.lock_idx && *locked_dep_idx != locked.lock_idx {
						#[cfg(feature = "backtrace")]
						panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\nLock being taken constructed at: {} ({}):\n{:?}\nLock constructed at: {} ({})\n{:?}\n\nLock dep created at:\n{:?}\n\n",
							get_construction_location(&this._lock_construction_bt), this.lock_idx, this._lock_construction_bt,
							get_construction_location(&locked._lock_construction_bt), locked.lock_idx, locked._lock_construction_bt,
							_locked_dep._lockdep_trace);
						#[cfg(not(feature = "backtrace"))]
						panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
					}
				}
				// Insert any already-held locks in our locked-before set.
				let mut locked_before = this.locked_before.lock().unwrap();
				if !locked_before.contains_key(&locked.lock_idx) {
					let lockdep = LockDep { lock: Arc::clone(locked), _lockdep_trace: Backtrace::new() };
					locked_before.insert(lockdep.lock.lock_idx, lockdep);
				}
			}
			held.borrow_mut().insert(this.lock_idx, Arc::clone(this));
			inserted = true;
		});
		inserted
	}

	fn pre_lock(this: &Arc<LockMetadata>) { Self::_pre_lock(this, false); }
	fn pre_read_lock(this: &Arc<LockMetadata>) -> bool { Self::_pre_lock(this, true) }

	fn try_locked(this: &Arc<LockMetadata>) {
		LOCKS_HELD.with(|held| {
			// Since a try-lock will simply fail if the lock is held already, we do not
			// consider try-locks to ever generate lockorder inversions. However, if a try-lock
			// succeeds, we do consider it to have created lockorder dependencies.
			let mut locked_before = this.locked_before.lock().unwrap();
			for (locked_idx, locked) in held.borrow().iter() {
				if !locked_before.contains_key(locked_idx) {
					let lockdep = LockDep { lock: Arc::clone(locked), _lockdep_trace: Backtrace::new() };
					locked_before.insert(*locked_idx, lockdep);
				}
			}
			held.borrow_mut().insert(this.lock_idx, Arc::clone(this));
		});
	}
}
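
// A hedged illustration we added (not part of the original test suite): after
// locking `a` and then `b`, `b`'s locked-before set records `a`'s index, which
// is exactly what `_pre_lock` scans when detecting an inversion later.
#[cfg(test)]
#[test]
fn locked_before_records_dependency() {
	let a = Mutex::new(());
	let b = Mutex::new(());
	{
		let _a = a.lock().unwrap();
		let _b = b.lock().unwrap();
	}
	assert!(b.deps.locked_before.lock().unwrap().contains_key(&a.deps.lock_idx));
}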

pub struct Mutex<T: Sized> {
	inner: StdMutex<T>,
	deps: Arc<LockMetadata>,
}

#[must_use = "if unused the Mutex will immediately unlock"]
pub struct MutexGuard<'a, T: Sized + 'a> {
	mutex: &'a Mutex<T>,
	lock: StdMutexGuard<'a, T>,
}

impl<'a, T: Sized> MutexGuard<'a, T> {
	fn into_inner(self) -> StdMutexGuard<'a, T> {
		// We cannot simply move out of self.lock because `MutexGuard` implements `Drop`
		// (E0509), so read the field out and forget `self` to skip our `Drop` impl instead.
		unsafe {
			let v: StdMutexGuard<'a, T> = std::ptr::read(&self.lock);
			std::mem::forget(self);
			v
		}
	}
}

impl<T: Sized> Drop for MutexGuard<'_, T> {
	fn drop(&mut self) {
		LOCKS_HELD.with(|held| {
			held.borrow_mut().remove(&self.mutex.deps.lock_idx);
		});
	}
}

impl<T: Sized> Deref for MutexGuard<'_, T> {
	type Target = T;
	fn deref(&self) -> &T {
		&self.lock.deref()
	}
}

impl<T: Sized> DerefMut for MutexGuard<'_, T> {
	fn deref_mut(&mut self) -> &mut T {
		self.lock.deref_mut()
	}
}

impl<T> Mutex<T> {
	pub fn new(inner: T) -> Mutex<T> {
		Mutex { inner: StdMutex::new(inner), deps: LockMetadata::new() }
	}

	pub fn lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
		LockMetadata::pre_lock(&self.deps);
		self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ())
	}

	pub fn try_lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
		let res = self.inner.try_lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ());
		if res.is_ok() {
			LockMetadata::try_locked(&self.deps);
		}
		res
	}
}
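
// A hedged sketch we added (not part of the original suite): a failed same-thread
// try-lock returns `Err` rather than tripping the lockorder detector, since
// `try_locked` only records dependencies when the try-lock succeeds.
#[cfg(test)]
#[test]
fn try_lock_while_held_fails_cleanly() {
	let mutex = Mutex::new(());
	let _held = mutex.lock().unwrap();
	assert!(mutex.try_lock().is_err());
}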

pub struct RwLock<T: Sized> {
	inner: StdRwLock<T>,
	deps: Arc<LockMetadata>,
}

pub struct RwLockReadGuard<'a, T: Sized + 'a> {
	lock: &'a RwLock<T>,
	first_lock: bool,
	guard: StdRwLockReadGuard<'a, T>,
}

pub struct RwLockWriteGuard<'a, T: Sized + 'a> {
	lock: &'a RwLock<T>,
	guard: StdRwLockWriteGuard<'a, T>,
}

impl<T: Sized> Deref for RwLockReadGuard<'_, T> {
	type Target = T;
	fn deref(&self) -> &T {
		&self.guard.deref()
	}
}

impl<T: Sized> Drop for RwLockReadGuard<'_, T> {
	fn drop(&mut self) {
		if !self.first_lock {
			// Note that it's not strictly true that the first taken read lock will get unlocked
			// last, but in practice our locks are always taken as RAII, so it should basically
			// always be true.
			return;
		}
		LOCKS_HELD.with(|held| {
			held.borrow_mut().remove(&self.lock.deps.lock_idx);
		});
	}
}

impl<T: Sized> Deref for RwLockWriteGuard<'_, T> {
	type Target = T;
	fn deref(&self) -> &T {
		&self.guard.deref()
	}
}

impl<T: Sized> Drop for RwLockWriteGuard<'_, T> {
	fn drop(&mut self) {
		LOCKS_HELD.with(|held| {
			held.borrow_mut().remove(&self.lock.deps.lock_idx);
		});
	}
}

impl<T: Sized> DerefMut for RwLockWriteGuard<'_, T> {
	fn deref_mut(&mut self) -> &mut T {
		self.guard.deref_mut()
	}
}

impl<T> RwLock<T> {
	pub fn new(inner: T) -> RwLock<T> {
		RwLock { inner: StdRwLock::new(inner), deps: LockMetadata::new() }
	}

	pub fn read<'a>(&'a self) -> LockResult<RwLockReadGuard<'a, T>> {
		let first_lock = LockMetadata::pre_read_lock(&self.deps);
		self.inner.read().map(|guard| RwLockReadGuard { lock: self, guard, first_lock }).map_err(|_| ())
	}

	pub fn write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
		LockMetadata::pre_lock(&self.deps);
		self.inner.write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ())
	}

	pub fn try_write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
		let res = self.inner.try_write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ());
		if res.is_ok() {
			LockMetadata::try_locked(&self.deps);
		}
		res
	}
}
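
// A hedged sketch we added: like `Mutex::try_lock`, a failed `try_write` is
// never treated as a lockorder violation; dependencies are only recorded on
// success, so a same-thread try-write under a read lock just returns `Err`.
#[cfg(test)]
#[test]
fn try_write_while_read_held_fails_cleanly() {
	let lock = RwLock::new(());
	let _read = lock.read().unwrap();
	assert!(lock.try_write().is_err());
}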

// Fairness only affects liveness and performance, not lock ordering, so a plain
// RwLock is a fine stand-in for lockorder-checking purposes.
pub type FairRwLock<T> = RwLock<T>;

#[cfg(test)]
mod tests {
	use super::{RwLock, Mutex};

	#[test]
	#[should_panic]
	#[cfg(not(feature = "backtrace"))]
	fn recursive_lock_fail() {
		let mutex = Mutex::new(());
		let _a = mutex.lock().unwrap();
		let _b = mutex.lock().unwrap();
	}

	#[test]
	fn recursive_read() {
		let lock = RwLock::new(());
		let _a = lock.read().unwrap();
		let _b = lock.read().unwrap();
	}

	#[test]
	#[should_panic]
	fn lockorder_fail() {
		let a = Mutex::new(());
		let b = Mutex::new(());
		{
			let _a = a.lock().unwrap();
			let _b = b.lock().unwrap();
		}
		{
			let _b = b.lock().unwrap();
			let _a = a.lock().unwrap();
		}
	}

	#[test]
	#[should_panic]
	fn write_lockorder_fail() {
		let a = RwLock::new(());
		let b = RwLock::new(());
		{
			let _a = a.write().unwrap();
			let _b = b.write().unwrap();
		}
		{
			let _b = b.write().unwrap();
			let _a = a.write().unwrap();
		}
	}

	#[test]
	#[should_panic]
	fn read_lockorder_fail() {
		let a = RwLock::new(());
		let b = RwLock::new(());
		{
			let _a = a.read().unwrap();
			let _b = b.read().unwrap();
		}
		{
			let _b = b.read().unwrap();
			let _a = a.read().unwrap();
		}
	}

	#[test]
	fn read_recursive_no_lockorder() {
		// Like the above, but note that no lockorder is implied when we recursively read-lock a
		// RwLock, causing this to pass just fine.
		let a = RwLock::new(());
		let b = RwLock::new(());
		let _outer = a.read().unwrap();
		{
			let _a = a.read().unwrap();
			let _b = b.read().unwrap();
		}
		{
			let _b = b.read().unwrap();
			let _a = a.read().unwrap();
		}
	}

	#[test]
	#[should_panic]
	fn read_write_lockorder_fail() {
		let a = RwLock::new(());
		let b = RwLock::new(());
		{
			let _a = a.write().unwrap();
			let _b = b.read().unwrap();
		}
		{
			let _b = b.read().unwrap();
			let _a = a.write().unwrap();
		}
	}
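
	// A hedged extra check we added (not in the original suite): Mutex and RwLock
	// share the same `LockMetadata` tracking, so an inversion across the two lock
	// types is detected just like one within a single type.
	#[test]
	#[should_panic]
	fn mixed_mutex_rwlock_lockorder_fail() {
		let a = Mutex::new(());
		let b = RwLock::new(());
		{
			let _a = a.lock().unwrap();
			let _b = b.write().unwrap();
		}
		{
			let _b = b.write().unwrap();
			let _a = a.lock().unwrap();
		}
	}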
}