From 26c1639ab69d6780c97a118f09e42cb42304088a Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 21 Sep 2023 22:22:18 +0000 Subject: [PATCH 01/13] Use `Default::default()` to construct `()` as score-updating param In 6b0d94a3029f74de3a7542cbba0d48c2f7e5866b we switched most tests to `Default::default()` for scoring parameters passed to route-fetching. Here we do the same for the scoring parameters when passed to score-updating. --- lightning-background-processor/src/lib.rs | 2 +- lightning/src/util/test_utils.rs | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index efa1a4214..643cae590 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -1181,7 +1181,7 @@ mod tests { let network_graph = Arc::new(NetworkGraph::new(network, logger.clone())); let scorer = Arc::new(Mutex::new(TestScorer::new())); let seed = [i as u8; 32]; - let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone(), ())); + let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone(), Default::default())); let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Bitcoin)); let kv_store = Arc::new(FilesystemStore::new(format!("{}_persister_{}", &persist_dir, i).into())); let now = Duration::from_secs(genesis_block.header.time as u64); diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index 4bcaca57f..ae942a058 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -140,10 +140,10 @@ impl<'a> Router for TestRouter<'a> { // Since the path is reversed, the last element in our iteration is the first // hop. if idx == path.hops.len() - 1 { - scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(payer), &NodeId::from_pubkey(&hop.pubkey), usage, &()); + scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(payer), &NodeId::from_pubkey(&hop.pubkey), usage, &Default::default()); } else { let curr_hop_path_idx = path.hops.len() - 1 - idx; - scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(&path.hops[curr_hop_path_idx - 1].pubkey), &NodeId::from_pubkey(&hop.pubkey), usage, &()); + scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(&path.hops[curr_hop_path_idx - 1].pubkey), &NodeId::from_pubkey(&hop.pubkey), usage, &Default::default()); } } } @@ -153,7 +153,7 @@ impl<'a> Router for TestRouter<'a> { let logger = TestLogger::new(); find_route( payer, params, &self.network_graph, first_hops, &logger, - &ScorerAccountingForInFlightHtlcs::new(self.scorer.read().unwrap(), &inflight_htlcs), &(), + &ScorerAccountingForInFlightHtlcs::new(self.scorer.read().unwrap(), &inflight_htlcs), &Default::default(), &[42; 32] ) } From 748bebadb43570fbe7e447492b5ed9544529adab Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 21 Sep 2023 22:54:14 +0000 Subject: [PATCH 02/13] Remove unnecessary bounds in scoring In our scoring logic we have a handful of unnecessary bounds, leading to extra diff in the bindings branch when those bounds have to be removed as well as a few cases where bindings generation simply gets confused. Here we remove a number of bounds across the scoring traits and impls, cleaning things up and simplifying bindings changes. 
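To see the bound change in isolation, here is a minimal, self-contained sketch (hypothetical `FixedScorer`/`penalty_*` names, not LDK's actual signatures): the scorer's concrete type is no longer threaded through as its own type parameter; only the `Deref` handle is generic, with the requirement expressed as `S::Target: ScoreLookUp`.

```rust
use core::ops::Deref;

trait ScoreLookUp {
    fn channel_penalty_msat(&self, short_channel_id: u64) -> u64;
}

struct FixedScorer(u64);
impl ScoreLookUp for FixedScorer {
    fn channel_penalty_msat(&self, _scid: u64) -> u64 { self.0 }
}

// Old style: the deref target is a separate type parameter, which every impl
// block (and the bindings generator) has to repeat.
fn penalty_old<Sc: ScoreLookUp, S: Deref<Target = Sc>>(scorer: S, scid: u64) -> u64 {
    scorer.channel_penalty_msat(scid)
}

// New style: only the smart-pointer type is generic; the trait requirement
// moves into a `where` clause on `S::Target`, so no extra parameters leak out.
fn penalty_new<S: Deref>(scorer: S, scid: u64) -> u64
where
    S::Target: ScoreLookUp,
{
    scorer.channel_penalty_msat(scid)
}

fn main() {
    let scorer = FixedScorer(1_000);
    assert_eq!(penalty_old(&scorer, 42), 1_000);
    assert_eq!(penalty_new(&scorer, 42), 1_000);
}
```

Both forms accept `&scorer`, `Rc<...>`, and similar handles; the second shape is the one `ScorerAccountingForInFlightHtlcs` and the `LockableScore` impls move toward in the diff below.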
--- lightning/src/routing/router.rs | 10 +++++----- lightning/src/routing/scoring.rs | 8 ++++---- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 1297322fb..66b388971 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -112,12 +112,12 @@ pub trait Router { /// [`find_route`]. /// /// [`ScoreLookUp`]: crate::routing::scoring::ScoreLookUp -pub struct ScorerAccountingForInFlightHtlcs<'a, SP: Sized, Sc: 'a + ScoreLookUp, S: Deref> { +pub struct ScorerAccountingForInFlightHtlcs<'a, S: Deref> where S::Target: ScoreLookUp { scorer: S, // Maps a channel's short channel id and its direction to the liquidity used up. inflight_htlcs: &'a InFlightHtlcs, } -impl<'a, SP: Sized, Sc: ScoreLookUp, S: Deref> ScorerAccountingForInFlightHtlcs<'a, SP, Sc, S> { +impl<'a, S: Deref> ScorerAccountingForInFlightHtlcs<'a, S> where S::Target: ScoreLookUp { /// Initialize a new `ScorerAccountingForInFlightHtlcs`. pub fn new(scorer: S, inflight_htlcs: &'a InFlightHtlcs) -> Self { ScorerAccountingForInFlightHtlcs { @@ -128,12 +128,12 @@ impl<'a, SP: Sized, Sc: ScoreLookUp, S: Deref> Sc } #[cfg(c_bindings)] -impl<'a, SP: Sized, Sc: ScoreLookUp, S: Deref> Writeable for ScorerAccountingForInFlightHtlcs<'a, SP, Sc, S> { +impl<'a, S: Deref> Writeable for ScorerAccountingForInFlightHtlcs<'a, S> where S::Target: ScoreLookUp { fn write(&self, writer: &mut W) -> Result<(), io::Error> { self.scorer.write(writer) } } -impl<'a, SP: Sized, Sc: 'a + ScoreLookUp, S: Deref> ScoreLookUp for ScorerAccountingForInFlightHtlcs<'a, SP, Sc, S> { - type ScoreParams = Sc::ScoreParams; +impl<'a, S: Deref> ScoreLookUp for ScorerAccountingForInFlightHtlcs<'a, S> where S::Target: ScoreLookUp { + type ScoreParams = ::ScoreParams; fn channel_penalty_msat(&self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams) -> u64 { if let Some(used_liquidity) = self.inflight_htlcs.used_liquidity_msat( source, target, short_channel_id diff --git a/lightning/src/routing/scoring.rs b/lightning/src/routing/scoring.rs index c790f5df5..0764cf1aa 100644 --- a/lightning/src/routing/scoring.rs +++ b/lightning/src/routing/scoring.rs @@ -122,8 +122,8 @@ pub trait ScoreUpdate $(: $supertrait)* { fn probe_successful(&mut self, path: &Path); } -impl, T: Deref $(+ $supertrait)*> ScoreLookUp for T { - type ScoreParams = SP; +impl $(+ $supertrait)*> ScoreLookUp for T { + type ScoreParams = S::ScoreParams; fn channel_penalty_msat( &self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams ) -> u64 { @@ -226,7 +226,7 @@ impl<'a, T: 'a + ScoreUpdate + ScoreLookUp> LockableScore<'a> for RefCell { } #[cfg(not(c_bindings))] -impl<'a, SP:Sized, T: 'a + ScoreUpdate + ScoreLookUp> LockableScore<'a> for RwLock { +impl<'a, T: 'a + ScoreUpdate + ScoreLookUp> LockableScore<'a> for RwLock { type ScoreUpdate = T; type ScoreLookUp = T; @@ -249,7 +249,7 @@ pub struct MultiThreadedLockableScore { } #[cfg(c_bindings)] -impl<'a, SP:Sized, T: 'a + ScoreLookUp + ScoreUpdate> LockableScore<'a> for MultiThreadedLockableScore { +impl<'a, T: 'a + ScoreLookUp + ScoreUpdate> LockableScore<'a> for MultiThreadedLockableScore { type ScoreUpdate = T; type ScoreLookUp = T; type WriteLocked = MultiThreadedScoreLockWrite<'a, Self::ScoreUpdate>; From 0bc0de46fa2f6291891484d3d454d542ed57e318 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 24 Sep 2023 23:59:13 +0000 
Subject: [PATCH 03/13] Simplify score bounding with a unified type In a few places we require a unified scorer, which implements both `ScoreLookUp` and `ScoreUpdate`. Rather than double-bounding (which the bindings generator can't handle directly), we use a top-level `Score` trait which requires both and is implemented for all implementers of both supertraits. --- lightning/src/routing/router.rs | 5 ---- lightning/src/routing/scoring.rs | 51 +++++++++++++++++++++----------- 2 files changed, 33 insertions(+), 23 deletions(-) diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 66b388971..28d452c56 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -127,11 +127,6 @@ impl<'a, S: Deref> ScorerAccountingForInFlightHtlcs<'a, S> where S::Target: Scor } } -#[cfg(c_bindings)] -impl<'a, S: Deref> Writeable for ScorerAccountingForInFlightHtlcs<'a, S> where S::Target: ScoreLookUp { - fn write(&self, writer: &mut W) -> Result<(), io::Error> { self.scorer.write(writer) } -} - impl<'a, S: Deref> ScoreLookUp for ScorerAccountingForInFlightHtlcs<'a, S> where S::Target: ScoreLookUp { type ScoreParams = ::ScoreParams; fn channel_penalty_msat(&self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams) -> u64 { diff --git a/lightning/src/routing/scoring.rs b/lightning/src/routing/scoring.rs index 0764cf1aa..1c3f90992 100644 --- a/lightning/src/routing/scoring.rs +++ b/lightning/src/routing/scoring.rs @@ -89,7 +89,7 @@ macro_rules! define_score { ($($supertrait: path)*) => { /// `ScoreLookUp` is used to determine the penalty for a given channel. /// /// Scoring is in terms of fees willing to be paid in order to avoid routing through a channel. -pub trait ScoreLookUp $(: $supertrait)* { +pub trait ScoreLookUp { /// A configurable type which should contain various passed-in parameters for configuring the scorer, /// on a per-routefinding-call basis through to the scorer methods, /// which are used to determine the parameters for the suitability of channels for use. @@ -108,7 +108,7 @@ pub trait ScoreLookUp $(: $supertrait)* { } /// `ScoreUpdate` is used to update the scorer's internal state after a payment attempt. -pub trait ScoreUpdate $(: $supertrait)* { +pub trait ScoreUpdate { /// Handles updating channel penalties after failing to route through a channel. fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64); @@ -122,7 +122,18 @@ pub trait ScoreUpdate $(: $supertrait)* { fn probe_successful(&mut self, path: &Path); } -impl $(+ $supertrait)*> ScoreLookUp for T { +/// A trait which can both lookup and update routing channel penalty scores. +/// +/// This is used in places where both bounds are required and implemented for all types which +/// implement [`ScoreLookUp`] and [`ScoreUpdate`]. +/// +/// Bindings users may need to manually implement this for their custom scoring implementations. 
+pub trait Score : ScoreLookUp + ScoreUpdate $(+ $supertrait)* {} + +#[cfg(not(c_bindings))] +impl Score for T {} + +impl> ScoreLookUp for T { type ScoreParams = S::ScoreParams; fn channel_penalty_msat( &self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams @@ -131,7 +142,7 @@ impl $(+ $supertrait)*> ScoreLookUp for T { } } -impl $(+ $supertrait)*> ScoreUpdate for T { +impl> ScoreUpdate for T { fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) { self.deref_mut().payment_path_failed(path, short_channel_id) } @@ -192,7 +203,7 @@ pub trait WriteableScore<'a>: LockableScore<'a> + Writeable {} #[cfg(not(c_bindings))] impl<'a, T> WriteableScore<'a> for T where T: LockableScore<'a> + Writeable {} #[cfg(not(c_bindings))] -impl<'a, T: 'a + ScoreLookUp + ScoreUpdate> LockableScore<'a> for Mutex { +impl<'a, T: Score + 'a> LockableScore<'a> for Mutex { type ScoreUpdate = T; type ScoreLookUp = T; @@ -209,7 +220,7 @@ impl<'a, T: 'a + ScoreLookUp + ScoreUpdate> LockableScore<'a> for Mutex { } #[cfg(not(c_bindings))] -impl<'a, T: 'a + ScoreUpdate + ScoreLookUp> LockableScore<'a> for RefCell { +impl<'a, T: Score + 'a> LockableScore<'a> for RefCell { type ScoreUpdate = T; type ScoreLookUp = T; @@ -226,7 +237,7 @@ impl<'a, T: 'a + ScoreUpdate + ScoreLookUp> LockableScore<'a> for RefCell { } #[cfg(not(c_bindings))] -impl<'a, T: 'a + ScoreUpdate + ScoreLookUp> LockableScore<'a> for RwLock { +impl<'a, T: Score + 'a> LockableScore<'a> for RwLock { type ScoreUpdate = T; type ScoreLookUp = T; @@ -244,12 +255,12 @@ impl<'a, T: 'a + ScoreUpdate + ScoreLookUp> LockableScore<'a> for RwLock { #[cfg(c_bindings)] /// A concrete implementation of [`LockableScore`] which supports multi-threading. -pub struct MultiThreadedLockableScore { +pub struct MultiThreadedLockableScore { score: RwLock, } #[cfg(c_bindings)] -impl<'a, T: 'a + ScoreLookUp + ScoreUpdate> LockableScore<'a> for MultiThreadedLockableScore { +impl<'a, T: Score + 'a> LockableScore<'a> for MultiThreadedLockableScore { type ScoreUpdate = T; type ScoreLookUp = T; type WriteLocked = MultiThreadedScoreLockWrite<'a, Self::ScoreUpdate>; @@ -265,17 +276,17 @@ impl<'a, T: 'a + ScoreLookUp + ScoreUpdate> LockableScore<'a> for MultiThreadedL } #[cfg(c_bindings)] -impl Writeable for MultiThreadedLockableScore { +impl Writeable for MultiThreadedLockableScore { fn write(&self, writer: &mut W) -> Result<(), io::Error> { self.score.read().unwrap().write(writer) } } #[cfg(c_bindings)] -impl<'a, T: 'a + ScoreUpdate + ScoreLookUp> WriteableScore<'a> for MultiThreadedLockableScore {} +impl<'a, T: Score + 'a> WriteableScore<'a> for MultiThreadedLockableScore {} #[cfg(c_bindings)] -impl MultiThreadedLockableScore { +impl MultiThreadedLockableScore { /// Creates a new [`MultiThreadedLockableScore`] given an underlying [`Score`]. pub fn new(score: T) -> Self { MultiThreadedLockableScore { score: RwLock::new(score) } @@ -284,14 +295,14 @@ impl MultiThreadedLockableScore { #[cfg(c_bindings)] /// A locked `MultiThreadedLockableScore`. -pub struct MultiThreadedScoreLockRead<'a, T: ScoreLookUp>(RwLockReadGuard<'a, T>); +pub struct MultiThreadedScoreLockRead<'a, T: Score>(RwLockReadGuard<'a, T>); #[cfg(c_bindings)] /// A locked `MultiThreadedLockableScore`. 
-pub struct MultiThreadedScoreLockWrite<'a, T: ScoreUpdate>(RwLockWriteGuard<'a, T>); +pub struct MultiThreadedScoreLockWrite<'a, T: Score>(RwLockWriteGuard<'a, T>); #[cfg(c_bindings)] -impl<'a, T: 'a + ScoreLookUp> Deref for MultiThreadedScoreLockRead<'a, T> { +impl<'a, T: 'a + Score> Deref for MultiThreadedScoreLockRead<'a, T> { type Target = T; fn deref(&self) -> &Self::Target { @@ -300,14 +311,14 @@ impl<'a, T: 'a + ScoreLookUp> Deref for MultiThreadedScoreLockRead<'a, T> { } #[cfg(c_bindings)] -impl<'a, T: 'a + ScoreUpdate> Writeable for MultiThreadedScoreLockWrite<'a, T> { +impl<'a, T: Score> Writeable for MultiThreadedScoreLockWrite<'a, T> { fn write(&self, writer: &mut W) -> Result<(), io::Error> { self.0.write(writer) } } #[cfg(c_bindings)] -impl<'a, T: 'a + ScoreUpdate> Deref for MultiThreadedScoreLockWrite<'a, T> { +impl<'a, T: 'a + Score> Deref for MultiThreadedScoreLockWrite<'a, T> { type Target = T; fn deref(&self) -> &Self::Target { @@ -316,7 +327,7 @@ impl<'a, T: 'a + ScoreUpdate> Deref for MultiThreadedScoreLockWrite<'a, T> { } #[cfg(c_bindings)] -impl<'a, T: 'a + ScoreUpdate> DerefMut for MultiThreadedScoreLockWrite<'a, T> { +impl<'a, T: 'a + Score> DerefMut for MultiThreadedScoreLockWrite<'a, T> { fn deref_mut(&mut self) -> &mut Self::Target { self.0.deref_mut() } @@ -1417,6 +1428,10 @@ impl>, L: Deref, T: Time> ScoreUpdate for Prob } } +#[cfg(c_bindings)] +impl>, L: Deref, T: Time> Score for ProbabilisticScorerUsingTime +where L::Target: Logger {} + mod approx { const BITS: u32 = 64; const HIGHEST_BIT: u32 = BITS - 1; From 087c8f683a5816d738ac68784b62226f255a7471 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 25 Sep 2023 01:05:45 +0000 Subject: [PATCH 04/13] Avoid blanket impls in bindings builds The C bindings implements `Deref` for all traits, so having a blanket `Deref` implementation ends up conflicting with this. 
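As a rough sketch of the alternative the diff below adopts (hypothetical types, simplified from the real `MultiThreadedScoreLockRead`): rather than a blanket `impl<T: Deref> ScoreLookUp for T`, the lock guard forwards the trait methods explicitly, so nothing overlaps with the impls the bindings generator emits on its own.

```rust
use std::sync::{RwLock, RwLockReadGuard};

trait ScoreLookUp {
    fn channel_penalty_msat(&self, short_channel_id: u64) -> u64;
}

struct FixedScorer(u64);
impl ScoreLookUp for FixedScorer {
    fn channel_penalty_msat(&self, _scid: u64) -> u64 { self.0 }
}

// Read-locked wrapper in the spirit of `MultiThreadedScoreLockRead`: it
// forwards `ScoreLookUp` manually, so no blanket `Deref`-based impl is needed.
struct ScoreLockRead<'a, T: ScoreLookUp>(RwLockReadGuard<'a, T>);

impl<'a, T: ScoreLookUp> ScoreLookUp for ScoreLockRead<'a, T> {
    fn channel_penalty_msat(&self, scid: u64) -> u64 {
        self.0.channel_penalty_msat(scid)
    }
}

fn main() {
    let scorer = RwLock::new(FixedScorer(250));
    let read = ScoreLockRead(scorer.read().unwrap());
    assert_eq!(read.channel_penalty_msat(42), 250);
}
```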
--- lightning/src/routing/scoring.rs | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/lightning/src/routing/scoring.rs b/lightning/src/routing/scoring.rs index 1c3f90992..207e1d69b 100644 --- a/lightning/src/routing/scoring.rs +++ b/lightning/src/routing/scoring.rs @@ -133,6 +133,7 @@ pub trait Score : ScoreLookUp + ScoreUpdate $(+ $supertrait)* {} #[cfg(not(c_bindings))] impl Score for T {} +#[cfg(not(c_bindings))] impl> ScoreLookUp for T { type ScoreParams = S::ScoreParams; fn channel_penalty_msat( @@ -142,6 +143,7 @@ impl> ScoreLookUp for T { } } +#[cfg(not(c_bindings))] impl> ScoreUpdate for T { fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) { self.deref_mut().payment_path_failed(path, short_channel_id) @@ -310,6 +312,16 @@ impl<'a, T: 'a + Score> Deref for MultiThreadedScoreLockRead<'a, T> { } } +#[cfg(c_bindings)] +impl<'a, T: Score> ScoreLookUp for MultiThreadedScoreLockRead<'a, T> { + type ScoreParams = T::ScoreParams; + fn channel_penalty_msat(&self, short_channel_id: u64, source: &NodeId, + target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams + ) -> u64 { + self.0.channel_penalty_msat(short_channel_id, source, target, usage, score_params) + } +} + #[cfg(c_bindings)] impl<'a, T: Score> Writeable for MultiThreadedScoreLockWrite<'a, T> { fn write(&self, writer: &mut W) -> Result<(), io::Error> { @@ -333,6 +345,25 @@ impl<'a, T: 'a + Score> DerefMut for MultiThreadedScoreLockWrite<'a, T> { } } +#[cfg(c_bindings)] +impl<'a, T: Score> ScoreUpdate for MultiThreadedScoreLockWrite<'a, T> { + fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) { + self.0.payment_path_failed(path, short_channel_id) + } + + fn payment_path_successful(&mut self, path: &Path) { + self.0.payment_path_successful(path) + } + + fn probe_failed(&mut self, path: &Path, short_channel_id: u64) { + self.0.probe_failed(path, short_channel_id) + } + + fn probe_successful(&mut self, path: &Path) { + self.0.probe_successful(path) + } +} + /// Proposed use of a channel passed as a parameter to [`ScoreLookUp::channel_penalty_msat`]. #[derive(Clone, Copy, Debug, PartialEq)] From 095fca9293536e97538b5c157452f7453b9fcc9c Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 22 Sep 2023 01:44:57 +0000 Subject: [PATCH 05/13] Avoid ref to a `Vec` when accessing custom onion TLVs The bindings can't easily return a reference to a `Vec`, so we instead split the implementation into vec/slice depending on the `c_bindings` flag. --- lightning/src/ln/outbound_payment.rs | 16 ++++++++++++++++ lightning/src/ln/payment_tests.rs | 2 +- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index 1642f28ef..b806b138a 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -594,10 +594,26 @@ impl RecipientOnionFields { /// Note that if this field is non-empty, it will contain strictly increasing TLVs, each /// represented by a `(u64, Vec)` for its type number and serialized value respectively. /// This is validated when setting this field using [`Self::with_custom_tlvs`]. + #[cfg(not(c_bindings))] pub fn custom_tlvs(&self) -> &Vec<(u64, Vec)> { &self.custom_tlvs } + /// Gets the custom TLVs that will be sent or have been received. + /// + /// Custom TLVs allow sending extra application-specific data with a payment. 
They provide
+	/// additional flexibility on top of payment metadata, as while other implementations may
+	/// require `payment_metadata` to reflect metadata provided in an invoice, custom TLVs
+	/// do not have this restriction.
+	///
+	/// Note that if this field is non-empty, it will contain strictly increasing TLVs, each
+	/// represented by a `(u64, Vec<u8>)` for its type number and serialized value respectively.
+	/// This is validated when setting this field using [`Self::with_custom_tlvs`].
+	#[cfg(c_bindings)]
+	pub fn custom_tlvs(&self) -> Vec<(u64, Vec<u8>)> {
+		self.custom_tlvs.clone()
+	}
+
 	/// When we have received some HTLC(s) towards an MPP payment, as we receive further HTLC(s) we
 	/// have to make sure that some fields match exactly across the parts. For those that aren't
 	/// required to match, if they don't match we should remove them so as to not expose data
diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index 721767602..26ecbb0bd 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -3781,7 +3781,7 @@ fn test_retry_custom_tlvs() {
 		payment_hash, Some(payment_secret), events.pop().unwrap(), true, None).unwrap();
 	match payment_claimable {
 		Event::PaymentClaimable { onion_fields, .. } => {
-			assert_eq!(onion_fields.unwrap().custom_tlvs(), &custom_tlvs);
+			assert_eq!(&onion_fields.unwrap().custom_tlvs()[..], &custom_tlvs[..]);
 		},
 		_ => panic!("Unexpected event"),
 	};

From 9c78d8e90ef105b8708574fff38550aeeaee0232 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Sat, 23 Sep 2023 23:34:33 +0000
Subject: [PATCH 06/13] Drop unnecessary crate:: prefix when accessing `bitcoin` in macro

Unexported macros don't need to use the `$crate` prefix.
---
 lightning/src/offers/offer.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lightning/src/offers/offer.rs b/lightning/src/offers/offer.rs
index e0bc63e8b..60621b9dc 100644
--- a/lightning/src/offers/offer.rs
+++ b/lightning/src/offers/offer.rs
@@ -366,7 +366,7 @@ macro_rules! offer_accessors { ($self: ident, $contents: expr) => {
 	/// The chains that may be used when paying a requested invoice (e.g., bitcoin mainnet).
 	/// Payments must be denominated in units of the minimal lightning-payable unit (e.g., msats)
 	/// for the selected chain.
-	pub fn chains(&$self) -> Vec<$crate::bitcoin::blockdata::constants::ChainHash> {
+	pub fn chains(&$self) -> Vec<bitcoin::blockdata::constants::ChainHash> {
 		$contents.chains()
 	}
 
@@ -418,7 +418,7 @@ macro_rules! offer_accessors { ($self: ident, $contents: expr) => {
 	}
 
 	/// The public key used by the recipient to sign invoices.
-	pub fn signing_pubkey(&$self) -> $crate::bitcoin::secp256k1::PublicKey {
+	pub fn signing_pubkey(&$self) -> bitcoin::secp256k1::PublicKey {
 		$contents.signing_pubkey()
 	}
 } }

From 09cd4ed9760654ded3e6920b99606ccec026e8f6 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Sun, 24 Sep 2023 23:41:10 +0000
Subject: [PATCH 07/13] Mark `SpendableOutputDescriptor::to_psbt_input` as no-export

It's honestly likely not all that useful, as it's not materially
interoperable with other PSBT libraries. Instead, users should simply
fetch the full PSBT and use the inputs from it as they see fit.
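For a sense of the suggested flow, a small sketch using rust-bitcoin's PSBT type directly (this is not LDK API; the helper name is hypothetical and it assumes a rust-bitcoin vintage where `bitcoin::psbt::PartiallySignedTransaction` exposes `unsigned_tx` and `inputs` as public fields): given the full PSBT, the per-input data is already there to inspect or hand to another signer.

```rust
use bitcoin::psbt::PartiallySignedTransaction;

// Walk a PSBT's inputs alongside the unsigned transaction's inputs. Whatever
// produced the PSBT (e.g. a spendable-output sweep), the per-input data is
// available here without a descriptor-to-input conversion step.
fn describe_inputs(psbt: &PartiallySignedTransaction) {
    for (txin, psbt_input) in psbt.unsigned_tx.input.iter().zip(psbt.inputs.iter()) {
        println!(
            "spends {} (witness_utxo present: {})",
            txin.previous_output,
            psbt_input.witness_utxo.is_some()
        );
    }
}
```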
--- lightning/src/sign/mod.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs index 39f1001c4..0e81d481e 100644 --- a/lightning/src/sign/mod.rs +++ b/lightning/src/sign/mod.rs @@ -275,6 +275,9 @@ impl SpendableOutputDescriptor { /// /// Note that this does not include any signatures, just the information required to /// construct the transaction and sign it. + /// + /// This is not exported to bindings users as there is no standard serialization for an input. + /// See [`Self::create_spendable_outputs_psbt`] instead. pub fn to_psbt_input(&self) -> bitcoin::psbt::Input { match self { SpendableOutputDescriptor::StaticOutput { output, .. } => { From 6ce4c6cbc50966dae83cf23d727a3d4b48a64533 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 25 Sep 2023 22:14:04 +0000 Subject: [PATCH 08/13] Mark `AChannelManager` no-export The trait itself has no purpose for bindings, as all structs are concretized anyway. Further, the bindings have specific handling for generic bounding traits like this. --- lightning/src/ln/channelmanager.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 3f6a2a6b2..8c9b8337e 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -840,6 +840,9 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = >; /// A trivial trait which describes any [`ChannelManager`]. +/// +/// This is not exported to bindings users as general cover traits aren't useful in other +/// languages. pub trait AChannelManager { /// A type implementing [`chain::Watch`]. type Watch: chain::Watch + ?Sized; From 07205a2869bb057bfd53a33c0760cd46178b93ae Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 25 Sep 2023 22:10:27 +0000 Subject: [PATCH 09/13] Make `create_onion_message` a freestanding function The new `create_onion_message` function in `OnionMessenger` is hard to handle - it has various generic requirements indirectly via the struct, but they're not bounded by any of the method parameters. Thus, you can't simply call `OnionMessenger::create_onion_message`, as various bounds are not specified. Instead, we move it to a freestanding function so that it can be called directly without explicitly setting bounds. --- lightning/src/onion_message/messenger.rs | 125 +++++++++++------------ 1 file changed, 61 insertions(+), 64 deletions(-) diff --git a/lightning/src/onion_message/messenger.rs b/lightning/src/onion_message/messenger.rs index 2a9c830d3..03dc6a5b0 100644 --- a/lightning/src/onion_message/messenger.rs +++ b/lightning/src/onion_message/messenger.rs @@ -246,6 +246,64 @@ pub trait CustomOnionMessageHandler { fn read_custom_message(&self, message_type: u64, buffer: &mut R) -> Result, msgs::DecodeError>; } + +/// Create an onion message with contents `message` to the destination of `path`. +/// Returns (introduction_node_id, onion_msg) +pub fn create_onion_message( + entropy_source: &ES, node_signer: &NS, secp_ctx: &Secp256k1, + path: OnionMessagePath, message: OnionMessageContents, reply_path: Option, +) -> Result<(PublicKey, msgs::OnionMessage), SendError> +where + ES::Target: EntropySource, + NS::Target: NodeSigner, +{ + let OnionMessagePath { intermediate_nodes, mut destination } = path; + if let Destination::BlindedPath(BlindedPath { ref blinded_hops, .. 
}) = destination { + if blinded_hops.len() < 2 { + return Err(SendError::TooFewBlindedHops); + } + } + + if message.tlv_type() < 64 { return Err(SendError::InvalidMessage) } + + // If we are sending straight to a blinded path and we are the introduction node, we need to + // advance the blinded path by 1 hop so the second hop is the new introduction node. + if intermediate_nodes.len() == 0 { + if let Destination::BlindedPath(ref mut blinded_path) = destination { + let our_node_id = node_signer.get_node_id(Recipient::Node) + .map_err(|()| SendError::GetNodeIdFailed)?; + if blinded_path.introduction_node_id == our_node_id { + advance_path_by_one(blinded_path, node_signer, &secp_ctx) + .map_err(|()| SendError::BlindedPathAdvanceFailed)?; + } + } + } + + let blinding_secret_bytes = entropy_source.get_secure_random_bytes(); + let blinding_secret = SecretKey::from_slice(&blinding_secret_bytes[..]).expect("RNG is busted"); + let (introduction_node_id, blinding_point) = if intermediate_nodes.len() != 0 { + (intermediate_nodes[0], PublicKey::from_secret_key(&secp_ctx, &blinding_secret)) + } else { + match destination { + Destination::Node(pk) => (pk, PublicKey::from_secret_key(&secp_ctx, &blinding_secret)), + Destination::BlindedPath(BlindedPath { introduction_node_id, blinding_point, .. }) => + (introduction_node_id, blinding_point), + } + }; + let (packet_payloads, packet_keys) = packet_payloads_and_keys( + &secp_ctx, &intermediate_nodes, destination, message, reply_path, &blinding_secret) + .map_err(|e| SendError::Secp256k1(e))?; + + let prng_seed = entropy_source.get_secure_random_bytes(); + let onion_routing_packet = construct_onion_message_packet( + packet_payloads, packet_keys, prng_seed).map_err(|()| SendError::TooBigPacket)?; + + Ok((introduction_node_id, msgs::OnionMessage { + blinding_point, + onion_routing_packet + })) +} + impl OnionMessenger where @@ -283,13 +341,9 @@ where &self, path: OnionMessagePath, message: OnionMessageContents, reply_path: Option ) -> Result<(), SendError> { - let (introduction_node_id, onion_msg) = Self::create_onion_message( - &self.entropy_source, - &self.node_signer, - &self.secp_ctx, - path, - message, - reply_path + let (introduction_node_id, onion_msg) = create_onion_message( + &self.entropy_source, &self.node_signer, &self.secp_ctx, + path, message, reply_path )?; let mut pending_per_peer_msgs = self.pending_messages.lock().unwrap(); @@ -303,63 +357,6 @@ where } } - /// Create an onion message with contents `message` to the destination of `path`. - /// Returns (introduction_node_id, onion_msg) - pub fn create_onion_message( - entropy_source: &ES, - node_signer: &NS, - secp_ctx: &Secp256k1, - path: OnionMessagePath, - message: OnionMessageContents, - reply_path: Option, - ) -> Result<(PublicKey, msgs::OnionMessage), SendError> { - let OnionMessagePath { intermediate_nodes, mut destination } = path; - if let Destination::BlindedPath(BlindedPath { ref blinded_hops, .. }) = destination { - if blinded_hops.len() < 2 { - return Err(SendError::TooFewBlindedHops); - } - } - - if message.tlv_type() < 64 { return Err(SendError::InvalidMessage) } - - // If we are sending straight to a blinded path and we are the introduction node, we need to - // advance the blinded path by 1 hop so the second hop is the new introduction node. 
-		if intermediate_nodes.len() == 0 {
-			if let Destination::BlindedPath(ref mut blinded_path) = destination {
-				let our_node_id = node_signer.get_node_id(Recipient::Node)
-					.map_err(|()| SendError::GetNodeIdFailed)?;
-				if blinded_path.introduction_node_id == our_node_id {
-					advance_path_by_one(blinded_path, node_signer, &secp_ctx)
-						.map_err(|()| SendError::BlindedPathAdvanceFailed)?;
-				}
-			}
-		}
-
-		let blinding_secret_bytes = entropy_source.get_secure_random_bytes();
-		let blinding_secret = SecretKey::from_slice(&blinding_secret_bytes[..]).expect("RNG is busted");
-		let (introduction_node_id, blinding_point) = if intermediate_nodes.len() != 0 {
-			(intermediate_nodes[0], PublicKey::from_secret_key(&secp_ctx, &blinding_secret))
-		} else {
-			match destination {
-				Destination::Node(pk) => (pk, PublicKey::from_secret_key(&secp_ctx, &blinding_secret)),
-				Destination::BlindedPath(BlindedPath { introduction_node_id, blinding_point, .. }) =>
-					(introduction_node_id, blinding_point),
-			}
-		};
-		let (packet_payloads, packet_keys) = packet_payloads_and_keys(
-			&secp_ctx, &intermediate_nodes, destination, message, reply_path, &blinding_secret)
-			.map_err(|e| SendError::Secp256k1(e))?;
-
-		let prng_seed = entropy_source.get_secure_random_bytes();
-		let onion_routing_packet = construct_onion_message_packet(
-			packet_payloads, packet_keys, prng_seed).map_err(|()| SendError::TooBigPacket)?;
-
-		Ok((introduction_node_id, msgs::OnionMessage {
-			blinding_point,
-			onion_routing_packet
-		}))
-	}
-
 	fn respond_with_onion_message(
 		&self, response: OnionMessageContents, path_id: Option<[u8; 32]>,
 		reply_path: Option

From 34f36d5ffecc8920d7a6e5ab92c9d71171580e15 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Wed, 27 Sep 2023 23:02:50 +0000
Subject: [PATCH 10/13] Drop various bounds on types passed to `MonitorUpdatingPersister`

The new `MonitorUpdatingPersister` has a few redundant type bounds
(re-specified on functions after having been specified on the struct
itself), which we remove here. Further, it requires a `Deref` which is
`Clone`able. This is generally fine in Rust, but annoying in bindings,
so we simply elide it in favor of a `&Deref`.
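The `Clone`-versus-reference point in miniature (hypothetical names and a simplified `FeeEstimator`, not the persister's real signatures): taking `&F` lets one estimator handle be reused across many monitor reads with no `Clone` bound.

```rust
use core::ops::Deref;

trait FeeEstimator {
    fn sat_per_1000_weight(&self) -> u32;
}

struct StaticFeeEstimator(u32);
impl FeeEstimator for StaticFeeEstimator {
    fn sat_per_1000_weight(&self) -> u32 { self.0 }
}

// Old shape: the handle must be `Clone` so each per-monitor read can take its
// own copy of it.
fn read_all_old<F: Deref + Clone>(fee_estimator: F) where F::Target: FeeEstimator {
    for _ in 0..3 {
        read_one_old(fee_estimator.clone());
    }
}
fn read_one_old<F: Deref>(fee_estimator: F) where F::Target: FeeEstimator {
    let _ = fee_estimator.sat_per_1000_weight();
}

// New shape: take `&F` instead; a shared reference is trivially reusable, so
// the `Clone` bound (awkward to express in bindings) disappears.
fn read_all_new<F: Deref>(fee_estimator: &F) where F::Target: FeeEstimator {
    for _ in 0..3 {
        read_one_new(fee_estimator);
    }
}
fn read_one_new<F: Deref>(fee_estimator: &F) where F::Target: FeeEstimator {
    let _ = fee_estimator.sat_per_1000_weight();
}

fn main() {
    let fee_est = StaticFeeEstimator(253);
    read_all_old(&fee_est);
    read_all_new(&&fee_est);
}
```

This is also why the updated tests below pass `&&chanmon_cfgs[0].fee_estimator`: the parameter is `&F`, and `F` is itself already a reference type.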
--- fuzz/src/chanmon_consistency.rs | 2 +- lightning/src/chain/chainmonitor.rs | 2 +- lightning/src/chain/channelmonitor.rs | 8 +++--- lightning/src/util/persist.rs | 38 +++++++++++---------------- 4 files changed, 21 insertions(+), 29 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 6a2007165..2a1b9e9a7 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -155,7 +155,7 @@ impl chain::Watch for TestChainMonitor { }; let deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>:: read(&mut Cursor::new(&map_entry.get().1), (&*self.keys, &*self.keys)).unwrap().1; - deserialized_monitor.update_monitor(update, &&TestBroadcaster{}, &FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap(); + deserialized_monitor.update_monitor(update, &&TestBroadcaster{}, &&FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap(); let mut ser = VecWriter(Vec::new()); deserialized_monitor.write(&mut ser).unwrap(); map_entry.insert((update.update_id, ser.0)); diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 261c0471c..aa9508d23 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -767,7 +767,7 @@ where C::Target: chain::Filter, Some(monitor_state) => { let monitor = &monitor_state.monitor; log_trace!(self.logger, "Updating ChannelMonitor for channel {}", log_funding_info!(monitor)); - let update_res = monitor.update_monitor(update, &self.broadcaster, &*self.fee_estimator, &self.logger); + let update_res = monitor.update_monitor(update, &self.broadcaster, &self.fee_estimator, &self.logger); let update_id = MonitorUpdateId::from_monitor_update(update); let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap(); diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index a0ac1657e..bd0c15484 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -1311,7 +1311,7 @@ impl ChannelMonitor { &self, updates: &ChannelMonitorUpdate, broadcaster: &B, - fee_estimator: F, + fee_estimator: &F, logger: &L, ) -> Result<(), ()> where @@ -2615,7 +2615,7 @@ impl ChannelMonitorImpl { self.pending_monitor_events.push(MonitorEvent::HolderForceClosed(self.funding_info.0)); } - pub fn update_monitor(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: F, logger: &L) -> Result<(), ()> + pub fn update_monitor(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &L) -> Result<(), ()> where B::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, @@ -2655,7 +2655,7 @@ impl ChannelMonitorImpl { panic!("Attempted to apply ChannelMonitorUpdates out of order, check the update_id before passing an update to update_monitor!"); } let mut ret = Ok(()); - let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&*fee_estimator); + let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&**fee_estimator); for update in updates.updates.iter() { match update { ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { commitment_tx, htlc_outputs, claimed_htlcs, nondust_htlc_sources } => { @@ -4581,7 +4581,7 @@ mod tests { let broadcaster = TestBroadcaster::with_blocks(Arc::clone(&nodes[1].blocks)); assert!( - pre_update_monitor.update_monitor(&replay_update, &&broadcaster, &chanmon_cfgs[1].fee_estimator, &nodes[1].logger) + 
pre_update_monitor.update_monitor(&replay_update, &&broadcaster, &&chanmon_cfgs[1].fee_estimator, &nodes[1].logger) .is_err()); // Even though we error'd on the first update, we should still have generated an HTLC claim // transaction diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 2a022c37c..35e473c42 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -397,11 +397,7 @@ where pub fn new( kv_store: K, logger: L, maximum_pending_updates: u64, entropy_source: ES, signer_provider: SP, - ) -> Self - where - ES::Target: EntropySource + Sized, - SP::Target: SignerProvider + Sized, - { + ) -> Self { MonitorUpdatingPersister { kv_store, logger, @@ -416,12 +412,10 @@ where /// It is extremely important that your [`KVStore::read`] implementation uses the /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the /// documentation for [`MonitorUpdatingPersister`]. - pub fn read_all_channel_monitors_with_updates( - &self, broadcaster: B, fee_estimator: F, + pub fn read_all_channel_monitors_with_updates( + &self, broadcaster: &B, fee_estimator: &F, ) -> Result::Signer>)>, io::Error> where - ES::Target: EntropySource + Sized, - SP::Target: SignerProvider + Sized, B::Target: BroadcasterInterface, F::Target: FeeEstimator, { @@ -432,8 +426,8 @@ where let mut res = Vec::with_capacity(monitor_list.len()); for monitor_key in monitor_list { res.push(self.read_channel_monitor_with_updates( - &broadcaster, - fee_estimator.clone(), + broadcaster, + fee_estimator, monitor_key, )?) } @@ -457,12 +451,10 @@ where /// /// Loading a large number of monitors will be faster if done in parallel. You can use this /// function to accomplish this. Take care to limit the number of parallel readers. - pub fn read_channel_monitor_with_updates( - &self, broadcaster: &B, fee_estimator: F, monitor_key: String, + pub fn read_channel_monitor_with_updates( + &self, broadcaster: &B, fee_estimator: &F, monitor_key: String, ) -> Result<(BlockHash, ChannelMonitor<::Signer>), io::Error> where - ES::Target: EntropySource + Sized, - SP::Target: SignerProvider + Sized, B::Target: BroadcasterInterface, F::Target: FeeEstimator, { @@ -484,7 +476,7 @@ where Err(err) => return Err(err), }; - monitor.update_monitor(&update, broadcaster, fee_estimator.clone(), &self.logger) + monitor.update_monitor(&update, broadcaster, fee_estimator, &self.logger) .map_err(|e| { log_error!( self.logger, @@ -949,17 +941,17 @@ mod tests { // Check that the persisted channel data is empty before any channels are // open. let mut persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates( - broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap(); + &broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap(); assert_eq!(persisted_chan_data_0.len(), 0); let mut persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates( - broadcaster_1, &chanmon_cfgs[1].fee_estimator).unwrap(); + &broadcaster_1, &&chanmon_cfgs[1].fee_estimator).unwrap(); assert_eq!(persisted_chan_data_1.len(), 0); // Helper to make sure the channel is on the expected update ID. macro_rules! 
check_persisted_data { ($expected_update_id: expr) => { persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates( - broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap(); + &broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap(); // check that we stored only one monitor assert_eq!(persisted_chan_data_0.len(), 1); for (_, mon) in persisted_chan_data_0.iter() { @@ -978,7 +970,7 @@ mod tests { } } persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates( - broadcaster_1, &chanmon_cfgs[1].fee_estimator).unwrap(); + &broadcaster_1, &&chanmon_cfgs[1].fee_estimator).unwrap(); assert_eq!(persisted_chan_data_1.len(), 1); for (_, mon) in persisted_chan_data_1.iter() { assert_eq!(mon.get_latest_update_id(), $expected_update_id); @@ -1043,7 +1035,7 @@ mod tests { check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID); // Make sure the expected number of stale updates is present. - let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap(); + let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap(); let (_, monitor) = &persisted_chan_data[0]; let monitor_name = MonitorName::from(monitor.get_funding_txo().0); // The channel should have 0 updates, as it wrote a full monitor and consolidated. @@ -1151,7 +1143,7 @@ mod tests { // Check that the persisted channel data is empty before any channels are // open. - let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap(); + let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap(); assert_eq!(persisted_chan_data.len(), 0); // Create some initial channel @@ -1162,7 +1154,7 @@ mod tests { send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000); // Get the monitor and make a fake stale update at update_id=1 (lowest height of an update possible) - let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap(); + let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap(); let (_, monitor) = &persisted_chan_data[0]; let monitor_name = MonitorName::from(monitor.get_funding_txo().0); persister_0 From add71d42a099a84f8ebfaf9d364bff72329fe2bc Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 28 Sep 2023 02:45:00 +0000 Subject: [PATCH 11/13] Use `crate::prelude::*` to load `Vec`, rather than `crate::Vec` This is kinda dumb, but the bindings get confused when referring to `Vec` absolutely in a `use` statement, and there's no reason not to load our prelude everywhere. --- lightning-invoice/src/payment.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lightning-invoice/src/payment.rs b/lightning-invoice/src/payment.rs index 08be3d54b..024791363 100644 --- a/lightning-invoice/src/payment.rs +++ b/lightning-invoice/src/payment.rs @@ -9,7 +9,8 @@ //! Convenient utilities for paying Lightning invoices. 
-use crate::{Bolt11Invoice, Vec}; +use crate::Bolt11Invoice; +use crate::prelude::*; use bitcoin_hashes::Hash; From 6b75fafaf3c5b480e56340e7cedb59e1cb9ec4d7 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 30 Sep 2023 17:47:45 +0000 Subject: [PATCH 12/13] Fix new unused variable warning in `test_utils.rs` --- lightning/src/util/test_utils.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index ae942a058..bd66e4cae 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -423,7 +423,7 @@ impl chainmonitor::Persist fo chain::ChannelMonitorUpdateStatus::Completed } - fn update_persisted_channel(&self, funding_txo: OutPoint, update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor, update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus { + fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor, update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus { let mut ret = chain::ChannelMonitorUpdateStatus::Completed; if let Some(update_ret) = self.update_rets.lock().unwrap().pop_front() { ret = update_ret; From 7a583bb756ece7474065d8c827a2864b641378bd Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 1 Oct 2023 00:00:37 +0000 Subject: [PATCH 13/13] Remove unused `mem::drop` which drops a reference --- lightning/src/chain/chainmonitor.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index aa9508d23..e87d082d9 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -324,7 +324,6 @@ where C::Target: chain::Filter, if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state).is_err() { // Take the monitors lock for writing so that we poison it and any future // operations going forward fail immediately. - core::mem::drop(monitor_state); core::mem::drop(monitor_lock); let _poison = self.monitors.write().unwrap(); log_error!(self.logger, "{}", err_str);
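For context on why that call was removable, a standalone illustration (names are not from LDK): `drop` takes its argument by value, so handing it a shared reference only discards the reference; it does not release a lock or drop the value behind it, and recent compilers warn that such a call does nothing.

```rust
use std::sync::Mutex;

fn main() {
    let monitors = Mutex::new(vec![1u32, 2, 3]);

    let guard = monitors.lock().unwrap();
    let first = &guard[0];

    // A no-op: only the `&u32` itself is consumed; the guard (and the lock it
    // holds) is completely unaffected. Recent rustc warns on this pattern.
    core::mem::drop(first);
    assert_eq!(guard[0], 1); // the guard is still alive and usable

    // Dropping the guard itself is what actually releases the mutex.
    core::mem::drop(guard);
    assert!(monitors.try_lock().is_ok());
}
```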