Mirror of https://github.com/lightningdevkit/rust-lightning.git
Merge pull request #2595 from TheBlueMatt/2023-09-117-bindings-part1
Various cleanups to make bindings generation simpler
commit 04841acb86
@@ -155,7 +155,7 @@ impl chain::Watch<TestChannelSigner> for TestChainMonitor {
};
let deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::
read(&mut Cursor::new(&map_entry.get().1), (&*self.keys, &*self.keys)).unwrap().1;
deserialized_monitor.update_monitor(update, &&TestBroadcaster{}, &FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap();
deserialized_monitor.update_monitor(update, &&TestBroadcaster{}, &&FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap();
let mut ser = VecWriter(Vec::new());
deserialized_monitor.write(&mut ser).unwrap();
map_entry.insert((update.update_id, ser.0));

@@ -1181,7 +1181,7 @@ mod tests {
let network_graph = Arc::new(NetworkGraph::new(network, logger.clone()));
let scorer = Arc::new(Mutex::new(TestScorer::new()));
let seed = [i as u8; 32];
let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone(), ()));
let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone(), Default::default()));
let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Bitcoin));
let kv_store = Arc::new(FilesystemStore::new(format!("{}_persister_{}", &persist_dir, i).into()));
let now = Duration::from_secs(genesis_block.header.time as u64);

@@ -9,7 +9,8 @@
//! Convenient utilities for paying Lightning invoices.

use crate::{Bolt11Invoice, Vec};
use crate::Bolt11Invoice;
use crate::prelude::*;

use bitcoin_hashes::Hash;

@@ -324,7 +324,6 @@ where C::Target: chain::Filter,
if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state).is_err() {
// Take the monitors lock for writing so that we poison it and any future
// operations going forward fail immediately.
core::mem::drop(monitor_state);
core::mem::drop(monitor_lock);
let _poison = self.monitors.write().unwrap();
log_error!(self.logger, "{}", err_str);
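
Illustration (not part of the diff): the comment retained above describes deliberately poisoning the `monitors` lock after a failed update so that later operations fail fast. A minimal standalone sketch of that pattern follows; it assumes, as the comment implies, that the real code goes on to panic while holding the write lock, which is what actually poisons an `RwLock`.

use std::sync::RwLock;

// Toy version of the "poison the lock before failing" pattern described above:
// release the read guard, take the write guard, then panic while holding it so
// the RwLock is poisoned and every later accessor fails immediately.
fn fail_and_poison(monitors: &RwLock<Vec<u32>>) {
    let monitor_lock = monitors.read().unwrap();
    let update_failed = monitor_lock.is_empty(); // stand-in for a failed monitor update
    if update_failed {
        drop(monitor_lock);                      // like core::mem::drop(monitor_lock)
        let _poison = monitors.write().unwrap(); // like `let _poison = self.monitors.write().unwrap();`
        panic!("monitor update failed");
    }
}

fn main() {
    let monitors = RwLock::new(Vec::new());
    assert!(std::panic::catch_unwind(|| fail_and_poison(&monitors)).is_err());
    // The panic happened while the write guard was held, so the lock is now
    // poisoned and later reads surface the earlier failure right away.
    assert!(monitors.read().is_err());
}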
@@ -767,7 +766,7 @@ where C::Target: chain::Filter,
Some(monitor_state) => {
let monitor = &monitor_state.monitor;
log_trace!(self.logger, "Updating ChannelMonitor for channel {}", log_funding_info!(monitor));
let update_res = monitor.update_monitor(update, &self.broadcaster, &*self.fee_estimator, &self.logger);
let update_res = monitor.update_monitor(update, &self.broadcaster, &self.fee_estimator, &self.logger);

let update_id = MonitorUpdateId::from_monitor_update(update);
let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();

@@ -1311,7 +1311,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
&self,
updates: &ChannelMonitorUpdate,
broadcaster: &B,
fee_estimator: F,
fee_estimator: &F,
logger: &L,
) -> Result<(), ()>
where

@@ -2615,7 +2615,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
self.pending_monitor_events.push(MonitorEvent::HolderForceClosed(self.funding_info.0));
}

pub fn update_monitor<B: Deref, F: Deref, L: Deref>(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: F, logger: &L) -> Result<(), ()>
pub fn update_monitor<B: Deref, F: Deref, L: Deref>(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &L) -> Result<(), ()>
where B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,

@@ -2655,7 +2655,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
panic!("Attempted to apply ChannelMonitorUpdates out of order, check the update_id before passing an update to update_monitor!");
}
let mut ret = Ok(());
let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&*fee_estimator);
let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&**fee_estimator);
for update in updates.updates.iter() {
match update {
ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { commitment_tx, htlc_outputs, claimed_htlcs, nondust_htlc_sources } => {

@@ -4581,7 +4581,7 @@ mod tests {

let broadcaster = TestBroadcaster::with_blocks(Arc::clone(&nodes[1].blocks));
assert!(
pre_update_monitor.update_monitor(&replay_update, &&broadcaster, &chanmon_cfgs[1].fee_estimator, &nodes[1].logger)
pre_update_monitor.update_monitor(&replay_update, &&broadcaster, &&chanmon_cfgs[1].fee_estimator, &nodes[1].logger)
.is_err());
// Even though we error'd on the first update, we should still have generated an HTLC claim
// transaction
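
Illustration (not part of the diff): the hunks above are one mechanical change, `update_monitor` now borrows the fee estimator as `&F` rather than taking a by-value `F: Deref`, which is easier to express in generated bindings. The simplified sketch below (stand-in trait and types, not LDK's real API) shows why call sites grow an extra `&` (so `&FuzzEstimator { .. }` becomes `&&FuzzEstimator { .. }`) and why the body needs `&**fee_estimator` to reach the underlying estimator.

use std::ops::Deref;

// Simplified stand-ins for LDK's FeeEstimator and a fixed-rate estimator.
trait FeeEstimator {
    fn sat_per_1000_weight(&self) -> u32;
}

struct StaticFeeEstimator(u32);
impl FeeEstimator for StaticFeeEstimator {
    fn sat_per_1000_weight(&self) -> u32 { self.0 }
}

// Mirrors the new shape: the estimator arrives as `&F` where
// `F: Deref` and `F::Target: FeeEstimator`, not as a by-value `F`.
fn use_fee_estimator<F: Deref>(fee_estimator: &F) -> u32
where
    F::Target: FeeEstimator,
{
    // `&**fee_estimator` goes &F -> F -> F::Target, matching the
    // `LowerBoundedFeeEstimator::new(&**fee_estimator)` change above.
    let inner: &F::Target = &**fee_estimator;
    inner.sat_per_1000_weight()
}

fn main() {
    let est = StaticFeeEstimator(253);
    // A plain value does not implement Deref, but `&est` does
    // (`&T: Deref<Target = T>`), hence the double `&&` at call sites.
    assert_eq!(use_fee_estimator(&&est), 253);
}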
@@ -840,6 +840,9 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> =
>;

/// A trivial trait which describes any [`ChannelManager`].
///
/// This is not exported to bindings users as general cover traits aren't useful in other
/// languages.
pub trait AChannelManager {
/// A type implementing [`chain::Watch`].
type Watch: chain::Watch<Self::Signer> + ?Sized;

@@ -594,10 +594,26 @@ impl RecipientOnionFields {
/// Note that if this field is non-empty, it will contain strictly increasing TLVs, each
/// represented by a `(u64, Vec<u8>)` for its type number and serialized value respectively.
/// This is validated when setting this field using [`Self::with_custom_tlvs`].
#[cfg(not(c_bindings))]
pub fn custom_tlvs(&self) -> &Vec<(u64, Vec<u8>)> {
&self.custom_tlvs
}

/// Gets the custom TLVs that will be sent or have been received.
///
/// Custom TLVs allow sending extra application-specific data with a payment. They provide
/// additional flexibility on top of payment metadata, as while other implementations may
/// require `payment_metadata` to reflect metadata provided in an invoice, custom TLVs
/// do not have this restriction.
///
/// Note that if this field is non-empty, it will contain strictly increasing TLVs, each
/// represented by a `(u64, Vec<u8>)` for its type number and serialized value respectively.
/// This is validated when setting this field using [`Self::with_custom_tlvs`].
#[cfg(c_bindings)]
pub fn custom_tlvs(&self) -> Vec<(u64, Vec<u8>)> {
self.custom_tlvs.clone()
}

/// When we have received some HTLC(s) towards an MPP payment, as we receive further HTLC(s) we
/// have to make sure that some fields match exactly across the parts. For those that aren't
/// required to match, if they don't match we should remove them so as to not expose data
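
Illustration (not part of the diff): a rough usage sketch for the accessor documented above. It assumes `RecipientOnionFields` is re-exported at `lightning::ln::channelmanager`, that a `spontaneous_empty()` constructor exists, and that `with_custom_tlvs` returns a `Result`; treat the exact names and paths as assumptions rather than a verified API.

use lightning::ln::channelmanager::RecipientOnionFields;

fn main() {
    // Hypothetical application records; type numbers kept strictly increasing,
    // which `with_custom_tlvs` is documented above to validate.
    let tlvs = vec![(65_537u64, vec![0xde, 0xad]), (65_539, b"memo".to_vec())];
    let onion = RecipientOnionFields::spontaneous_empty() // assumed constructor
        .with_custom_tlvs(tlvs)
        .expect("TLV type numbers must be strictly increasing");
    // Under `c_bindings` this returns an owned Vec; otherwise a &Vec.
    assert_eq!(onion.custom_tlvs().len(), 2);
}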
@@ -3781,7 +3781,7 @@ fn test_retry_custom_tlvs() {
payment_hash, Some(payment_secret), events.pop().unwrap(), true, None).unwrap();
match payment_claimable {
Event::PaymentClaimable { onion_fields, .. } => {
assert_eq!(onion_fields.unwrap().custom_tlvs(), &custom_tlvs);
assert_eq!(&onion_fields.unwrap().custom_tlvs()[..], &custom_tlvs[..]);
},
_ => panic!("Unexpected event"),
};

@@ -366,7 +366,7 @@ macro_rules! offer_accessors { ($self: ident, $contents: expr) => {
/// The chains that may be used when paying a requested invoice (e.g., bitcoin mainnet).
/// Payments must be denominated in units of the minimal lightning-payable unit (e.g., msats)
/// for the selected chain.
pub fn chains(&$self) -> Vec<$crate::bitcoin::blockdata::constants::ChainHash> {
pub fn chains(&$self) -> Vec<bitcoin::blockdata::constants::ChainHash> {
$contents.chains()
}

@@ -418,7 +418,7 @@ macro_rules! offer_accessors { ($self: ident, $contents: expr) => {
}

/// The public key used by the recipient to sign invoices.
pub fn signing_pubkey(&$self) -> $crate::bitcoin::secp256k1::PublicKey {
pub fn signing_pubkey(&$self) -> bitcoin::secp256k1::PublicKey {
$contents.signing_pubkey()
}
} }

@@ -246,6 +246,64 @@ pub trait CustomOnionMessageHandler {
fn read_custom_message<R: io::Read>(&self, message_type: u64, buffer: &mut R) -> Result<Option<Self::CustomMessage>, msgs::DecodeError>;
}

/// Create an onion message with contents `message` to the destination of `path`.
/// Returns (introduction_node_id, onion_msg)
pub fn create_onion_message<ES: Deref, NS: Deref, T: CustomOnionMessageContents>(
entropy_source: &ES, node_signer: &NS, secp_ctx: &Secp256k1<secp256k1::All>,
path: OnionMessagePath, message: OnionMessageContents<T>, reply_path: Option<BlindedPath>,
) -> Result<(PublicKey, msgs::OnionMessage), SendError>
where
ES::Target: EntropySource,
NS::Target: NodeSigner,
{
let OnionMessagePath { intermediate_nodes, mut destination } = path;
if let Destination::BlindedPath(BlindedPath { ref blinded_hops, .. }) = destination {
if blinded_hops.len() < 2 {
return Err(SendError::TooFewBlindedHops);
}
}

if message.tlv_type() < 64 { return Err(SendError::InvalidMessage) }

// If we are sending straight to a blinded path and we are the introduction node, we need to
// advance the blinded path by 1 hop so the second hop is the new introduction node.
if intermediate_nodes.len() == 0 {
if let Destination::BlindedPath(ref mut blinded_path) = destination {
let our_node_id = node_signer.get_node_id(Recipient::Node)
.map_err(|()| SendError::GetNodeIdFailed)?;
if blinded_path.introduction_node_id == our_node_id {
advance_path_by_one(blinded_path, node_signer, &secp_ctx)
.map_err(|()| SendError::BlindedPathAdvanceFailed)?;
}
}
}

let blinding_secret_bytes = entropy_source.get_secure_random_bytes();
let blinding_secret = SecretKey::from_slice(&blinding_secret_bytes[..]).expect("RNG is busted");
let (introduction_node_id, blinding_point) = if intermediate_nodes.len() != 0 {
(intermediate_nodes[0], PublicKey::from_secret_key(&secp_ctx, &blinding_secret))
} else {
match destination {
Destination::Node(pk) => (pk, PublicKey::from_secret_key(&secp_ctx, &blinding_secret)),
Destination::BlindedPath(BlindedPath { introduction_node_id, blinding_point, .. }) =>
(introduction_node_id, blinding_point),
}
};
let (packet_payloads, packet_keys) = packet_payloads_and_keys(
&secp_ctx, &intermediate_nodes, destination, message, reply_path, &blinding_secret)
.map_err(|e| SendError::Secp256k1(e))?;

let prng_seed = entropy_source.get_secure_random_bytes();
let onion_routing_packet = construct_onion_message_packet(
packet_payloads, packet_keys, prng_seed).map_err(|()| SendError::TooBigPacket)?;

Ok((introduction_node_id, msgs::OnionMessage {
blinding_point,
onion_routing_packet
}))
}

impl<ES: Deref, NS: Deref, L: Deref, MR: Deref, OMH: Deref, CMH: Deref>
OnionMessenger<ES, NS, L, MR, OMH, CMH>
where
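
Illustration (not part of the diff): the hunk above lifts `create_onion_message` out of `OnionMessenger` into a free function so bindings can expose it without the messenger's full generic type. The toy example below (invented names, not LDK's API) shows the general refactor pattern: the construction logic becomes a free function taking its dependencies as `Deref` parameters, and the method simply delegates to it.

use std::ops::Deref;

// Toy names, not LDK's API: the construction logic lives in a free function
// that takes its dependencies as `Deref` parameters, and the struct method
// merely forwards to it, so bindings can call the free function directly.
trait EntropySource { fn get_secure_random_bytes(&self) -> [u8; 32]; }

struct Message(Vec<u8>);

fn create_message<ES: Deref>(entropy_source: &ES, payload: &[u8]) -> Message
where
    ES::Target: EntropySource,
{
    let mut bytes = entropy_source.get_secure_random_bytes().to_vec();
    bytes.extend_from_slice(payload);
    Message(bytes)
}

struct Messenger<ES: Deref> where ES::Target: EntropySource {
    entropy_source: ES,
}

impl<ES: Deref> Messenger<ES> where ES::Target: EntropySource {
    // The method keeps its old signature but now just delegates.
    fn send_message(&self, payload: &[u8]) -> Message {
        create_message(&self.entropy_source, payload)
    }
}

struct FixedEntropy;
impl EntropySource for FixedEntropy {
    fn get_secure_random_bytes(&self) -> [u8; 32] { [7u8; 32] }
}

fn main() {
    let messenger = Messenger { entropy_source: &FixedEntropy };
    assert_eq!(messenger.send_message(b"hi").0.len(), 34);
}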
@@ -283,13 +341,9 @@ where
&self, path: OnionMessagePath, message: OnionMessageContents<T>,
reply_path: Option<BlindedPath>
) -> Result<(), SendError> {
let (introduction_node_id, onion_msg) = Self::create_onion_message(
&self.entropy_source,
&self.node_signer,
&self.secp_ctx,
path,
message,
reply_path
let (introduction_node_id, onion_msg) = create_onion_message(
&self.entropy_source, &self.node_signer, &self.secp_ctx,
path, message, reply_path
)?;

let mut pending_per_peer_msgs = self.pending_messages.lock().unwrap();

@@ -303,63 +357,6 @@ where
}
}

/// Create an onion message with contents `message` to the destination of `path`.
/// Returns (introduction_node_id, onion_msg)
pub fn create_onion_message<T: CustomOnionMessageContents>(
entropy_source: &ES,
node_signer: &NS,
secp_ctx: &Secp256k1<secp256k1::All>,
path: OnionMessagePath,
message: OnionMessageContents<T>,
reply_path: Option<BlindedPath>,
) -> Result<(PublicKey, msgs::OnionMessage), SendError> {
let OnionMessagePath { intermediate_nodes, mut destination } = path;
if let Destination::BlindedPath(BlindedPath { ref blinded_hops, .. }) = destination {
if blinded_hops.len() < 2 {
return Err(SendError::TooFewBlindedHops);
}
}

if message.tlv_type() < 64 { return Err(SendError::InvalidMessage) }

// If we are sending straight to a blinded path and we are the introduction node, we need to
// advance the blinded path by 1 hop so the second hop is the new introduction node.
if intermediate_nodes.len() == 0 {
if let Destination::BlindedPath(ref mut blinded_path) = destination {
let our_node_id = node_signer.get_node_id(Recipient::Node)
.map_err(|()| SendError::GetNodeIdFailed)?;
if blinded_path.introduction_node_id == our_node_id {
advance_path_by_one(blinded_path, node_signer, &secp_ctx)
.map_err(|()| SendError::BlindedPathAdvanceFailed)?;
}
}
}

let blinding_secret_bytes = entropy_source.get_secure_random_bytes();
let blinding_secret = SecretKey::from_slice(&blinding_secret_bytes[..]).expect("RNG is busted");
let (introduction_node_id, blinding_point) = if intermediate_nodes.len() != 0 {
(intermediate_nodes[0], PublicKey::from_secret_key(&secp_ctx, &blinding_secret))
} else {
match destination {
Destination::Node(pk) => (pk, PublicKey::from_secret_key(&secp_ctx, &blinding_secret)),
Destination::BlindedPath(BlindedPath { introduction_node_id, blinding_point, .. }) =>
(introduction_node_id, blinding_point),
}
};
let (packet_payloads, packet_keys) = packet_payloads_and_keys(
&secp_ctx, &intermediate_nodes, destination, message, reply_path, &blinding_secret)
.map_err(|e| SendError::Secp256k1(e))?;

let prng_seed = entropy_source.get_secure_random_bytes();
let onion_routing_packet = construct_onion_message_packet(
packet_payloads, packet_keys, prng_seed).map_err(|()| SendError::TooBigPacket)?;

Ok((introduction_node_id, msgs::OnionMessage {
blinding_point,
onion_routing_packet
}))
}

fn respond_with_onion_message<T: CustomOnionMessageContents>(
&self, response: OnionMessageContents<T>, path_id: Option<[u8; 32]>,
reply_path: Option<BlindedPath>

@@ -112,12 +112,12 @@ pub trait Router {
/// [`find_route`].
///
/// [`ScoreLookUp`]: crate::routing::scoring::ScoreLookUp
pub struct ScorerAccountingForInFlightHtlcs<'a, SP: Sized, Sc: 'a + ScoreLookUp<ScoreParams = SP>, S: Deref<Target = Sc>> {
pub struct ScorerAccountingForInFlightHtlcs<'a, S: Deref> where S::Target: ScoreLookUp {
scorer: S,
// Maps a channel's short channel id and its direction to the liquidity used up.
inflight_htlcs: &'a InFlightHtlcs,
}
impl<'a, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>, S: Deref<Target = Sc>> ScorerAccountingForInFlightHtlcs<'a, SP, Sc, S> {
impl<'a, S: Deref> ScorerAccountingForInFlightHtlcs<'a, S> where S::Target: ScoreLookUp {
/// Initialize a new `ScorerAccountingForInFlightHtlcs`.
pub fn new(scorer: S, inflight_htlcs: &'a InFlightHtlcs) -> Self {
ScorerAccountingForInFlightHtlcs {

@@ -127,13 +127,8 @@ impl<'a, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>, S: Deref<Target = Sc>> Sc
}
}

#[cfg(c_bindings)]
impl<'a, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>, S: Deref<Target = Sc>> Writeable for ScorerAccountingForInFlightHtlcs<'a, SP, Sc, S> {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> { self.scorer.write(writer) }
}

impl<'a, SP: Sized, Sc: 'a + ScoreLookUp<ScoreParams = SP>, S: Deref<Target = Sc>> ScoreLookUp for ScorerAccountingForInFlightHtlcs<'a, SP, Sc, S> {
type ScoreParams = Sc::ScoreParams;
impl<'a, S: Deref> ScoreLookUp for ScorerAccountingForInFlightHtlcs<'a, S> where S::Target: ScoreLookUp {
type ScoreParams = <S::Target as ScoreLookUp>::ScoreParams;
fn channel_penalty_msat(&self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams) -> u64 {
if let Some(used_liquidity) = self.inflight_htlcs.used_liquidity_msat(
source, target, short_channel_id
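
Illustration (not part of the diff): `ScorerAccountingForInFlightHtlcs` drops the separate `SP`/`Sc` type parameters in favour of a single `S: Deref` with the scoring bound on `S::Target`, the shape bindings generators handle best. A simplified standalone version of the resulting pattern (toy trait, not LDK's real `ScoreLookUp`):

use std::ops::Deref;

// Toy stand-ins for the real trait and wrapper (simplified signatures).
trait ScoreLookUp {
    type ScoreParams;
    fn channel_penalty_msat(&self, short_channel_id: u64, params: &Self::ScoreParams) -> u64;
}

// One generic `S: Deref` with the scoring bound on its target, instead of the
// old separate `SP`/`Sc`/`S` parameters.
struct InFlightWrapper<S: Deref> where S::Target: ScoreLookUp {
    scorer: S,
}

impl<S: Deref> ScoreLookUp for InFlightWrapper<S> where S::Target: ScoreLookUp {
    type ScoreParams = <S::Target as ScoreLookUp>::ScoreParams;
    fn channel_penalty_msat(&self, short_channel_id: u64, params: &Self::ScoreParams) -> u64 {
        // Forward to the wrapped scorer; the real type also accounts for
        // liquidity already used by in-flight HTLCs.
        self.scorer.channel_penalty_msat(short_channel_id, params)
    }
}

struct FixedScorer(u64);
impl ScoreLookUp for FixedScorer {
    type ScoreParams = ();
    fn channel_penalty_msat(&self, _scid: u64, _params: &()) -> u64 { self.0 }
}

fn main() {
    let scorer = FixedScorer(100);
    let wrapped = InFlightWrapper { scorer: &scorer };
    assert_eq!(wrapped.channel_penalty_msat(42, &()), 100);
}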
@@ -89,7 +89,7 @@ macro_rules! define_score { ($($supertrait: path)*) => {
/// `ScoreLookUp` is used to determine the penalty for a given channel.
///
/// Scoring is in terms of fees willing to be paid in order to avoid routing through a channel.
pub trait ScoreLookUp $(: $supertrait)* {
pub trait ScoreLookUp {
/// A configurable type which should contain various passed-in parameters for configuring the scorer,
/// on a per-routefinding-call basis through to the scorer methods,
/// which are used to determine the parameters for the suitability of channels for use.

@@ -108,7 +108,7 @@ pub trait ScoreLookUp $(: $supertrait)* {
}

/// `ScoreUpdate` is used to update the scorer's internal state after a payment attempt.
pub trait ScoreUpdate $(: $supertrait)* {
pub trait ScoreUpdate {
/// Handles updating channel penalties after failing to route through a channel.
fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64);

@@ -122,8 +122,20 @@ pub trait ScoreUpdate $(: $supertrait)* {
fn probe_successful(&mut self, path: &Path);
}

impl<SP: Sized, S: ScoreLookUp<ScoreParams = SP>, T: Deref<Target=S> $(+ $supertrait)*> ScoreLookUp for T {
type ScoreParams = SP;
/// A trait which can both lookup and update routing channel penalty scores.
///
/// This is used in places where both bounds are required and implemented for all types which
/// implement [`ScoreLookUp`] and [`ScoreUpdate`].
///
/// Bindings users may need to manually implement this for their custom scoring implementations.
pub trait Score : ScoreLookUp + ScoreUpdate $(+ $supertrait)* {}

#[cfg(not(c_bindings))]
impl<T: ScoreLookUp + ScoreUpdate $(+ $supertrait)*> Score for T {}

#[cfg(not(c_bindings))]
impl<S: ScoreLookUp, T: Deref<Target=S>> ScoreLookUp for T {
type ScoreParams = S::ScoreParams;
fn channel_penalty_msat(
&self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams
) -> u64 {
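
Illustration (not part of the diff): the new `Score` cover trait bundles `ScoreLookUp` and `ScoreUpdate`, with a blanket impl outside `c_bindings` so existing scorers get it for free; as the doc above notes, bindings users may need to implement it by hand. A toy sketch of the same shape:

// Toy versions of the traits above (simplified signatures).
trait ScoreLookUp {
    type ScoreParams;
    fn channel_penalty_msat(&self, short_channel_id: u64, params: &Self::ScoreParams) -> u64;
}
trait ScoreUpdate {
    fn payment_path_failed(&mut self, short_channel_id: u64);
}

// The cover trait plus its blanket impl, mirroring the non-c_bindings path.
trait Score: ScoreLookUp + ScoreUpdate {}
impl<T: ScoreLookUp + ScoreUpdate> Score for T {}

struct PenaltyTracker { failures: u64 }

impl ScoreLookUp for PenaltyTracker {
    type ScoreParams = ();
    fn channel_penalty_msat(&self, _scid: u64, _params: &()) -> u64 { 500 * self.failures }
}
impl ScoreUpdate for PenaltyTracker {
    fn payment_path_failed(&mut self, _scid: u64) { self.failures += 1; }
}

// Anything needing both halves can now take a single `Score` bound.
fn penalize<S: Score>(scorer: &mut S, scid: u64, params: &S::ScoreParams) -> u64 {
    scorer.payment_path_failed(scid);
    scorer.channel_penalty_msat(scid, params)
}

fn main() {
    let mut tracker = PenaltyTracker { failures: 0 };
    assert_eq!(penalize(&mut tracker, 42, &()), 500);
}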
@@ -131,7 +143,8 @@ impl<SP: Sized, S: ScoreLookUp<ScoreParams = SP>, T: Deref<Target=S> $(+ $supert
}
}

impl<S: ScoreUpdate, T: DerefMut<Target=S> $(+ $supertrait)*> ScoreUpdate for T {
#[cfg(not(c_bindings))]
impl<S: ScoreUpdate, T: DerefMut<Target=S>> ScoreUpdate for T {
fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) {
self.deref_mut().payment_path_failed(path, short_channel_id)
}

@@ -192,7 +205,7 @@ pub trait WriteableScore<'a>: LockableScore<'a> + Writeable {}
#[cfg(not(c_bindings))]
impl<'a, T> WriteableScore<'a> for T where T: LockableScore<'a> + Writeable {}
#[cfg(not(c_bindings))]
impl<'a, T: 'a + ScoreLookUp + ScoreUpdate> LockableScore<'a> for Mutex<T> {
impl<'a, T: Score + 'a> LockableScore<'a> for Mutex<T> {
type ScoreUpdate = T;
type ScoreLookUp = T;

@@ -209,7 +222,7 @@ impl<'a, T: 'a + ScoreLookUp + ScoreUpdate> LockableScore<'a> for Mutex<T> {
}

#[cfg(not(c_bindings))]
impl<'a, T: 'a + ScoreUpdate + ScoreLookUp> LockableScore<'a> for RefCell<T> {
impl<'a, T: Score + 'a> LockableScore<'a> for RefCell<T> {
type ScoreUpdate = T;
type ScoreLookUp = T;

@@ -226,7 +239,7 @@ impl<'a, T: 'a + ScoreUpdate + ScoreLookUp> LockableScore<'a> for RefCell<T> {
}

#[cfg(not(c_bindings))]
impl<'a, SP:Sized, T: 'a + ScoreUpdate + ScoreLookUp<ScoreParams = SP>> LockableScore<'a> for RwLock<T> {
impl<'a, T: Score + 'a> LockableScore<'a> for RwLock<T> {
type ScoreUpdate = T;
type ScoreLookUp = T;

@@ -244,12 +257,12 @@ impl<'a, SP:Sized, T: 'a + ScoreUpdate + ScoreLookUp<ScoreParams = SP>> Lockabl

#[cfg(c_bindings)]
/// A concrete implementation of [`LockableScore`] which supports multi-threading.
pub struct MultiThreadedLockableScore<T: ScoreLookUp + ScoreUpdate> {
pub struct MultiThreadedLockableScore<T: Score> {
score: RwLock<T>,
}

#[cfg(c_bindings)]
impl<'a, SP:Sized, T: 'a + ScoreLookUp<ScoreParams = SP> + ScoreUpdate> LockableScore<'a> for MultiThreadedLockableScore<T> {
impl<'a, T: Score + 'a> LockableScore<'a> for MultiThreadedLockableScore<T> {
type ScoreUpdate = T;
type ScoreLookUp = T;
type WriteLocked = MultiThreadedScoreLockWrite<'a, Self::ScoreUpdate>;

@@ -265,17 +278,17 @@ impl<'a, SP:Sized, T: 'a + ScoreLookUp<ScoreParams = SP> + ScoreUpdate> Lockable
}

#[cfg(c_bindings)]
impl<T: ScoreUpdate + ScoreLookUp> Writeable for MultiThreadedLockableScore<T> {
impl<T: Score> Writeable for MultiThreadedLockableScore<T> {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
self.score.read().unwrap().write(writer)
}
}

#[cfg(c_bindings)]
impl<'a, T: 'a + ScoreUpdate + ScoreLookUp> WriteableScore<'a> for MultiThreadedLockableScore<T> {}
impl<'a, T: Score + 'a> WriteableScore<'a> for MultiThreadedLockableScore<T> {}

#[cfg(c_bindings)]
impl<T: ScoreLookUp + ScoreUpdate> MultiThreadedLockableScore<T> {
impl<T: Score> MultiThreadedLockableScore<T> {
/// Creates a new [`MultiThreadedLockableScore`] given an underlying [`Score`].
pub fn new(score: T) -> Self {
MultiThreadedLockableScore { score: RwLock::new(score) }

@@ -284,14 +297,14 @@ impl<T: ScoreLookUp + ScoreUpdate> MultiThreadedLockableScore<T> {

#[cfg(c_bindings)]
/// A locked `MultiThreadedLockableScore`.
pub struct MultiThreadedScoreLockRead<'a, T: ScoreLookUp>(RwLockReadGuard<'a, T>);
pub struct MultiThreadedScoreLockRead<'a, T: Score>(RwLockReadGuard<'a, T>);

#[cfg(c_bindings)]
/// A locked `MultiThreadedLockableScore`.
pub struct MultiThreadedScoreLockWrite<'a, T: ScoreUpdate>(RwLockWriteGuard<'a, T>);
pub struct MultiThreadedScoreLockWrite<'a, T: Score>(RwLockWriteGuard<'a, T>);

#[cfg(c_bindings)]
impl<'a, T: 'a + ScoreLookUp> Deref for MultiThreadedScoreLockRead<'a, T> {
impl<'a, T: 'a + Score> Deref for MultiThreadedScoreLockRead<'a, T> {
type Target = T;

fn deref(&self) -> &Self::Target {

@@ -300,14 +313,24 @@ impl<'a, T: 'a + ScoreLookUp> Deref for MultiThreadedScoreLockRead<'a, T> {
}

#[cfg(c_bindings)]
impl<'a, T: 'a + ScoreUpdate> Writeable for MultiThreadedScoreLockWrite<'a, T> {
impl<'a, T: Score> ScoreLookUp for MultiThreadedScoreLockRead<'a, T> {
type ScoreParams = T::ScoreParams;
fn channel_penalty_msat(&self, short_channel_id: u64, source: &NodeId,
target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams
) -> u64 {
self.0.channel_penalty_msat(short_channel_id, source, target, usage, score_params)
}
}

#[cfg(c_bindings)]
impl<'a, T: Score> Writeable for MultiThreadedScoreLockWrite<'a, T> {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
self.0.write(writer)
}
}

#[cfg(c_bindings)]
impl<'a, T: 'a + ScoreUpdate> Deref for MultiThreadedScoreLockWrite<'a, T> {
impl<'a, T: 'a + Score> Deref for MultiThreadedScoreLockWrite<'a, T> {
type Target = T;

fn deref(&self) -> &Self::Target {

@@ -316,12 +339,31 @@ impl<'a, T: 'a + ScoreUpdate> Deref for MultiThreadedScoreLockWrite<'a, T> {
}

#[cfg(c_bindings)]
impl<'a, T: 'a + ScoreUpdate> DerefMut for MultiThreadedScoreLockWrite<'a, T> {
impl<'a, T: 'a + Score> DerefMut for MultiThreadedScoreLockWrite<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.0.deref_mut()
}
}

#[cfg(c_bindings)]
impl<'a, T: Score> ScoreUpdate for MultiThreadedScoreLockWrite<'a, T> {
fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) {
self.0.payment_path_failed(path, short_channel_id)
}

fn payment_path_successful(&mut self, path: &Path) {
self.0.payment_path_successful(path)
}

fn probe_failed(&mut self, path: &Path, short_channel_id: u64) {
self.0.probe_failed(path, short_channel_id)
}

fn probe_successful(&mut self, path: &Path) {
self.0.probe_successful(path)
}
}

/// Proposed use of a channel passed as a parameter to [`ScoreLookUp::channel_penalty_msat`].
#[derive(Clone, Copy, Debug, PartialEq)]
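
Illustration (not part of the diff): instead of relying on blanket `Deref`-based impls, the `c_bindings` path gives the read guard a `ScoreLookUp` impl and the write guard a `ScoreUpdate` impl, each delegating to the guarded scorer. A simplified standalone version of that guard-delegation pattern:

use std::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};

// Simplified trait stand-ins for the sketch.
trait ScoreLookUp { fn channel_penalty_msat(&self, short_channel_id: u64) -> u64; }
trait ScoreUpdate { fn payment_path_failed(&mut self, short_channel_id: u64); }

struct MultiThreadedLockableScore<T> { score: RwLock<T> }
struct ScoreLockRead<'a, T>(RwLockReadGuard<'a, T>);
struct ScoreLockWrite<'a, T>(RwLockWriteGuard<'a, T>);

impl<T> MultiThreadedLockableScore<T> {
    fn new(score: T) -> Self { Self { score: RwLock::new(score) } }
    fn read_lock(&self) -> ScoreLockRead<'_, T> { ScoreLockRead(self.score.read().unwrap()) }
    fn write_lock(&self) -> ScoreLockWrite<'_, T> { ScoreLockWrite(self.score.write().unwrap()) }
}

// The read guard only exposes lookups, the write guard only exposes updates,
// each delegating to the scorer it guards.
impl<'a, T: ScoreLookUp> ScoreLookUp for ScoreLockRead<'a, T> {
    fn channel_penalty_msat(&self, short_channel_id: u64) -> u64 {
        self.0.channel_penalty_msat(short_channel_id)
    }
}
impl<'a, T: ScoreUpdate> ScoreUpdate for ScoreLockWrite<'a, T> {
    fn payment_path_failed(&mut self, short_channel_id: u64) {
        self.0.payment_path_failed(short_channel_id)
    }
}

struct Tracker(u64);
impl ScoreLookUp for Tracker { fn channel_penalty_msat(&self, _scid: u64) -> u64 { self.0 } }
impl ScoreUpdate for Tracker { fn payment_path_failed(&mut self, _scid: u64) { self.0 += 100 } }

fn main() {
    let locked = MultiThreadedLockableScore::new(Tracker(0));
    locked.write_lock().payment_path_failed(42);
    assert_eq!(locked.read_lock().channel_penalty_msat(42), 100);
}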
@@ -1417,6 +1459,10 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ScoreUpdate for Prob
}
}

#[cfg(c_bindings)]
impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Score for ProbabilisticScorerUsingTime<G, L, T>
where L::Target: Logger {}

mod approx {
const BITS: u32 = 64;
const HIGHEST_BIT: u32 = BITS - 1;

@@ -275,6 +275,9 @@ impl SpendableOutputDescriptor {
///
/// Note that this does not include any signatures, just the information required to
/// construct the transaction and sign it.
///
/// This is not exported to bindings users as there is no standard serialization for an input.
/// See [`Self::create_spendable_outputs_psbt`] instead.
pub fn to_psbt_input(&self) -> bitcoin::psbt::Input {
match self {
SpendableOutputDescriptor::StaticOutput { output, .. } => {

@@ -397,11 +397,7 @@ where
pub fn new(
kv_store: K, logger: L, maximum_pending_updates: u64, entropy_source: ES,
signer_provider: SP,
) -> Self
where
ES::Target: EntropySource + Sized,
SP::Target: SignerProvider + Sized,
{
) -> Self {
MonitorUpdatingPersister {
kv_store,
logger,
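
Illustration (not part of the diff): the `where` clause removed from `MonitorUpdatingPersister::new` presumably duplicated bounds already present on the surrounding impl block, so dropping it changes nothing for callers. A toy example of relying on the impl-level bound instead of repeating it per method:

use std::ops::Deref;

trait EntropySource { fn random_byte(&self) -> u8; }

struct Persister<ES: Deref> where ES::Target: EntropySource {
    entropy_source: ES,
}

// The impl block already carries the bound, so repeating it in a method-level
// `where` clause (as the old `new` did) adds nothing and can simply be dropped.
impl<ES: Deref> Persister<ES> where ES::Target: EntropySource {
    fn new(entropy_source: ES) -> Self {
        Persister { entropy_source }
    }
    fn sample(&self) -> u8 { self.entropy_source.random_byte() }
}

struct FixedEntropy;
impl EntropySource for FixedEntropy { fn random_byte(&self) -> u8 { 42 } }

fn main() {
    let entropy = FixedEntropy;
    assert_eq!(Persister::new(&entropy).sample(), 42);
}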
@@ -416,12 +412,10 @@ where
/// It is extremely important that your [`KVStore::read`] implementation uses the
/// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the
/// documentation for [`MonitorUpdatingPersister`].
pub fn read_all_channel_monitors_with_updates<B: Deref, F: Deref + Clone>(
&self, broadcaster: B, fee_estimator: F,
pub fn read_all_channel_monitors_with_updates<B: Deref, F: Deref>(
&self, broadcaster: &B, fee_estimator: &F,
) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>, io::Error>
where
ES::Target: EntropySource + Sized,
SP::Target: SignerProvider + Sized,
B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
{

@@ -432,8 +426,8 @@ where
let mut res = Vec::with_capacity(monitor_list.len());
for monitor_key in monitor_list {
res.push(self.read_channel_monitor_with_updates(
&broadcaster,
fee_estimator.clone(),
broadcaster,
fee_estimator,
monitor_key,
)?)
}

@@ -457,12 +451,10 @@ where
///
/// Loading a large number of monitors will be faster if done in parallel. You can use this
/// function to accomplish this. Take care to limit the number of parallel readers.
pub fn read_channel_monitor_with_updates<B: Deref, F: Deref + Clone>(
&self, broadcaster: &B, fee_estimator: F, monitor_key: String,
pub fn read_channel_monitor_with_updates<B: Deref, F: Deref>(
&self, broadcaster: &B, fee_estimator: &F, monitor_key: String,
) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), io::Error>
where
ES::Target: EntropySource + Sized,
SP::Target: SignerProvider + Sized,
B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
{

@@ -484,7 +476,7 @@ where
Err(err) => return Err(err),
};

monitor.update_monitor(&update, broadcaster, fee_estimator.clone(), &self.logger)
monitor.update_monitor(&update, broadcaster, fee_estimator, &self.logger)
.map_err(|e| {
log_error!(
self.logger,

@@ -949,17 +941,17 @@ mod tests {
// Check that the persisted channel data is empty before any channels are
// open.
let mut persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
assert_eq!(persisted_chan_data_0.len(), 0);
let mut persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
broadcaster_1, &chanmon_cfgs[1].fee_estimator).unwrap();
&broadcaster_1, &&chanmon_cfgs[1].fee_estimator).unwrap();
assert_eq!(persisted_chan_data_1.len(), 0);

// Helper to make sure the channel is on the expected update ID.
macro_rules! check_persisted_data {
($expected_update_id: expr) => {
persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
// check that we stored only one monitor
assert_eq!(persisted_chan_data_0.len(), 1);
for (_, mon) in persisted_chan_data_0.iter() {

@@ -978,7 +970,7 @@ mod tests {
}
}
persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
broadcaster_1, &chanmon_cfgs[1].fee_estimator).unwrap();
&broadcaster_1, &&chanmon_cfgs[1].fee_estimator).unwrap();
assert_eq!(persisted_chan_data_1.len(), 1);
for (_, mon) in persisted_chan_data_1.iter() {
assert_eq!(mon.get_latest_update_id(), $expected_update_id);

@@ -1043,7 +1035,7 @@ mod tests {
check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);

// Make sure the expected number of stale updates is present.
let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
let (_, monitor) = &persisted_chan_data[0];
let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
// The channel should have 0 updates, as it wrote a full monitor and consolidated.

@@ -1151,7 +1143,7 @@ mod tests {

// Check that the persisted channel data is empty before any channels are
// open.
let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
assert_eq!(persisted_chan_data.len(), 0);

// Create some initial channel

@@ -1162,7 +1154,7 @@ mod tests {
send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);

// Get the monitor and make a fake stale update at update_id=1 (lowest height of an update possible)
let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
let (_, monitor) = &persisted_chan_data[0];
let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
persister_0

@@ -140,10 +140,10 @@ impl<'a> Router for TestRouter<'a> {
// Since the path is reversed, the last element in our iteration is the first
// hop.
if idx == path.hops.len() - 1 {
scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(payer), &NodeId::from_pubkey(&hop.pubkey), usage, &());
scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(payer), &NodeId::from_pubkey(&hop.pubkey), usage, &Default::default());
} else {
let curr_hop_path_idx = path.hops.len() - 1 - idx;
scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(&path.hops[curr_hop_path_idx - 1].pubkey), &NodeId::from_pubkey(&hop.pubkey), usage, &());
scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(&path.hops[curr_hop_path_idx - 1].pubkey), &NodeId::from_pubkey(&hop.pubkey), usage, &Default::default());
}
}
}

@@ -153,7 +153,7 @@ impl<'a> Router for TestRouter<'a> {
let logger = TestLogger::new();
find_route(
payer, params, &self.network_graph, first_hops, &logger,
&ScorerAccountingForInFlightHtlcs::new(self.scorer.read().unwrap(), &inflight_htlcs), &(),
&ScorerAccountingForInFlightHtlcs::new(self.scorer.read().unwrap(), &inflight_htlcs), &Default::default(),
&[42; 32]
)
}
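
Illustration (not part of the diff): with scorer parameters now reached through an associated type instead of a hard-coded `()`, call sites ask that type for its default, hence `&()` becoming `&Default::default()` in the hunks above (and in the `DefaultRouter::new` call earlier). A simplified sketch; the `Default` bound on the associated type is an assumption of this toy, not necessarily LDK's trait.

// Toy sketch; names below are local stand-ins, not LDK's real definitions.
#[derive(Default)]
struct ProbabilisticScoringFeeParameters { base_penalty_msat: u64 }

trait ScoreLookUp {
    type ScoreParams: Default;
    fn channel_penalty_msat(&self, short_channel_id: u64, params: &Self::ScoreParams) -> u64;
}

struct UnitScorer;
impl ScoreLookUp for UnitScorer {
    type ScoreParams = ();
    fn channel_penalty_msat(&self, _scid: u64, _params: &()) -> u64 { 0 }
}

struct ProbScorer;
impl ScoreLookUp for ProbScorer {
    type ScoreParams = ProbabilisticScoringFeeParameters;
    fn channel_penalty_msat(&self, _scid: u64, params: &Self::ScoreParams) -> u64 {
        params.base_penalty_msat
    }
}

// Works for any scorer; `&()` would only compile when ScoreParams is literally `()`.
fn penalty_with_default_params<S: ScoreLookUp>(scorer: &S, scid: u64) -> u64 {
    scorer.channel_penalty_msat(scid, &Default::default())
}

fn main() {
    assert_eq!(penalty_with_default_params(&UnitScorer, 1), 0);
    assert_eq!(penalty_with_default_params(&ProbScorer, 1), 0);
}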
@@ -423,7 +423,7 @@ impl<Signer: sign::WriteableEcdsaChannelSigner> chainmonitor::Persist<Signer> fo
chain::ChannelMonitorUpdateStatus::Completed
}

fn update_persisted_channel(&self, funding_txo: OutPoint, update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
let mut ret = chain::ChannelMonitorUpdateStatus::Completed;
if let Some(update_ret) = self.update_rets.lock().unwrap().pop_front() {
ret = update_ret;