Mirror of https://github.com/lightningdevkit/rust-lightning.git

Merge pull request #194 from TheBlueMatt/2018-09-doc-cleanup

Make docs look nicer by adding explicit spacing

Commit b2a855b57a
10 changed files with 94 additions and 17 deletions
@@ -1,6 +1,8 @@
//! Traits and utility impls which allow other parts of rust-lightning to interact with the
//! blockchain - receiving notifications of new blocks and block disconnections and allowing
//! rust-lightning to request that you monitor the chain for certain outpoints/transactions.
//! blockchain.
//!
//! Includes traits for monitoring and receiving notifications of new blocks and block
//! disconnections, transaction broadcasting, and feerate information requests.

use bitcoin::blockdata::block::{Block, BlockHeader};
use bitcoin::blockdata::transaction::Transaction;

@@ -28,6 +30,7 @@ pub enum ChainError {
/// An interface to request notification of certain scripts as they appear in the
/// chain.
///
/// Note that all of the functions implemented here *must* be reentrant-safe (obviously - they're
/// called from inside the library in response to ChainListener events, P2P events, or timer
/// events).

@@ -66,8 +69,10 @@ pub trait ChainListener: Sync + Send {
/// Note that if a new transaction/outpoint is watched during a block_connected call, the block
/// *must* be re-scanned with the new transaction/outpoints and block_connected should be
/// called again with the same header and (at least) the new transactions.
///
/// Note that if non-new transaction/outpoints may be registered during a call, a second call
/// *must not* happen.
///
/// This also means those counting confirmations using block_connected callbacks should watch
/// for duplicate headers and not count them towards confirmations!
fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[u32]);
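To illustrate the re-scan requirement described in the hunk above, here is a minimal caller-side sketch. The `matches_filter` and `filter_changed` hooks stand in for whatever tracks your registered outpoints/scripts and are not part of this diff; in practice ChainWatchInterfaceUtil::block_connected_with_filtering (further down in this file) performs this loop for you. The ChainListener import path is assumed from the era of this commit.

use bitcoin::blockdata::block::Block;
use bitcoin::blockdata::transaction::Transaction;
use lightning::chain::chaininterface::ChainListener;

// Sketch only: drive a single ChainListener, re-scanning the block whenever the
// callback registered new watch data. `filter_changed` must report (and reset) a
// "changed since the last scan" flag, otherwise this loop would never terminate.
fn connect_block_with_rescan<L: ChainListener>(
    listener: &L,
    block: &Block,
    height: u32,
    matches_filter: &dyn Fn(&Transaction) -> bool,
    filter_changed: &dyn Fn() -> bool,
) {
    let header = &block.header;
    loop {
        // Collect the currently-matching transactions and their indexes within the block.
        let mut matched: Vec<&Transaction> = Vec::new();
        let mut indexes: Vec<u32> = Vec::new();
        for (idx, tx) in block.txdata.iter().enumerate() {
            if matches_filter(tx) {
                matched.push(tx);
                indexes.push(idx as u32);
            }
        }
        listener.block_connected(header, height, &matched, &indexes);
        // If the callback registered new outpoints/scripts, the same header must be
        // delivered again - downstream confirmation counting must tolerate duplicates.
        if !filter_changed() {
            break;
        }
    }
}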
@@ -89,15 +94,19 @@ pub enum ConfirmationTarget {
/// A trait which should be implemented to provide feerate information on a number of time
/// horizons.
///
/// Note that all of the functions implemented here *must* be reentrant-safe (obviously - they're
/// called from inside the library in response to ChainListener events, P2P events, or timer
/// events).
pub trait FeeEstimator: Sync + Send {
/// Gets estimated satoshis of fee required per 1000 Weight-Units. This translates to:
/// * satoshis-per-byte * 250
/// * ceil(satoshis-per-kbyte / 4)
/// Gets estimated satoshis of fee required per 1000 Weight-Units.
///
/// Must be no smaller than 253 (ie 1 satoshi-per-byte rounded up to ensure later round-downs
/// don't put us below 1 satoshi-per-byte).
///
/// This translates to:
/// * satoshis-per-byte * 250
/// * ceil(satoshis-per-kbyte / 4)
fn get_est_sat_per_1000_weight(&self, confirmation_target: ConfirmationTarget) -> u64;
}
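As a concrete illustration of the unit conversion documented above, here is a minimal FeeEstimator sketch serving fixed feerates. The 250 multiplier and the 253 floor come from the doc comment in this hunk; the import path, the ConfirmationTarget variant names, and the placeholder feerates are assumptions from this era of the crate, not recommendations.

use lightning::chain::chaininterface::{ConfirmationTarget, FeeEstimator};

// Sketch: a static fee estimator. A real node should pull estimates from its
// bitcoind/electrum backend rather than hardcoding values.
struct StaticFeeEstimator;

impl FeeEstimator for StaticFeeEstimator {
    fn get_est_sat_per_1000_weight(&self, confirmation_target: ConfirmationTarget) -> u64 {
        // Placeholder satoshis-per-byte figures for each confirmation horizon.
        let sat_per_byte: u64 = match confirmation_target {
            ConfirmationTarget::Background => 1,
            ConfirmationTarget::Normal => 5,
            ConfirmationTarget::HighPriority => 20,
        };
        // sat/byte * 250 == sat per 1000 weight-units; never return less than 253 so
        // later round-downs cannot drop us below 1 satoshi-per-byte.
        (sat_per_byte * 250).max(253)
    }
}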
@@ -189,6 +198,7 @@ impl ChainWatchedUtil {
}

/// Utility to capture some common parts of ChainWatchInterface implementors.
///
/// Keeping a local copy of this in a ChainWatchInterface implementor is likely useful.
pub struct ChainWatchInterfaceUtil {
network: Network,

@@ -247,6 +257,7 @@ impl ChainWatchInterfaceUtil {
}

/// Notify listeners that a block was connected given a full, unfiltered block.
///
/// Handles re-scanning the block and calling block_connected again if listeners register new
/// watch data during the callbacks for you (see ChainListener::block_connected for more info).
pub fn block_connected_with_filtering(&self, block: &Block, height: u32) {

@@ -280,6 +291,7 @@ impl ChainWatchInterfaceUtil {

/// Notify listeners that a block was connected, given a pre-filtered list of transactions in the
/// block which matched the filter (probably using does_match_tx).
///
/// Returns true if notified listeners registered additional watch data (implying that the
/// block must be re-scanned and this function called again prior to further block_connected
/// calls, see ChainListener::block_connected for more info).
@@ -1,5 +1,4 @@
//! Module provides structs and traits which allow other parts of rust-lightning to interact with
//! the blockchain.
//! Structs and traits which allow other parts of rust-lightning to interact with the blockchain.

pub mod chaininterface;
pub mod transaction;

@@ -4,6 +4,7 @@ use bitcoin::util::hash::Sha256dHash;
use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;

/// A reference to a transaction output.
///
/// Differs from bitcoin::blockdata::transaction::OutPoint as the index is a u16 instead of u32
/// due to LN's restrictions on index values. Should reduce (possibly) unsafe conversions this way.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
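Because the index here is a u16 while bitcoin's OutPoint uses a u32, converting back to the bitcoin type is a lossless widening. A minimal sketch follows; the field names (`txid`/`index` on the lightning type, `txid`/`vout` on the bitcoin type) are assumed from the doc comment and the era's rust-bitcoin layout, and a helper like this is not part of this diff.

use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
use lightning::chain::transaction::OutPoint;

// Sketch: widening the u16 index to the u32 the bitcoin crate expects never fails.
fn to_bitcoin_outpoint(op: &OutPoint) -> BitcoinOutPoint {
    BitcoinOutPoint {
        txid: op.txid,
        vout: op.index as u32,
    }
}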
@@ -1,7 +1,9 @@
//! The top-level channel management and payment tracking stuff lives here.
//!
//! The ChannelManager is the main chunk of logic implementing the lightning protocol and is
//! responsible for tracking which channels are open, HTLCs are in flight and reestablishing those
//! upon reconnect to the relevant peer(s).
//!
//! It does not manage routing logic (see ln::router for that) nor does it manage constructing
//! on-chain transactions (it only monitors the chain to watch for any force-closes that might
//! imply it needs to fail HTLCs/payments/channels it manages).

@@ -223,6 +225,7 @@ const ERR: () = "You need at least 32 bit pointers (well, usize, but we'll assum

/// Manager which keeps track of a number of channels and sends messages to the appropriate
/// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
///
/// Implements ChannelMessageHandler, handling the multi-channel parts and passing things through
/// to individual Channels.
pub struct ChannelManager {

@@ -285,10 +288,14 @@ pub struct ChannelDetails {
}

impl ChannelManager {
/// Constructs a new ChannelManager to hold several channels and route between them. This is
/// the main "logic hub" for all channel-related actions, and implements ChannelMessageHandler.
/// Constructs a new ChannelManager to hold several channels and route between them.
///
/// This is the main "logic hub" for all channel-related actions, and implements
/// ChannelMessageHandler.
///
/// fee_proportional_millionths is an optional fee to charge any payments routed through us.
/// Non-proportional fees are fixed according to our risk using the provided fee estimator.
///
/// panics if channel_value_satoshis is >= `MAX_FUNDING_SATOSHIS`!
pub fn new(our_network_key: SecretKey, fee_proportional_millionths: u32, announce_channels_publicly: bool, network: Network, feeest: Arc<FeeEstimator>, monitor: Arc<ManyChannelMonitor>, chain_monitor: Arc<ChainWatchInterface>, tx_broadcaster: Arc<BroadcasterInterface>, logger: Arc<Logger>) -> Result<Arc<ChannelManager>, secp256k1::Error> {
let secp_ctx = Secp256k1::new();
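Putting the constructor signature shown above to use, a construction sketch follows. Every Arc argument is assumed to hold your own implementation of the trait named in the signature, and the module paths reflect the 2018-era crate layout; nothing besides the argument order comes from this hunk.

use std::sync::Arc;
use bitcoin::network::constants::Network;
use secp256k1::key::SecretKey;
use lightning::chain::chaininterface::{BroadcasterInterface, ChainWatchInterface, FeeEstimator};
use lightning::ln::channelmanager::ChannelManager;
use lightning::ln::channelmonitor::ManyChannelMonitor;
use lightning::util::logger::Logger;

// Sketch: wire a ChannelManager from your own trait implementations.
fn build_manager(
    node_secret: SecretKey,
    fee_estimator: Arc<FeeEstimator>,
    monitor: Arc<ManyChannelMonitor>,
    chain_monitor: Arc<ChainWatchInterface>,
    tx_broadcaster: Arc<BroadcasterInterface>,
    logger: Arc<Logger>,
) -> Result<Arc<ChannelManager>, secp256k1::Error> {
    ChannelManager::new(
        node_secret,
        0,                // fee_proportional_millionths: charge no proportional routing fee
        false,            // announce_channels_publicly: keep channels unannounced
        Network::Testnet, // placeholder network choice
        fee_estimator,
        monitor,
        chain_monitor,
        tx_broadcaster,
        logger,
    )
}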
@@ -324,12 +331,15 @@ impl ChannelManager {
}

/// Creates a new outbound channel to the given remote node and with the given value.
///
/// user_id will be provided back as user_channel_id in FundingGenerationReady and
/// FundingBroadcastSafe events to allow tracking of which events correspond with which
/// create_channel call. Note that user_channel_id defaults to 0 for inbound channels, so you
/// may wish to avoid using 0 for user_id here.
///
/// If successful, will generate a SendOpenChannel event, so you should probably poll
/// PeerManager::process_events afterwards.
///
/// Raises APIError::APIMisuseError when channel_value_satoshis > 2**24 or push_msat is greater than channel_value_satoshis * 1k
pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64) -> Result<(), APIError> {
let chan_keys = if cfg!(feature = "fuzztarget") {
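A short usage sketch for the call documented above. The channel_manager and peer_manager values, the amounts, and the APIError import path are assumptions; the important part shown is processing events afterwards so the queued SendOpenChannel message actually goes out.

use secp256k1::key::PublicKey;
use lightning::ln::channelmanager::ChannelManager;
use lightning::ln::peer_handler::{PeerManager, SocketDescriptor};
use lightning::util::errors::APIError;

// Sketch: open a 100_000-sat channel with no pushed balance.
fn open_channel_to<D: SocketDescriptor>(
    channel_manager: &ChannelManager,
    peer_manager: &PeerManager<D>,
    their_node_key: PublicKey,
) -> Result<(), APIError> {
    // user_id is echoed back in FundingGenerationReady/FundingBroadcastSafe events;
    // avoid 0 because inbound channels default to it.
    channel_manager.create_channel(their_node_key, 100_000, 0, 1)?;
    // create_channel only queues a SendOpenChannel event; prod the PeerManager so the
    // open_channel message is actually written to the socket.
    peer_manager.process_events();
    Ok(())
}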
@@ -407,6 +417,7 @@ impl ChannelManager {
/// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
/// will be accepted on the given channel, and after additional timeout/the closing of all
/// pending HTLCs, the channel will be closed on chain.
///
/// May generate a SendShutdown event on success, which should be relayed.
pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), HandleError> {
let (mut res, node_id, chan_option) = {

@@ -947,16 +958,19 @@ impl ChannelManager {
}

/// Sends a payment along a given route.
///
/// Value parameters are provided via the last hop in route, see documentation for RouteHop
/// fields for more info.
///
/// Note that if the payment_hash already exists elsewhere (eg you're sending a duplicative
/// payment), we don't do anything to stop you! We always try to ensure that if the provided
/// next hop knows the preimage to payment_hash they can claim an additional amount as
/// specified in the last hop in the route! Thus, you should probably do your own
/// payment_preimage tracking (which you should already be doing as they represent "proof of
/// payment") and prevent double-sends yourself.
/// See-also docs on Channel::send_htlc_and_commit.
///
/// May generate a SendHTLCs event on success, which should be relayed.
///
/// Raises APIError::RouteError when an invalid route or forward parameter
/// (cltv_delta, fee, node public key) is specified
pub fn send_payment(&self, route: Route, payment_hash: [u8; 32]) -> Result<(), APIError> {
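The doc above leaves double-send protection to the caller; a sketch of what that might look like follows. The `sent_payments` set is our own bookkeeping, the Route is assumed to come from Router::get_route, and the APIMisuseError field shape is an assumption not shown in this diff.

use std::collections::HashSet;
use lightning::ln::channelmanager::ChannelManager;
use lightning::ln::router::Route;
use lightning::util::errors::APIError;

// Sketch: caller-side double-send protection around send_payment.
fn send_payment_once(
    channel_manager: &ChannelManager,
    sent_payments: &mut HashSet<[u8; 32]>,
    route: Route,
    payment_hash: [u8; 32],
) -> Result<(), APIError> {
    // If we have already paid (or tried to pay) this hash, the recipient could claim
    // the HTLC value again with the same preimage, so refuse to send.
    if !sent_payments.insert(payment_hash) {
        return Err(APIError::APIMisuseError { err: "refusing duplicate payment_hash" });
    }
    channel_manager.send_payment(route, payment_hash)
}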
@@ -1033,7 +1047,9 @@ impl ChannelManager {
}

/// Call this upon creation of a funding transaction for the given channel.
///
/// Panics if a funding transaction has already been provided for this channel.
///
/// May panic if the funding_txo is duplicative with some other channel (note that this should
/// be trivially prevented by using unique funding transaction keys per-channel).
pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_txo: OutPoint) {

@@ -1107,6 +1123,7 @@ impl ChannelManager {
}

/// Processes HTLCs which are pending waiting on random forward delay.
///
/// Should only really ever be called in response to a PendingHTLCsForwardable event.
/// Will likely generate further events.
pub fn process_pending_htlc_forwards(&self) {

@@ -1323,6 +1340,7 @@ impl ChannelManager {
/// Provides a payment preimage in response to a PaymentReceived event, returning true and
/// generating message events for the net layer to claim the payment, if possible. Thus, you
/// should probably kick the net layer to go send messages if this returns true!
///
/// May panic if called except in response to a PaymentReceived event.
pub fn claim_funds(&self, payment_preimage: [u8; 32]) -> bool {
let mut sha = Sha256::new();
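A receiver-side sketch of the claim_funds flow documented above, using the same rust-crypto Sha256 the hunk itself shows. The `known_preimages` map (payment_hash -> preimage, filled in when you issued the invoice) and the event-handling plumbing are assumptions.

use std::collections::HashMap;
use crypto::digest::Digest;
use crypto::sha2::Sha256;
use lightning::ln::channelmanager::ChannelManager;

// Sketch: respond to a PaymentReceived event by looking up and claiming the preimage.
fn handle_payment_received(
    channel_manager: &ChannelManager,
    known_preimages: &HashMap<[u8; 32], [u8; 32]>,
    payment_hash: [u8; 32],
) {
    if let Some(preimage) = known_preimages.get(&payment_hash) {
        // claim_funds recomputes SHA256(preimage) internally; if it returns true,
        // message events were queued and the net layer (PeerManager::process_events)
        // should be kicked so they actually get sent.
        let _messages_queued = channel_manager.claim_funds(*preimage);
    }
}

// When issuing an invoice, the advertised payment_hash is SHA256(preimage):
fn hash_of(preimage: &[u8; 32]) -> [u8; 32] {
    let mut sha = Sha256::new();
    sha.input(preimage);
    let mut hash = [0u8; 32];
    sha.result(&mut hash);
    hash
}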
@@ -1,8 +1,10 @@
//! The logic to monitor for on-chain transactions and create the relevant claim responses lives
//! here.
//!
//! ChannelMonitor objects are generated by ChannelManager in response to relevant
//! messages/actions, and MUST be persisted to disk (and, preferably, remotely) before progress can
//! be made in responding to certain messages, see ManyChannelMonitor for more.
//!
//! Note that ChannelMonitors are an important part of the lightning trust model and a copy of the
//! latest ChannelMonitor must always be actively monitoring for chain updates (and no out-of-date
//! ChannelMonitors should do so). Thus, if you're building rust-lightning into an HSM or other

@@ -40,6 +42,7 @@ use std::{hash,cmp};
pub enum ChannelMonitorUpdateErr {
/// Used to indicate a temporary failure (eg connection to a watchtower failed, but is expected
/// to succeed at some point in the future).
///
/// Such a failure will "freeze" a channel, preventing us from revoking old states or
/// submitting new commitment transactions to the remote party.
/// ChannelManager::test_restore_channel_monitor can be used to retry the update(s) and restore

@@ -55,12 +58,14 @@ pub enum ChannelMonitorUpdateErr {
/// them. Generally should be implemented by keeping a local SimpleManyChannelMonitor and passing
/// events to it, while also taking any add_update_monitor events and passing them to some remote
/// server(s).
///
/// Note that any updates to a channel's monitor *must* be applied to each instance of the
/// channel's monitor everywhere (including remote watchtowers) *before* this function returns. If
/// an update occurs and a remote watchtower is left with old state, it may broadcast transactions
/// which we have revoked, allowing our counterparty to claim all funds in the channel!
pub trait ManyChannelMonitor: Send + Sync {
/// Adds or updates a monitor for the given `funding_txo`.
///
/// Implementor must also ensure that the funding_txo outpoint is registered with any relevant
/// ChainWatchInterfaces such that the provided monitor receives block_connected callbacks with
/// any spends of it.
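A sketch of how a caller might honor the persist-before-return rule described above. Only the ordering requirement comes from these docs; the add_update_monitor signature, the TemporaryFailure variant name, and the persist helper are assumptions not shown in this hunk.

use lightning::chain::transaction::OutPoint;
use lightning::ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor};

// Sketch: durably persist a monitor update before letting the in-memory monitor see it.
fn add_update_monitor_durably<M: ManyChannelMonitor>(
    local: &M,
    funding_txo: OutPoint,
    monitor: ChannelMonitor,
) -> Result<(), ChannelMonitorUpdateErr> {
    // 1) Write the new state to disk and to any remote watchtowers first. If that fails,
    //    report a temporary failure so the channel freezes rather than progressing with
    //    an unpersisted revocation state.
    persist_monitor_somewhere(&funding_txo, &monitor)
        .map_err(|_| ChannelMonitorUpdateErr::TemporaryFailure)?;
    // 2) Only then apply it to the local in-memory monitor that watches the chain.
    local.add_update_monitor(funding_txo, monitor)
}

fn persist_monitor_somewhere(_txo: &OutPoint, _mon: &ChannelMonitor) -> Result<(), ()> {
    // Serialize and fsync to disk / upload to a watchtower here.
    Ok(())
}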
@@ -69,10 +74,13 @@ pub trait ManyChannelMonitor: Send + Sync {

/// A simple implementation of a ManyChannelMonitor and ChainListener. Can be used to create a
/// watchtower or watch our own channels.
///
/// Note that you must provide your own key by which to refer to channels.
///
/// If you're accepting remote monitors (ie are implementing a watchtower), you must verify that
/// users cannot overwrite a given channel by providing a duplicate key. ie you should probably
/// index by a PublicKey which is required to sign any updates.
///
/// If you're using this for local monitoring of your own channels, you probably want to use
/// `OutPoint` as the key, which will give you a ManyChannelMonitor implementation.
pub struct SimpleManyChannelMonitor<Key> {

@@ -180,6 +188,7 @@ const MIN_SERIALIZATION_VERSION: u8 = 1;

/// A ChannelMonitor handles chain events (blocks connected and disconnected) and generates
/// on-chain transactions to ensure no loss of funds occurs.
///
/// You MUST ensure that no ChannelMonitors for a given channel anywhere contain out-of-date
/// information and are actively monitoring the chain.
pub struct ChannelMonitor {
@@ -1,8 +1,10 @@
//! High level lightning structs and impls live here.
//!
//! You probably want to create a channelmanager::ChannelManager, and a router::Router first.
//! Then, you probably want to pass them both on to a peer_handler::PeerManager and use that to
//! create/manage connections and call get_and_clear_pending_events after each action, handling
//! them appropriately.
//!
//! When you want to open/close a channel or send a payment, call into your ChannelManager and when
//! you want to learn things about the network topology (eg get a route for sending a payment),
//! call into your Router.
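A wiring sketch of the setup the module doc above describes. The MessageHandler field names, the PeerManager::new argument list, and the module paths are assumptions based on the era of this commit; only the overall shape (ChannelManager + Router handed to a PeerManager, events drained after each action) comes from the docs.

use std::sync::Arc;
use secp256k1::key::SecretKey;
use lightning::ln::channelmanager::ChannelManager;
use lightning::ln::peer_handler::{MessageHandler, PeerManager, SocketDescriptor};
use lightning::ln::router::Router;
use lightning::util::logger::Logger;

// Sketch: hand the ChannelManager (channel messages) and Router (gossip messages) to a
// PeerManager, which then drives both from raw socket bytes.
fn wire_up<D: SocketDescriptor>(
    channel_manager: Arc<ChannelManager>,
    router: Arc<Router>,
    our_node_secret: SecretKey,
    logger: Arc<Logger>,
) -> PeerManager<D> {
    let handler = MessageHandler {
        chan_handler: channel_manager, // handles channel-related wire messages
        route_handler: router,         // handles gossip/routing wire messages
    };
    PeerManager::new(handler, our_node_secret, logger)
}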
@@ -1,9 +1,11 @@
//! Wire messages, traits representing wire message handlers, and a few error types live here.
//!
//! For a normal node you probably don't need to use anything here, however, if you wish to split a
//! node into an internet-facing route/message socket handling daemon and a separate daemon (or
//! server entirely) which handles only channel-related messages you may wish to implement
//! ChannelMessageHandler yourself and use it to re-serialize messages and pass them across
//! daemons/servers.
//!
//! Note that if you go with such an architecture (instead of passing raw socket events to a
//! non-internet-facing system) you trust the frontend internet-facing system to not lie about the
//! source node_id of the message, however this does allow you to significantly reduce bandwidth

@@ -484,9 +486,10 @@ pub enum HTLCFailChannelUpdate {
},
}

/// A trait to describe an object which can receive channel messages. Messages MAY be called in
/// parallel when they originate from different their_node_ids, however they MUST NOT be called in
/// parallel when the two calls have the same their_node_id.
/// A trait to describe an object which can receive channel messages.
///
/// Messages MAY be called in parallel when they originate from different their_node_ids, however
/// they MUST NOT be called in parallel when the two calls have the same their_node_id.
pub trait ChannelMessageHandler : events::EventsProvider + Send + Sync {
//Channel init:
/// Handle an incoming open_channel message from the given peer.
@@ -1,4 +1,5 @@
//! Top level peer message handling and socket handling logic lives here.
//!
//! Instead of actually servicing sockets ourselves we require that you implement the
//! SocketDescriptor interface and use that to receive actions which you should perform on the
//! socket, and call into PeerManager with bytes read from the socket. The PeerManager will then

@@ -32,7 +33,9 @@ pub struct MessageHandler {
/// Provides an object which can be used to send data to and which uniquely identifies a connection
/// to a remote host. You will need to be able to generate multiple of these which meet Eq and
/// implement Hash to meet the PeerManager API.
///
/// For efficiency, Clone should be relatively cheap for this type.
///
/// You probably want to just extend an int and put a file descriptor in a struct and implement
/// send_data. Note that if you are using a higher-level net library that may close() itself, be
/// careful to ensure you don't have races whereby you might register a new connection with an fd

@@ -42,9 +45,11 @@ pub trait SocketDescriptor : cmp::Eq + hash::Hash + Clone {
/// Returns the amount of data which was sent, possibly 0 if the socket has since disconnected.
/// Note that in the disconnected case, a disconnect_event must still fire and further write
/// attempts may occur until that time.
///
/// If the returned size is smaller than data.len() - write_offset, a write_available event must
/// trigger the next time more data can be written. Additionally, until a send_data event
/// completes fully, no further read_events should trigger on the same peer!
///
/// If a read_event on this descriptor had previously returned true (indicating that read
/// events should be paused to prevent DoS in the send buffer), resume_read may be set
/// indicating that read events on this descriptor should resume. A resume_read of false does
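To make the send_data contract above concrete, here is a sketch written as a plain helper around a non-blocking TcpStream rather than a full SocketDescriptor implementation (whose exact method signature is not shown in this hunk). It returns the number of bytes written starting at write_offset; 0 can mean "would block" or "disconnected", and a short write means the caller must arrange a later write_available/write_event call.

use std::io::{ErrorKind, Write};
use std::net::TcpStream;

// Sketch: best-effort partial write from data[write_offset..] on a non-blocking socket.
fn send_data_from(stream: &mut TcpStream, data: &[u8], write_offset: usize) -> usize {
    let mut sent = 0usize;
    while write_offset + sent < data.len() {
        match stream.write(&data[write_offset + sent..]) {
            Ok(0) => break,  // peer closed; a disconnect_event must still fire later
            Ok(n) => sent += n,
            Err(ref e) if e.kind() == ErrorKind::WouldBlock => break, // retry on write_available
            Err(_) => break, // treat hard errors as a pending disconnect
        }
    }
    sent
}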
@@ -167,6 +172,7 @@ impl<Descriptor: SocketDescriptor> PeerManager<Descriptor> {
}

/// Get the list of node ids for peers which have completed the initial handshake.
///
/// For outbound connections, this will be the same as the their_node_id parameter passed in to
/// new_outbound_connection, however entries will only appear once the initial handshake has
/// completed and we are sure the remote peer has the private key for the given node_id.

@@ -183,7 +189,9 @@ impl<Descriptor: SocketDescriptor> PeerManager<Descriptor> {
/// Indicates a new outbound connection has been established to a node with the given node_id.
/// Note that if an Err is returned here you MUST NOT call disconnect_event for the new
/// descriptor but must disconnect the connection immediately.
///
/// Returns some bytes to send to the remote node.
///
/// Panics if descriptor is duplicative with some other descriptor which has not yet had a
/// disconnect_event.
pub fn new_outbound_connection(&self, their_node_id: PublicKey, descriptor: Descriptor) -> Result<Vec<u8>, PeerHandleError> {

@@ -213,10 +221,12 @@ impl<Descriptor: SocketDescriptor> PeerManager<Descriptor> {
}

/// Indicates a new inbound connection has been established.
///
/// May refuse the connection by returning an Err, but will never write bytes to the remote end
/// (outbound connector always speaks first). Note that if an Err is returned here you MUST NOT
/// call disconnect_event for the new descriptor but must disconnect the connection
/// immediately.
///
/// Panics if descriptor is duplicative with some other descriptor which has not yet had a
/// disconnect_event.
pub fn new_inbound_connection(&self, descriptor: Descriptor) -> Result<(), PeerHandleError> {

@@ -266,12 +276,14 @@ impl<Descriptor: SocketDescriptor> PeerManager<Descriptor> {
}

/// Indicates that there is room to write data to the given socket descriptor.
///
/// May return an Err to indicate that the connection should be closed.
///
/// Will most likely call send_data on the descriptor passed in (or the descriptor handed into
/// new_*_connection) before returning. Thus, be very careful with reentrancy issues! The
/// new_*\_connection) before returning. Thus, be very careful with reentrancy issues! The
/// invariants around calling write_event in case a write did not fully complete must still
/// hold - be ready to call write_event again if a write call generated here isn't sufficient!
/// Panics if the descriptor was not previously registered in a new_*_connection event.
/// Panics if the descriptor was not previously registered in a new_\*_connection event.
pub fn write_event(&self, descriptor: &mut Descriptor) -> Result<(), PeerHandleError> {
let mut peers = self.peers.lock().unwrap();
match peers.peers.get_mut(descriptor) {

@@ -285,16 +297,20 @@ impl<Descriptor: SocketDescriptor> PeerManager<Descriptor> {
}

/// Indicates that data was read from the given socket descriptor.
///
/// May return an Err to indicate that the connection should be closed.
///
/// Will very likely call send_data on the descriptor passed in (or a descriptor handed into
/// new_*_connection) before returning. Thus, be very careful with reentrancy issues! The
/// invariants around calling write_event in case a write did not fully complete must still
/// hold. Note that this function will often call send_data on many peers before returning, not
/// just this peer!
///
/// If Ok(true) is returned, further read_events should not be triggered until a write_event on
/// this file descriptor has resume_read set (preventing DoS issues in the send buffer). Note
/// that this must be true even if a send_data call with resume_read=true was made during the
/// course of this function!
///
/// Panics if the descriptor was not previously registered in a new_*_connection event.
pub fn read_event(&self, peer_descriptor: &mut Descriptor, data: Vec<u8>) -> Result<bool, PeerHandleError> {
match self.do_read_event(peer_descriptor, data) {
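A caller-side sketch of the read-side pause/resume contract documented above. The `pending_read_pause` flag is our own per-connection bookkeeping and the surrounding socket loop is assumed; the read_event and disconnect_event signatures are the ones shown in this diff.

use lightning::ln::peer_handler::{PeerManager, SocketDescriptor};

// Sketch: feed bytes read from the socket into the PeerManager and honor the
// Ok(true) "stop reading" signal.
fn on_socket_readable<D: SocketDescriptor>(
    peer_manager: &PeerManager<D>,
    descriptor: &mut D,
    buf: Vec<u8>,
    pending_read_pause: &mut bool,
) {
    match peer_manager.read_event(descriptor, buf) {
        Ok(pause_read) => {
            // Ok(true) means the send buffer is backed up: stop delivering read_events
            // for this peer until a later write_event/send_data sets resume_read.
            *pending_read_pause = pause_read;
        }
        Err(_) => {
            // The connection should be closed; disconnect_event must still be called
            // when a read_event (as opposed to new_*_connection) returned the error.
            peer_manager.disconnect_event(descriptor);
        }
    }
}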
@@ -935,8 +951,10 @@ impl<Descriptor: SocketDescriptor> PeerManager<Descriptor> {
}

/// Indicates that the given socket descriptor's connection is now closed.
///
/// This must be called even if a PeerHandleError was given for a read_event or write_event,
/// but must NOT be called if a PeerHandleError was provided out of a new_*_connection event!
/// but must NOT be called if a PeerHandleError was provided out of a new_\*\_connection event!
///
/// Panics if the descriptor was not previously registered in a successful new_*_connection event.
pub fn disconnect_event(&self, descriptor: &Descriptor) {
self.disconnect_event_internal(descriptor, false);
@@ -1,4 +1,5 @@
//! The top-level routing/network map tracking logic lives here.
//!
//! You probably want to create a Router and use that as your RoutingMessageHandler and then
//! interrogate it to get routes for your own payments.

@@ -523,13 +524,18 @@ impl Router {
}

/// Gets a route from us to the given target node.
///
/// Extra routing hops between known nodes and the target will be used if they are included in
/// last_hops.
///
/// If some channels aren't announced, it may be useful to fill in a first_hops with the
/// results from a local ChannelManager::list_usable_channels() call. If it is filled in, our
/// (this Router's) view of our local channels will be ignored, and only those in first_hops
/// will be used. Panics if first_hops contains channels without short_channel_ids
/// will be used.
///
/// Panics if first_hops contains channels without short_channel_ids
/// (ChannelManager::list_usable_channels will never include such channels).
///
/// The fees on channels from us to next-hops are ignored (as they are assumed to all be
/// equal), however the enabled/disabled bit on such channels as well as the htlc_minimum_msat
/// *is* checked as they may change based on the receiving node.
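A sketch of the first_hops usage described above. The exact get_route parameter list is not shown in this hunk, so the argument names, order, and error type here are assumptions; the point illustrated is that unannounced local channels are supplied via ChannelManager::list_usable_channels, whose entries always carry a short_channel_id.

use secp256k1::key::PublicKey;
use lightning::ln::channelmanager::ChannelManager;
use lightning::ln::msgs::HandleError;
use lightning::ln::router::{Route, Router};

// Sketch: route to a payee, letting the router see our unannounced local channels.
fn route_to(
    router: &Router,
    channel_manager: &ChannelManager,
    payee: &PublicKey,
    amount_msat: u64,
    final_cltv: u32,
) -> Result<Route, HandleError> {
    // Only usable channels are returned, so the "Panics if..." case above cannot trigger.
    let first_hops = channel_manager.list_usable_channels();
    router.get_route(payee, Some(&first_hops), &Vec::new(), amount_msat, final_cltv)
}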
@@ -1,5 +1,6 @@
//! Events are returned from various bits in the library which indicate some action must be taken
//! by the client.
//!
//! Because we don't have a built-in runtime, it's up to the client to call events at a time in the
//! future, as well as generate and broadcast funding transactions, handle payment preimages, and a
//! few other things.
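A sketch of the client-driven event loop this module doc implies: after each action (and periodically), drain pending events and act on them. EventsProvider::get_and_clear_pending_events and the variant names used below are referenced elsewhere in this diff; the surrounding plumbing and the import path are assumptions.

use lightning::util::events::{Event, EventsProvider};

// Sketch: drain and dispatch pending events from a ChannelManager or PeerManager.
fn drain_events<E: EventsProvider>(provider: &E) {
    for event in provider.get_and_clear_pending_events() {
        match event {
            Event::FundingGenerationReady { .. } => {
                // Build the funding transaction, then call
                // ChannelManager::funding_transaction_generated with its outpoint.
            }
            Event::PaymentReceived { .. } => {
                // Look up the preimage for the payment_hash and call
                // ChannelManager::claim_funds.
            }
            _ => {
                // Message-sending events (SendOpenChannel, UpdateHTLCs, ...) are handled
                // for you by PeerManager::process_events if you use a PeerManager.
            }
        }
    }
}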
@@ -82,6 +83,7 @@ pub enum Event {
// TODO: Move these into a separate struct and make a top-level enum
/// Used to indicate that we've initiated a channel open and should send the open_channel
/// message provided to the given peer.
///
/// This event is handled by PeerManager::process_events if you are using a PeerManager.
SendOpenChannel {
/// The node_id of the node which should receive this message
@@ -90,6 +92,7 @@ pub enum Event {
msg: msgs::OpenChannel,
},
/// Used to indicate that a funding_created message should be sent to the peer with the given node_id.
///
/// This event is handled by PeerManager::process_events if you are using a PeerManager.
SendFundingCreated {
/// The node_id of the node which should receive this message

@@ -98,6 +101,7 @@ pub enum Event {
msg: msgs::FundingCreated,
},
/// Used to indicate that a funding_locked message should be sent to the peer with the given node_id.
///
/// This event is handled by PeerManager::process_events if you are using a PeerManager.
SendFundingLocked {
/// The node_id of the node which should receive these message(s)

@@ -109,6 +113,7 @@ pub enum Event {
},
/// Used to indicate that a series of HTLC update messages, as well as a commitment_signed
/// message should be sent to the peer with the given node_id.
///
/// This event is handled by PeerManager::process_events if you are using a PeerManager.
UpdateHTLCs {
/// The node_id of the node which should receive these message(s)

@@ -117,6 +122,7 @@ pub enum Event {
updates: msgs::CommitmentUpdate,
},
/// Used to indicate that a shutdown message should be sent to the peer with the given node_id.
///
/// This event is handled by PeerManager::process_events if you are using a PeerManager.
SendShutdown {
/// The node_id of the node which should receive this message

@@ -126,6 +132,7 @@ pub enum Event {
},
/// Used to indicate that a channel_announcement and channel_update should be broadcast to all
/// peers (except the peer with node_id either msg.contents.node_id_1 or msg.contents.node_id_2).
///
/// This event is handled by PeerManager::process_events if you are using a PeerManager.
BroadcastChannelAnnouncement {
/// The channel_announcement which should be sent.

@@ -134,6 +141,7 @@ pub enum Event {
update_msg: msgs::ChannelUpdate,
},
/// Used to indicate that a channel_update should be broadcast to all peers.
///
/// This event is handled by PeerManager::process_events if you are using a PeerManager.
BroadcastChannelUpdate {
/// The channel_update which should be sent.

@@ -142,6 +150,7 @@ pub enum Event {

//Error handling
/// Broadcast an error downstream to be handled
///
/// This event is handled by PeerManager::process_events if you are using a PeerManager.
HandleError {
/// The node_id of the node which should receive this message