// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 or the MIT
// license, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! Top level peer message handling and socket handling logic lives here.
//!
//! Instead of actually servicing sockets ourselves we require that you implement the
//! SocketDescriptor interface and use that to receive actions which you should perform on the
//! socket, and call into PeerManager with bytes read from the socket. The PeerManager will then
//! call into the provided message handlers (probably a ChannelManager and NetGraphMsgHandler)
//! with messages they should handle, encoding and sending any response messages itself.

use bitcoin::secp256k1::key::{SecretKey,PublicKey};

use ln::features::InitFeatures;
use ln::msgs;
use ln::msgs::{ChannelMessageHandler, LightningError, NetAddress, RoutingMessageHandler};
use ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager};
use util::ser::{VecWriter, Writeable, Writer};
use ln::peer_channel_encryptor::{PeerChannelEncryptor,NextNoiseStep};
use ln::wire;
use ln::wire::Encode;
use util::atomic_counter::AtomicCounter;
use util::events::{MessageSendEvent, MessageSendEventsProvider};
use util::logger::Logger;
use routing::network_graph::{NetworkGraph, NetGraphMsgHandler};

use prelude::*;
use io;
use alloc::collections::LinkedList;
use sync::{Arc, Mutex, MutexGuard, RwLock};
use core::{cmp, hash, fmt, mem};
use core::ops::Deref;
use core::convert::Infallible;
#[cfg(feature = "std")] use std::error;

use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256::HashEngine as Sha256Engine;
use bitcoin::hashes::{HashEngine, Hash};

/// Handler for BOLT1-compliant messages.
pub trait CustomMessageHandler: wire::CustomMessageReader {
	/// Called with the message type that was received and the buffer to be read.
	/// Can return a `LightningError` if the message could not be handled.
	fn handle_custom_message(&self, msg: Self::CustomMessage, sender_node_id: &PublicKey) -> Result<(), LightningError>;

	/// Gets the list of pending messages which were generated by the custom message
	/// handler, clearing the list in the process. The first tuple element must
	/// correspond to the intended recipient's node id. If no connection to one of the
	/// specified nodes exists, the message is simply not sent to it.
	fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)>;
}

/// A dummy struct which implements `RoutingMessageHandler` without storing any routing information
/// or doing any processing. You can provide one of these as the route_handler in a MessageHandler.
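///
/// An illustrative sketch (assuming `my_channel_manager` is a [`ChannelManager`] you already
/// have): a node which does not wish to process or relay gossip can plug an
/// `IgnoringMessageHandler` in as the route handler:
///
/// ```ignore
/// use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler};
///
/// let message_handler = MessageHandler {
/// 	chan_handler: &my_channel_manager,
/// 	route_handler: IgnoringMessageHandler{},
/// };
/// ```
///
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager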
pub struct IgnoringMessageHandler{}
impl MessageSendEventsProvider for IgnoringMessageHandler {
	fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> { Vec::new() }
}
impl RoutingMessageHandler for IgnoringMessageHandler {
	fn handle_node_announcement(&self, _msg: &msgs::NodeAnnouncement) -> Result<bool, LightningError> { Ok(false) }
	fn handle_channel_announcement(&self, _msg: &msgs::ChannelAnnouncement) -> Result<bool, LightningError> { Ok(false) }
	fn handle_channel_update(&self, _msg: &msgs::ChannelUpdate) -> Result<bool, LightningError> { Ok(false) }
	fn get_next_channel_announcements(&self, _starting_point: u64, _batch_amount: u8) ->
		Vec<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> { Vec::new() }
	fn get_next_node_announcements(&self, _starting_point: Option<&PublicKey>, _batch_amount: u8) -> Vec<msgs::NodeAnnouncement> { Vec::new() }
	fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) {}
	fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
	fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
	fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::QueryChannelRange) -> Result<(), LightningError> { Ok(()) }
	fn handle_query_short_channel_ids(&self, _their_node_id: &PublicKey, _msg: msgs::QueryShortChannelIds) -> Result<(), LightningError> { Ok(()) }
}
impl Deref for IgnoringMessageHandler {
	type Target = IgnoringMessageHandler;
	fn deref(&self) -> &Self { self }
}

// Implement Type for Infallible, note that it cannot be constructed, and thus you can never call a
// method that takes self for it.
impl wire::Type for Infallible {
	fn type_id(&self) -> u16 {
		unreachable!();
	}
}
impl Writeable for Infallible {
	fn write<W: Writer>(&self, _: &mut W) -> Result<(), io::Error> {
		unreachable!();
	}
}

impl wire::CustomMessageReader for IgnoringMessageHandler {
	type CustomMessage = Infallible;
	fn read<R: io::Read>(&self, _message_type: u16, _buffer: &mut R) -> Result<Option<Self::CustomMessage>, msgs::DecodeError> {
		Ok(None)
	}
}

impl CustomMessageHandler for IgnoringMessageHandler {
	fn handle_custom_message(&self, _msg: Infallible, _sender_node_id: &PublicKey) -> Result<(), LightningError> {
		// Since we always return `None` in the read the handle method should never be called.
		unreachable!();
	}

	fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> { Vec::new() }
}

/// A dummy struct which implements `ChannelMessageHandler` without having any channels.
/// You can provide one of these as the chan_handler in a MessageHandler.
pub struct ErroringMessageHandler {
	message_queue: Mutex<Vec<MessageSendEvent>>
}
impl ErroringMessageHandler {
	/// Constructs a new ErroringMessageHandler
	pub fn new() -> Self {
		Self { message_queue: Mutex::new(Vec::new()) }
	}
	fn push_error(&self, node_id: &PublicKey, channel_id: [u8; 32]) {
		self.message_queue.lock().unwrap().push(MessageSendEvent::HandleError {
			action: msgs::ErrorAction::SendErrorMessage {
				msg: msgs::ErrorMessage { channel_id, data: "We do not support channel messages, sorry.".to_owned() },
			},
			node_id: node_id.clone(),
		});
	}
}
impl MessageSendEventsProvider for ErroringMessageHandler {
	fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
		let mut res = Vec::new();
		mem::swap(&mut res, &mut self.message_queue.lock().unwrap());
		res
	}
}
impl ChannelMessageHandler for ErroringMessageHandler {
	// Any messages which are related to a specific channel generate an error message to let the
	// peer know we don't care about channels.
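	// Each handler below queues a MessageSendEvent::HandleError carrying an ErrorMessage for the
	// offending channel; the events are later drained via get_and_clear_pending_msg_events() and
	// delivered to the peer by the PeerManager during process_events().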
fn handle_open_channel(&self, their_node_id: &PublicKey, _their_features: InitFeatures, msg: &msgs::OpenChannel) { ErroringMessageHandler::push_error(self, their_node_id, msg.temporary_channel_id); } fn handle_accept_channel(&self, their_node_id: &PublicKey, _their_features: InitFeatures, msg: &msgs::AcceptChannel) { ErroringMessageHandler::push_error(self, their_node_id, msg.temporary_channel_id); } fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) { ErroringMessageHandler::push_error(self, their_node_id, msg.temporary_channel_id); } fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) { ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id); } fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) { ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id); } fn handle_shutdown(&self, their_node_id: &PublicKey, _their_features: &InitFeatures, msg: &msgs::Shutdown) { ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id); } fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) { ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id); } fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) { ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id); } fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) { ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id); } fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) { ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id); } fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) { ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id); } fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) { ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id); } fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) { ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id); } fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) { ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id); } fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) { ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id); } fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) { ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id); } // msgs::ChannelUpdate does not contain the channel_id field, so we just drop them. fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelUpdate) {} fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {} fn peer_connected(&self, _their_node_id: &PublicKey, _msg: &msgs::Init) {} fn handle_error(&self, _their_node_id: &PublicKey, _msg: &msgs::ErrorMessage) {} } impl Deref for ErroringMessageHandler { type Target = ErroringMessageHandler; fn deref(&self) -> &Self { self } } /// Provides references to trait impls which handle different types of messages. 
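///
/// A minimal sketch of the usual configuration, assuming `channel_manager` and
/// `net_graph_msg_handler` are the [`ChannelManager`] and [`NetGraphMsgHandler`] you have already
/// constructed:
///
/// ```ignore
/// use lightning::ln::peer_handler::MessageHandler;
///
/// let message_handler = MessageHandler {
/// 	chan_handler: &channel_manager,
/// 	route_handler: &net_graph_msg_handler,
/// };
/// ```
///
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
/// [`NetGraphMsgHandler`]: crate::routing::network_graph::NetGraphMsgHandler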
pub struct MessageHandler<CM: Deref, RM: Deref> where
		CM::Target: ChannelMessageHandler,
		RM::Target: RoutingMessageHandler {
	/// A message handler which handles messages specific to channels. Usually this is just a
	/// [`ChannelManager`] object or an [`ErroringMessageHandler`].
	///
	/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
	pub chan_handler: CM,
	/// A message handler which handles messages updating our knowledge of the network channel
	/// graph. Usually this is just a [`NetGraphMsgHandler`] object or an
	/// [`IgnoringMessageHandler`].
	///
	/// [`NetGraphMsgHandler`]: crate::routing::network_graph::NetGraphMsgHandler
	pub route_handler: RM,
}

/// Provides an object which can be used to send data to and which uniquely identifies a connection
/// to a remote host. You will need to be able to generate multiple of these which implement Eq and
/// Hash, as required by the PeerManager API.
///
/// For efficiency, Clone should be relatively cheap for this type.
///
/// Two descriptors may compare equal (by [`cmp::Eq`] and [`hash::Hash`]) as long as the original
/// has been disconnected, the [`PeerManager`] has been informed of the disconnection (either by it
/// having triggered the disconnection or a call to [`PeerManager::socket_disconnected`]), and no
/// further calls to the [`PeerManager`] related to the original socket occur. This allows you to
/// use a file descriptor for your SocketDescriptor directly, however for simplicity you may wish
/// to simply use another value which is guaranteed to be globally unique instead.
pub trait SocketDescriptor : cmp::Eq + hash::Hash + Clone {
	/// Attempts to send some data from the given slice to the peer.
	///
	/// Returns the amount of data which was sent, possibly 0 if the socket has since disconnected.
	/// Note that in the disconnected case, [`PeerManager::socket_disconnected`] must still be
	/// called and further write attempts may occur until that time.
	///
	/// If the returned size is smaller than `data.len()`, a
	/// [`PeerManager::write_buffer_space_avail`] call must be made the next time more data can be
	/// written. Additionally, until a `send_data` event completes fully, no further
	/// [`PeerManager::read_event`] calls should be made for the same peer! As this is used to
	/// prevent denial-of-service issues, you should not read or buffer any data from the socket
	/// until then.
	///
	/// If a [`PeerManager::read_event`] call on this descriptor had previously returned true
	/// (indicating that read events should be paused to prevent DoS in the send buffer),
	/// `resume_read` may be set indicating that read events on this descriptor should resume. A
	/// `resume_read` of false carries no meaning, and should not cause any action.
	fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize;
	/// Disconnect the socket pointed to by this SocketDescriptor.
	///
	/// You do *not* need to call [`PeerManager::socket_disconnected`] with this socket after this
	/// call (doing so is a noop).
	fn disconnect_socket(&mut self);
}

/// An error type returned by PeerManager methods. If you get one of these, you must disconnect
/// the socket and generate no further read_event/write_buffer_space_avail/socket_disconnected
/// calls for the descriptor.
#[derive(Clone)]
pub struct PeerHandleError {
	/// Used to indicate that we probably can't make any future connections to this peer, implying
	/// we should go ahead and force-close any channels we have with it.
	pub no_connection_possible: bool,
}
impl fmt::Debug for PeerHandleError {
	fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
		formatter.write_str("Peer Sent Invalid Data")
	}
}
impl fmt::Display for PeerHandleError {
	fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
		formatter.write_str("Peer Sent Invalid Data")
	}
}
#[cfg(feature = "std")]
impl error::Error for PeerHandleError {
	fn description(&self) -> &str {
		"Peer Sent Invalid Data"
	}
}

enum InitSyncTracker{
	NoSyncRequested,
	ChannelsSyncing(u64),
	NodesSyncing(PublicKey),
}

/// The ratio between buffer sizes at which we stop sending initial sync messages vs when we stop
/// forwarding gossip messages to peers altogether.
const FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO: usize = 2;
/// When the outbound buffer has this many messages, we'll stop reading bytes from the peer until
/// we have fewer than this many messages in the outbound buffer again.
/// We also use this as the target number of outbound gossip messages to keep in the write buffer,
/// refilled as we send bytes.
const OUTBOUND_BUFFER_LIMIT_READ_PAUSE: usize = 10;
/// When the outbound buffer has this many messages, we'll simply skip relaying gossip messages to
/// the peer.
const OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP: usize = OUTBOUND_BUFFER_LIMIT_READ_PAUSE * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO;

/// If we've sent a ping, and are still awaiting a response, we may need to churn our way through
/// the socket receive buffer before receiving the pong.
///
/// On a fairly old Arm64 board, with Linux defaults, this can take as long as 20 seconds, not
/// including any network delays, outbound traffic, or the same for messages from other peers.
///
/// Thus, to avoid needlessly disconnecting a peer, we allow a peer to take this many timer ticks
/// per connected peer to respond to a ping, as long as they send us at least one message during
/// each tick, ensuring we aren't actually just disconnected.
/// With a timer tick interval of ten seconds, this translates to about 40 seconds per connected
/// peer.
///
/// When we improve parallelism somewhat we should reduce this to e.g. this many timer ticks per
/// two connected peers, assuming most LDK-running systems have at least two cores.
const MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER: i8 = 4;

/// This is the minimum number of messages we expect a peer to be able to handle within one timer
/// tick. Once we have sent this many messages since the last ping, we send a ping right away to
/// ensure we don't just fill up our send buffer and leave the peer with too many messages to
/// process before the next ping.
const BUFFER_DRAIN_MSGS_PER_TICK: usize = 32;

struct Peer {
	channel_encryptor: PeerChannelEncryptor,
	their_node_id: Option<PublicKey>,
	their_features: Option<InitFeatures>,
	their_net_address: Option<NetAddress>,

	pending_outbound_buffer: LinkedList<Vec<u8>>,
	pending_outbound_buffer_first_msg_offset: usize,
	awaiting_write_event: bool,

	pending_read_buffer: Vec<u8>,
	pending_read_buffer_pos: usize,
	pending_read_is_header: bool,

	sync_status: InitSyncTracker,

	msgs_sent_since_pong: usize,
	awaiting_pong_timer_tick_intervals: i8,
	received_message_since_timer_tick: bool,
	sent_gossip_timestamp_filter: bool,
}

impl Peer {
	/// Returns true if the channel announcements/updates for the given channel should be
	/// forwarded to this peer.
	/// If we are sending our routing table to this peer and we have not yet sent channel
	/// announcements/updates for the given channel_id then we will send it when we get to that
	/// point and we shouldn't send it yet to avoid sending duplicate updates. If we've already
	/// sent the old versions, we should send the update, and so return true here.
	fn should_forward_channel_announcement(&self, channel_id: u64) -> bool {
		if self.their_features.as_ref().unwrap().supports_gossip_queries() &&
				!self.sent_gossip_timestamp_filter {
			return false;
		}
		match self.sync_status {
			InitSyncTracker::NoSyncRequested => true,
			InitSyncTracker::ChannelsSyncing(i) => i < channel_id,
			InitSyncTracker::NodesSyncing(_) => true,
		}
	}

	/// Similar to the above, but for node announcements indexed by node_id.
	fn should_forward_node_announcement(&self, node_id: PublicKey) -> bool {
		if self.their_features.as_ref().unwrap().supports_gossip_queries() &&
				!self.sent_gossip_timestamp_filter {
			return false;
		}
		match self.sync_status {
			InitSyncTracker::NoSyncRequested => true,
			InitSyncTracker::ChannelsSyncing(_) => false,
			InitSyncTracker::NodesSyncing(pk) => pk < node_id,
		}
	}
}

struct PeerHolder<Descriptor: SocketDescriptor> {
	/// Peer is under its own mutex for sending and receiving bytes, but note that we do *not* hold
	/// this mutex while we're processing a message. This is fine as [`PeerManager::read_event`]
	/// requires that there be no parallel calls for a given peer, so mutual exclusion of messages
	/// handed to the `MessageHandler`s for a given peer is already guaranteed.
	peers: HashMap<Descriptor, Mutex<Peer>>,
}

/// SimpleArcPeerManager is useful when you need a PeerManager with a static lifetime, e.g.
/// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static
/// lifetimes). Other times you can afford a reference, which is more efficient, in which case
/// SimpleRefPeerManager is the more appropriate type. Defining these type aliases prevents
/// issues such as overly long function definitions.
///
/// (C-not exported) as Arcs don't make sense in bindings
pub type SimpleArcPeerManager<SD, M, T, F, C, L> = PeerManager<SD, Arc<SimpleArcChannelManager<M, T, F, L>>, Arc<NetGraphMsgHandler<Arc<NetworkGraph>, Arc<C>, Arc<L>>>, Arc<L>, Arc<IgnoringMessageHandler>>;

/// SimpleRefPeerManager is a type alias for a PeerManager reference, and is the reference
/// counterpart to the SimpleArcPeerManager type alias. Use this type by default when you don't
/// need a PeerManager with a static lifetime. You'll need a static lifetime in cases such as
/// usage of lightning-net-tokio (since tokio::spawn requires parameters with static lifetimes).
/// But if this is not necessary, using a reference is more efficient. Defining these type aliases
/// helps with issues such as long function definitions.
///
/// (C-not exported) as Arcs don't make sense in bindings
pub type SimpleRefPeerManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, SD, M, T, F, C, L> = PeerManager<SD, SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L>, &'e NetGraphMsgHandler<&'g NetworkGraph, &'h C, &'f L>, &'f L, IgnoringMessageHandler>;

/// A PeerManager manages a set of peers, described by their [`SocketDescriptor`] and marshalls
/// socket events into messages which it passes on to its [`MessageHandler`].
///
/// Locks are taken internally, so you must never assume that reentrancy from a
/// [`SocketDescriptor`] call back into [`PeerManager`] methods will not deadlock.
///
/// Calls to [`read_event`] will decode relevant messages and pass them to the
/// [`ChannelMessageHandler`], likely doing message processing in-line. Thus, the primary form of
/// parallelism in Rust-Lightning is in calls to [`read_event`].
/// Note, however, that calls to any [`PeerManager`] functions related to the same connection must
/// occur only in serial, making new calls only after previous ones have returned.
///
/// Rather than using a plain PeerManager, it is preferable to use either a SimpleArcPeerManager
/// or a SimpleRefPeerManager, for conciseness. See their documentation for more details, but
/// essentially you should default to using a SimpleRefPeerManager, and use a
/// SimpleArcPeerManager when you require a PeerManager with a static lifetime, such as when
/// you're using lightning-net-tokio.
///
/// [`read_event`]: PeerManager::read_event
pub struct PeerManager<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> where
		CM::Target: ChannelMessageHandler,
		RM::Target: RoutingMessageHandler,
		L::Target: Logger,
		CMH::Target: CustomMessageHandler {
	message_handler: MessageHandler<CM, RM>,
	peers: RwLock<PeerHolder<Descriptor>>,
	/// Only add to this set when noise completes.
	/// Locked *after* peers. When an item is removed, it must be removed with the `peers` write
	/// lock held. Entries may be added with only the `peers` read lock held (though the
	/// `Descriptor` value must already exist in `peers`).
	node_id_to_descriptor: Mutex<HashMap<PublicKey, Descriptor>>,
	/// We can only have one thread processing events at once, but we don't usually need the full
	/// `peers` write lock to do so, so instead we block on this empty mutex when entering
	/// `process_events`.
	event_processing_lock: Mutex<()>,
	our_node_secret: SecretKey,
	ephemeral_key_midstate: Sha256Engine,
	custom_message_handler: CMH,

	peer_counter: AtomicCounter,

	logger: L,
}

enum MessageHandlingError {
	PeerHandleError(PeerHandleError),
	LightningError(LightningError),
}

impl From<PeerHandleError> for MessageHandlingError {
	fn from(error: PeerHandleError) -> Self {
		MessageHandlingError::PeerHandleError(error)
	}
}

impl From<LightningError> for MessageHandlingError {
	fn from(error: LightningError) -> Self {
		MessageHandlingError::LightningError(error)
	}
}

macro_rules! encode_msg {
	($msg: expr) => {{
		let mut buffer = VecWriter(Vec::new());
		wire::write($msg, &mut buffer).unwrap();
		buffer.0
	}}
}

impl<Descriptor: SocketDescriptor, CM: Deref, L: Deref> PeerManager<Descriptor, CM, IgnoringMessageHandler, L, IgnoringMessageHandler> where
		CM::Target: ChannelMessageHandler,
		L::Target: Logger {
	/// Constructs a new PeerManager with the given ChannelMessageHandler. No routing message
	/// handler is used and network graph messages are ignored.
	///
	/// ephemeral_random_data is used to derive per-connection ephemeral keys and must be
	/// cryptographically secure random bytes.
	///
	/// (C-not exported) as we can't export a PeerManager with a dummy route handler
	pub fn new_channel_only(channel_message_handler: CM, our_node_secret: SecretKey, ephemeral_random_data: &[u8; 32], logger: L) -> Self {
		Self::new(MessageHandler {
			chan_handler: channel_message_handler,
			route_handler: IgnoringMessageHandler{},
		}, our_node_secret, ephemeral_random_data, logger, IgnoringMessageHandler{})
	}
}

impl<Descriptor: SocketDescriptor, RM: Deref, L: Deref> PeerManager<Descriptor, ErroringMessageHandler, RM, L, IgnoringMessageHandler> where
		RM::Target: RoutingMessageHandler,
		L::Target: Logger {
	/// Constructs a new PeerManager with the given RoutingMessageHandler. No channel message
	/// handler is used and messages related to channels will be ignored (or generate error
	/// messages). Note that some other lightning implementations time-out connections after some
	/// time if no channel is built with the peer.
	///
	/// ephemeral_random_data is used to derive per-connection ephemeral keys and must be
	/// cryptographically secure random bytes.
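	///
	/// A minimal sketch, assuming `net_graph_msg_handler`, `node_secret`, `random_bytes` and
	/// `logger` are values you have already constructed:
	///
	/// ```ignore
	/// use lightning::ln::peer_handler::PeerManager;
	///
	/// let peer_manager = PeerManager::new_routing_only(
	/// 	&net_graph_msg_handler, node_secret, &random_bytes, &logger);
	/// ```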
	///
	/// (C-not exported) as we can't export a PeerManager with a dummy channel handler
	pub fn new_routing_only(routing_message_handler: RM, our_node_secret: SecretKey, ephemeral_random_data: &[u8; 32], logger: L) -> Self {
		Self::new(MessageHandler {
			chan_handler: ErroringMessageHandler::new(),
			route_handler: routing_message_handler,
		}, our_node_secret, ephemeral_random_data, logger, IgnoringMessageHandler{})
	}
}

/// A simple wrapper that optionally prints " from <pubkey>" for an optional pubkey.
/// This works around `format!()` taking a reference to each argument, preventing
/// `if let Some(node_id) = peer.their_node_id { format!(.., node_id) } else { .. }` from compiling
/// due to lifetime errors.
struct OptionalFromDebugger<'a>(&'a Option<PublicKey>);
impl core::fmt::Display for OptionalFromDebugger<'_> {
	fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
		if let Some(node_id) = self.0 { write!(f, " from {}", log_pubkey!(node_id)) } else { Ok(()) }
	}
}

/// A function used to filter out local or private addresses
/// https://www.iana.org/assignments/ipv4-address-space/ipv4-address-space.xhtml
/// https://www.iana.org/assignments/ipv6-address-space/ipv6-address-space.xhtml
fn filter_addresses(ip_address: Option<NetAddress>) -> Option<NetAddress> {
	match ip_address {
		// For IPv4 range 10.0.0.0 - 10.255.255.255 (10/8)
		Some(NetAddress::IPv4{addr: [10, _, _, _], port: _}) => None,
		// For IPv4 range 0.0.0.0 - 0.255.255.255 (0/8)
		Some(NetAddress::IPv4{addr: [0, _, _, _], port: _}) => None,
		// For IPv4 range 100.64.0.0 - 100.127.255.255 (100.64/10)
		Some(NetAddress::IPv4{addr: [100, 64..=127, _, _], port: _}) => None,
		// For IPv4 range 127.0.0.0 - 127.255.255.255 (127/8)
		Some(NetAddress::IPv4{addr: [127, _, _, _], port: _}) => None,
		// For IPv4 range 169.254.0.0 - 169.254.255.255 (169.254/16)
		Some(NetAddress::IPv4{addr: [169, 254, _, _], port: _}) => None,
		// For IPv4 range 172.16.0.0 - 172.31.255.255 (172.16/12)
		Some(NetAddress::IPv4{addr: [172, 16..=31, _, _], port: _}) => None,
		// For IPv4 range 192.168.0.0 - 192.168.255.255 (192.168/16)
		Some(NetAddress::IPv4{addr: [192, 168, _, _], port: _}) => None,
		// For IPv4 range 192.88.99.0 - 192.88.99.255 (192.88.99/24)
		Some(NetAddress::IPv4{addr: [192, 88, 99, _], port: _}) => None,
		// For IPv6 range 2000:0000:0000:0000:0000:0000:0000:0000 - 3fff:ffff:ffff:ffff:ffff:ffff:ffff:ffff (2000::/3)
		Some(NetAddress::IPv6{addr: [0x20..=0x3F, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], port: _}) => ip_address,
		// For remaining addresses
		Some(NetAddress::IPv6{addr: _, port: _}) => None,
		Some(..) => ip_address,
		None => None,
	}
}

impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> PeerManager<Descriptor, CM, RM, L, CMH> where
		CM::Target: ChannelMessageHandler,
		RM::Target: RoutingMessageHandler,
		L::Target: Logger,
		CMH::Target: CustomMessageHandler {
	/// Constructs a new PeerManager with the given message handlers and node_id secret key.
	///
	/// ephemeral_random_data is used to derive per-connection ephemeral keys and must be
	/// cryptographically secure random bytes.
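	///
	/// A minimal usage sketch, assuming `channel_manager`, `net_graph_msg_handler`, `node_secret`
	/// and `logger` are values you have already constructed; the 32 ephemeral bytes must come
	/// from a cryptographically secure RNG, not a fixed value as shown here:
	///
	/// ```ignore
	/// use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler, PeerManager};
	///
	/// let message_handler = MessageHandler {
	/// 	chan_handler: &channel_manager,
	/// 	route_handler: &net_graph_msg_handler,
	/// };
	/// let ephemeral_bytes = [42u8; 32]; // use real randomness!
	/// let peer_manager = PeerManager::new(message_handler, node_secret, &ephemeral_bytes,
	/// 	&logger, IgnoringMessageHandler{});
	/// ```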
pub fn new(message_handler: MessageHandler, our_node_secret: SecretKey, ephemeral_random_data: &[u8; 32], logger: L, custom_message_handler: CMH) -> Self { let mut ephemeral_key_midstate = Sha256::engine(); ephemeral_key_midstate.input(ephemeral_random_data); PeerManager { message_handler, peers: RwLock::new(PeerHolder { peers: HashMap::new(), }), node_id_to_descriptor: Mutex::new(HashMap::new()), event_processing_lock: Mutex::new(()), our_node_secret, ephemeral_key_midstate, peer_counter: AtomicCounter::new(), logger, custom_message_handler, } } /// Get the list of node ids for peers which have completed the initial handshake. /// /// For outbound connections, this will be the same as the their_node_id parameter passed in to /// new_outbound_connection, however entries will only appear once the initial handshake has /// completed and we are sure the remote peer has the private key for the given node_id. pub fn get_peer_node_ids(&self) -> Vec { let peers = self.peers.read().unwrap(); peers.peers.values().filter_map(|peer_mutex| { let p = peer_mutex.lock().unwrap(); if !p.channel_encryptor.is_ready_for_encryption() || p.their_features.is_none() { return None; } p.their_node_id }).collect() } fn get_ephemeral_key(&self) -> SecretKey { let mut ephemeral_hash = self.ephemeral_key_midstate.clone(); let counter = self.peer_counter.get_increment(); ephemeral_hash.input(&counter.to_le_bytes()); SecretKey::from_slice(&Sha256::from_engine(ephemeral_hash).into_inner()).expect("You broke SHA-256!") } /// Indicates a new outbound connection has been established to a node with the given node_id /// and an optional remote network address. /// /// The remote network address adds the option to report a remote IP address back to a connecting /// peer using the init message. /// The user should pass the remote network address of the host they are connected to. /// /// Note that if an Err is returned here you MUST NOT call socket_disconnected for the new /// descriptor but must disconnect the connection immediately. /// /// Returns a small number of bytes to send to the remote node (currently always 50). /// /// Panics if descriptor is duplicative with some other descriptor which has not yet been /// [`socket_disconnected()`]. /// /// [`socket_disconnected()`]: PeerManager::socket_disconnected pub fn new_outbound_connection(&self, their_node_id: PublicKey, descriptor: Descriptor, remote_network_address: Option) -> Result, PeerHandleError> { let mut peer_encryptor = PeerChannelEncryptor::new_outbound(their_node_id.clone(), self.get_ephemeral_key()); let res = peer_encryptor.get_act_one().to_vec(); let pending_read_buffer = [0; 50].to_vec(); // Noise act two is 50 bytes let mut peers = self.peers.write().unwrap(); if peers.peers.insert(descriptor, Mutex::new(Peer { channel_encryptor: peer_encryptor, their_node_id: None, their_features: None, their_net_address: remote_network_address, pending_outbound_buffer: LinkedList::new(), pending_outbound_buffer_first_msg_offset: 0, awaiting_write_event: false, pending_read_buffer, pending_read_buffer_pos: 0, pending_read_is_header: false, sync_status: InitSyncTracker::NoSyncRequested, msgs_sent_since_pong: 0, awaiting_pong_timer_tick_intervals: 0, received_message_since_timer_tick: false, sent_gossip_timestamp_filter: false, })).is_some() { panic!("PeerManager driver duplicated descriptors!"); }; Ok(res) } /// Indicates a new inbound connection has been established to a node with an optional remote /// network address. 
/// /// The remote network address adds the option to report a remote IP address back to a connecting /// peer using the init message. /// The user should pass the remote network address of the host they are connected to. /// /// May refuse the connection by returning an Err, but will never write bytes to the remote end /// (outbound connector always speaks first). Note that if an Err is returned here you MUST NOT /// call socket_disconnected for the new descriptor but must disconnect the connection /// immediately. /// /// Panics if descriptor is duplicative with some other descriptor which has not yet been /// [`socket_disconnected()`]. /// /// [`socket_disconnected()`]: PeerManager::socket_disconnected pub fn new_inbound_connection(&self, descriptor: Descriptor, remote_network_address: Option) -> Result<(), PeerHandleError> { let peer_encryptor = PeerChannelEncryptor::new_inbound(&self.our_node_secret); let pending_read_buffer = [0; 50].to_vec(); // Noise act one is 50 bytes let mut peers = self.peers.write().unwrap(); if peers.peers.insert(descriptor, Mutex::new(Peer { channel_encryptor: peer_encryptor, their_node_id: None, their_features: None, their_net_address: remote_network_address, pending_outbound_buffer: LinkedList::new(), pending_outbound_buffer_first_msg_offset: 0, awaiting_write_event: false, pending_read_buffer, pending_read_buffer_pos: 0, pending_read_is_header: false, sync_status: InitSyncTracker::NoSyncRequested, msgs_sent_since_pong: 0, awaiting_pong_timer_tick_intervals: 0, received_message_since_timer_tick: false, sent_gossip_timestamp_filter: false, })).is_some() { panic!("PeerManager driver duplicated descriptors!"); }; Ok(()) } fn do_attempt_write_data(&self, descriptor: &mut Descriptor, peer: &mut Peer) { while !peer.awaiting_write_event { if peer.pending_outbound_buffer.len() < OUTBOUND_BUFFER_LIMIT_READ_PAUSE && peer.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK { match peer.sync_status { InitSyncTracker::NoSyncRequested => {}, InitSyncTracker::ChannelsSyncing(c) if c < 0xffff_ffff_ffff_ffff => { let steps = ((OUTBOUND_BUFFER_LIMIT_READ_PAUSE - peer.pending_outbound_buffer.len() + 2) / 3) as u8; let all_messages = self.message_handler.route_handler.get_next_channel_announcements(c, steps); for &(ref announce, ref update_a_option, ref update_b_option) in all_messages.iter() { self.enqueue_message(peer, announce); if let &Some(ref update_a) = update_a_option { self.enqueue_message(peer, update_a); } if let &Some(ref update_b) = update_b_option { self.enqueue_message(peer, update_b); } peer.sync_status = InitSyncTracker::ChannelsSyncing(announce.contents.short_channel_id + 1); } if all_messages.is_empty() || all_messages.len() != steps as usize { peer.sync_status = InitSyncTracker::ChannelsSyncing(0xffff_ffff_ffff_ffff); } }, InitSyncTracker::ChannelsSyncing(c) if c == 0xffff_ffff_ffff_ffff => { let steps = (OUTBOUND_BUFFER_LIMIT_READ_PAUSE - peer.pending_outbound_buffer.len()) as u8; let all_messages = self.message_handler.route_handler.get_next_node_announcements(None, steps); for msg in all_messages.iter() { self.enqueue_message(peer, msg); peer.sync_status = InitSyncTracker::NodesSyncing(msg.contents.node_id); } if all_messages.is_empty() || all_messages.len() != steps as usize { peer.sync_status = InitSyncTracker::NoSyncRequested; } }, InitSyncTracker::ChannelsSyncing(_) => unreachable!(), InitSyncTracker::NodesSyncing(key) => { let steps = (OUTBOUND_BUFFER_LIMIT_READ_PAUSE - peer.pending_outbound_buffer.len()) as u8; let all_messages = 
self.message_handler.route_handler.get_next_node_announcements(Some(&key), steps); for msg in all_messages.iter() { self.enqueue_message(peer, msg); peer.sync_status = InitSyncTracker::NodesSyncing(msg.contents.node_id); } if all_messages.is_empty() || all_messages.len() != steps as usize { peer.sync_status = InitSyncTracker::NoSyncRequested; } }, } } if peer.msgs_sent_since_pong >= BUFFER_DRAIN_MSGS_PER_TICK { self.maybe_send_extra_ping(peer); } if { let next_buff = match peer.pending_outbound_buffer.front() { None => return, Some(buff) => buff, }; let should_be_reading = peer.pending_outbound_buffer.len() < OUTBOUND_BUFFER_LIMIT_READ_PAUSE; let pending = &next_buff[peer.pending_outbound_buffer_first_msg_offset..]; let data_sent = descriptor.send_data(pending, should_be_reading); peer.pending_outbound_buffer_first_msg_offset += data_sent; if peer.pending_outbound_buffer_first_msg_offset == next_buff.len() { true } else { false } } { peer.pending_outbound_buffer_first_msg_offset = 0; peer.pending_outbound_buffer.pop_front(); } else { peer.awaiting_write_event = true; } } } /// Indicates that there is room to write data to the given socket descriptor. /// /// May return an Err to indicate that the connection should be closed. /// /// May call [`send_data`] on the descriptor passed in (or an equal descriptor) before /// returning. Thus, be very careful with reentrancy issues! The invariants around calling /// [`write_buffer_space_avail`] in case a write did not fully complete must still hold - be /// ready to call `[write_buffer_space_avail`] again if a write call generated here isn't /// sufficient! /// /// [`send_data`]: SocketDescriptor::send_data /// [`write_buffer_space_avail`]: PeerManager::write_buffer_space_avail pub fn write_buffer_space_avail(&self, descriptor: &mut Descriptor) -> Result<(), PeerHandleError> { let peers = self.peers.read().unwrap(); match peers.peers.get(descriptor) { None => { // This is most likely a simple race condition where the user found that the socket // was writeable, then we told the user to `disconnect_socket()`, then they called // this method. Return an error to make sure we get disconnected. return Err(PeerHandleError { no_connection_possible: false }); }, Some(peer_mutex) => { let mut peer = peer_mutex.lock().unwrap(); peer.awaiting_write_event = false; self.do_attempt_write_data(descriptor, &mut peer); } }; Ok(()) } /// Indicates that data was read from the given socket descriptor. /// /// May return an Err to indicate that the connection should be closed. /// /// Will *not* call back into [`send_data`] on any descriptors to avoid reentrancy complexity. /// Thus, however, you should call [`process_events`] after any `read_event` to generate /// [`send_data`] calls to handle responses. /// /// If `Ok(true)` is returned, further read_events should not be triggered until a /// [`send_data`] call on this descriptor has `resume_read` set (preventing DoS issues in the /// send buffer). 
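	///
	/// An illustrative sketch of the expected driving loop (assuming `peer_manager` is your
	/// PeerManager, `socket` your own connection wrapper, `descriptor` its [`SocketDescriptor`]
	/// and `buf` a read buffer):
	///
	/// ```ignore
	/// loop {
	/// 	let len = socket.read(&mut buf)?;
	/// 	match peer_manager.read_event(&mut descriptor, &buf[..len]) {
	/// 		Ok(pause_read) => {
	/// 			// Generate responses; may call send_data on our descriptors.
	/// 			peer_manager.process_events();
	/// 			if pause_read {
	/// 				// Stop reading until a send_data call sets resume_read.
	/// 			}
	/// 		},
	/// 		Err(_) => {
	/// 			// Close the socket; make no further calls for this descriptor.
	/// 			break;
	/// 		},
	/// 	}
	/// }
	/// ```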
/// /// [`send_data`]: SocketDescriptor::send_data /// [`process_events`]: PeerManager::process_events pub fn read_event(&self, peer_descriptor: &mut Descriptor, data: &[u8]) -> Result { match self.do_read_event(peer_descriptor, data) { Ok(res) => Ok(res), Err(e) => { log_trace!(self.logger, "Peer sent invalid data or we decided to disconnect due to a protocol error"); self.disconnect_event_internal(peer_descriptor, e.no_connection_possible); Err(e) } } } /// Append a message to a peer's pending outbound/write buffer fn enqueue_encoded_message(&self, peer: &mut Peer, encoded_message: &Vec) { peer.msgs_sent_since_pong += 1; peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encoded_message[..])); } /// Append a message to a peer's pending outbound/write buffer fn enqueue_message(&self, peer: &mut Peer, message: &M) { let mut buffer = VecWriter(Vec::with_capacity(2048)); wire::write(message, &mut buffer).unwrap(); // crash if the write failed if is_gossip_msg(message.type_id()) { log_gossip!(self.logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap())); } else { log_trace!(self.logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap())) } self.enqueue_encoded_message(peer, &buffer.0); } fn do_read_event(&self, peer_descriptor: &mut Descriptor, data: &[u8]) -> Result { let mut pause_read = false; let peers = self.peers.read().unwrap(); let mut msgs_to_forward = Vec::new(); let mut peer_node_id = None; match peers.peers.get(peer_descriptor) { None => { // This is most likely a simple race condition where the user read some bytes // from the socket, then we told the user to `disconnect_socket()`, then they // called this method. Return an error to make sure we get disconnected. return Err(PeerHandleError { no_connection_possible: false }); }, Some(peer_mutex) => { let mut read_pos = 0; while read_pos < data.len() { macro_rules! 
try_potential_handleerror { ($peer: expr, $thing: expr) => { match $thing { Ok(x) => x, Err(e) => { match e.action { msgs::ErrorAction::DisconnectPeer { msg: _ } => { //TODO: Try to push msg log_debug!(self.logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer_node_id), e.err); return Err(PeerHandleError{ no_connection_possible: false }); }, msgs::ErrorAction::IgnoreAndLog(level) => { log_given_level!(self.logger, level, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer_node_id), e.err); continue }, msgs::ErrorAction::IgnoreDuplicateGossip => continue, // Don't even bother logging these msgs::ErrorAction::IgnoreError => { log_debug!(self.logger, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer_node_id), e.err); continue; }, msgs::ErrorAction::SendErrorMessage { msg } => { log_debug!(self.logger, "Error handling message{}; sending error message with: {}", OptionalFromDebugger(&peer_node_id), e.err); self.enqueue_message($peer, &msg); continue; }, msgs::ErrorAction::SendWarningMessage { msg, log_level } => { log_given_level!(self.logger, log_level, "Error handling message{}; sending warning message with: {}", OptionalFromDebugger(&peer_node_id), e.err); self.enqueue_message($peer, &msg); continue; }, } } } } } let mut peer_lock = peer_mutex.lock().unwrap(); let peer = &mut *peer_lock; let mut msg_to_handle = None; if peer_node_id.is_none() { peer_node_id = peer.their_node_id.clone(); } assert!(peer.pending_read_buffer.len() > 0); assert!(peer.pending_read_buffer.len() > peer.pending_read_buffer_pos); { let data_to_copy = cmp::min(peer.pending_read_buffer.len() - peer.pending_read_buffer_pos, data.len() - read_pos); peer.pending_read_buffer[peer.pending_read_buffer_pos..peer.pending_read_buffer_pos + data_to_copy].copy_from_slice(&data[read_pos..read_pos + data_to_copy]); read_pos += data_to_copy; peer.pending_read_buffer_pos += data_to_copy; } if peer.pending_read_buffer_pos == peer.pending_read_buffer.len() { peer.pending_read_buffer_pos = 0; macro_rules! 
insert_node_id { () => { match self.node_id_to_descriptor.lock().unwrap().entry(peer.their_node_id.unwrap()) { hash_map::Entry::Occupied(_) => { log_trace!(self.logger, "Got second connection with {}, closing", log_pubkey!(peer.their_node_id.unwrap())); peer.their_node_id = None; // Unset so that we don't generate a peer_disconnected event return Err(PeerHandleError{ no_connection_possible: false }) }, hash_map::Entry::Vacant(entry) => { log_debug!(self.logger, "Finished noise handshake for connection with {}", log_pubkey!(peer.their_node_id.unwrap())); entry.insert(peer_descriptor.clone()) }, }; } } let next_step = peer.channel_encryptor.get_noise_step(); match next_step { NextNoiseStep::ActOne => { let act_two = try_potential_handleerror!(peer, peer.channel_encryptor.process_act_one_with_keys(&peer.pending_read_buffer[..], &self.our_node_secret, self.get_ephemeral_key())).to_vec(); peer.pending_outbound_buffer.push_back(act_two); peer.pending_read_buffer = [0; 66].to_vec(); // act three is 66 bytes long }, NextNoiseStep::ActTwo => { let (act_three, their_node_id) = try_potential_handleerror!(peer, peer.channel_encryptor.process_act_two(&peer.pending_read_buffer[..], &self.our_node_secret)); peer.pending_outbound_buffer.push_back(act_three.to_vec()); peer.pending_read_buffer = [0; 18].to_vec(); // Message length header is 18 bytes peer.pending_read_is_header = true; peer.their_node_id = Some(their_node_id); insert_node_id!(); let features = InitFeatures::known(); let resp = msgs::Init { features, remote_network_address: filter_addresses(peer.their_net_address.clone()) }; self.enqueue_message(peer, &resp); peer.awaiting_pong_timer_tick_intervals = 0; }, NextNoiseStep::ActThree => { let their_node_id = try_potential_handleerror!(peer, peer.channel_encryptor.process_act_three(&peer.pending_read_buffer[..])); peer.pending_read_buffer = [0; 18].to_vec(); // Message length header is 18 bytes peer.pending_read_is_header = true; peer.their_node_id = Some(their_node_id); insert_node_id!(); let features = InitFeatures::known(); let resp = msgs::Init { features, remote_network_address: filter_addresses(peer.their_net_address.clone()) }; self.enqueue_message(peer, &resp); peer.awaiting_pong_timer_tick_intervals = 0; }, NextNoiseStep::NoiseComplete => { if peer.pending_read_is_header { let msg_len = try_potential_handleerror!(peer, peer.channel_encryptor.decrypt_length_header(&peer.pending_read_buffer[..])); peer.pending_read_buffer = Vec::with_capacity(msg_len as usize + 16); peer.pending_read_buffer.resize(msg_len as usize + 16, 0); if msg_len < 2 { // Need at least the message type tag return Err(PeerHandleError{ no_connection_possible: false }); } peer.pending_read_is_header = false; } else { let msg_data = try_potential_handleerror!(peer, peer.channel_encryptor.decrypt_message(&peer.pending_read_buffer[..])); assert!(msg_data.len() >= 2); // Reset read buffer peer.pending_read_buffer = [0; 18].to_vec(); peer.pending_read_is_header = true; let mut reader = io::Cursor::new(&msg_data[..]); let message_result = wire::read(&mut reader, &*self.custom_message_handler); let message = match message_result { Ok(x) => x, Err(e) => { match e { // Note that to avoid recursion we never call // `do_attempt_write_data` from here, causing // the messages enqueued here to not actually // be sent before the peer is disconnected. 
(msgs::DecodeError::UnknownRequiredFeature, Some(ty)) if is_gossip_msg(ty) => { log_gossip!(self.logger, "Got a channel/node announcement with an unknown required feature flag, you may want to update!"); continue; } (msgs::DecodeError::UnsupportedCompression, _) => { log_gossip!(self.logger, "We don't support zlib-compressed message fields, sending a warning and ignoring message"); self.enqueue_message(peer, &msgs::WarningMessage { channel_id: [0; 32], data: "Unsupported message compression: zlib".to_owned() }); continue; } (_, Some(ty)) if is_gossip_msg(ty) => { log_gossip!(self.logger, "Got an invalid value while deserializing a gossip message"); self.enqueue_message(peer, &msgs::WarningMessage { channel_id: [0; 32], data: "Unreadable/bogus gossip message".to_owned() }); continue; } (msgs::DecodeError::UnknownRequiredFeature, ty) => { log_gossip!(self.logger, "Received a message with an unknown required feature flag or TLV, you may want to update!"); self.enqueue_message(peer, &msgs::WarningMessage { channel_id: [0; 32], data: format!("Received an unknown required feature/TLV in message type {:?}", ty) }); return Err(PeerHandleError { no_connection_possible: false }); } (msgs::DecodeError::UnknownVersion, _) => return Err(PeerHandleError { no_connection_possible: false }), (msgs::DecodeError::InvalidValue, _) => { log_debug!(self.logger, "Got an invalid value while deserializing message"); return Err(PeerHandleError { no_connection_possible: false }); } (msgs::DecodeError::ShortRead, _) => { log_debug!(self.logger, "Deserialization failed due to shortness of message"); return Err(PeerHandleError { no_connection_possible: false }); } (msgs::DecodeError::BadLengthDescriptor, _) => return Err(PeerHandleError { no_connection_possible: false }), (msgs::DecodeError::Io(_), _) => return Err(PeerHandleError { no_connection_possible: false }), } } }; msg_to_handle = Some(message); } } } } pause_read = peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_READ_PAUSE; if let Some(message) = msg_to_handle { match self.handle_message(&peer_mutex, peer_lock, message) { Err(handling_error) => match handling_error { MessageHandlingError::PeerHandleError(e) => { return Err(e) }, MessageHandlingError::LightningError(e) => { try_potential_handleerror!(&mut peer_mutex.lock().unwrap(), Err(e)); }, }, Ok(Some(msg)) => { msgs_to_forward.push(msg); }, Ok(None) => {}, } } } } } for msg in msgs_to_forward.drain(..) { self.forward_broadcast_msg(&*peers, &msg, peer_node_id.as_ref()); } Ok(pause_read) } /// Process an incoming message and return a decision (ok, lightning error, peer handling error) regarding the next action with the peer /// Returns the message back if it needs to be broadcasted to all other peers. 
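	/// The peer must have sent an `Init` message before anything else. For peers which support
	/// gossip queries, the initial routing sync is only started once a `GossipTimestampFilter`
	/// has been received from them.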
fn handle_message( &self, peer_mutex: &Mutex, mut peer_lock: MutexGuard, message: wire::Message<<::Target as wire::CustomMessageReader>::CustomMessage> ) -> Result::Target as wire::CustomMessageReader>::CustomMessage>>, MessageHandlingError> { let their_node_id = peer_lock.their_node_id.clone().expect("We know the peer's public key by the time we receive messages"); peer_lock.received_message_since_timer_tick = true; // Need an Init as first message if let wire::Message::Init(msg) = message { if msg.features.requires_unknown_bits() { log_debug!(self.logger, "Peer features required unknown version bits"); return Err(PeerHandleError{ no_connection_possible: true }.into()); } if peer_lock.their_features.is_some() { return Err(PeerHandleError{ no_connection_possible: false }.into()); } log_info!(self.logger, "Received peer Init message from {}: {}", log_pubkey!(their_node_id), msg.features); // For peers not supporting gossip queries start sync now, otherwise wait until we receive a filter. if msg.features.initial_routing_sync() && !msg.features.supports_gossip_queries() { peer_lock.sync_status = InitSyncTracker::ChannelsSyncing(0); } if !msg.features.supports_static_remote_key() { log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting with no_connection_possible", log_pubkey!(their_node_id)); return Err(PeerHandleError{ no_connection_possible: true }.into()); } self.message_handler.route_handler.peer_connected(&their_node_id, &msg); self.message_handler.chan_handler.peer_connected(&their_node_id, &msg); peer_lock.their_features = Some(msg.features); return Ok(None); } else if peer_lock.their_features.is_none() { log_debug!(self.logger, "Peer {} sent non-Init first message", log_pubkey!(their_node_id)); return Err(PeerHandleError{ no_connection_possible: false }.into()); } if let wire::Message::GossipTimestampFilter(_msg) = message { // When supporting gossip messages, start inital gossip sync only after we receive // a GossipTimestampFilter if peer_lock.their_features.as_ref().unwrap().supports_gossip_queries() && !peer_lock.sent_gossip_timestamp_filter { peer_lock.sent_gossip_timestamp_filter = true; peer_lock.sync_status = InitSyncTracker::ChannelsSyncing(0); } return Ok(None); } let their_features = peer_lock.their_features.clone(); mem::drop(peer_lock); if is_gossip_msg(message.type_id()) { log_gossip!(self.logger, "Received message {:?} from {}", message, log_pubkey!(their_node_id)); } else { log_trace!(self.logger, "Received message {:?} from {}", message, log_pubkey!(their_node_id)); } let mut should_forward = None; match message { // Setup and Control messages: wire::Message::Init(_) => { // Handled above }, wire::Message::GossipTimestampFilter(_) => { // Handled above }, wire::Message::Error(msg) => { let mut data_is_printable = true; for b in msg.data.bytes() { if b < 32 || b > 126 { data_is_printable = false; break; } } if data_is_printable { log_debug!(self.logger, "Got Err message from {}: {}", log_pubkey!(their_node_id), msg.data); } else { log_debug!(self.logger, "Got Err message from {} with non-ASCII error message", log_pubkey!(their_node_id)); } self.message_handler.chan_handler.handle_error(&their_node_id, &msg); if msg.channel_id == [0; 32] { return Err(PeerHandleError{ no_connection_possible: true }.into()); } }, wire::Message::Warning(msg) => { let mut data_is_printable = true; for b in msg.data.bytes() { if b < 32 || b > 126 { data_is_printable = false; break; } } if data_is_printable { log_debug!(self.logger, "Got warning message from {}: 
{}", log_pubkey!(their_node_id), msg.data); } else { log_debug!(self.logger, "Got warning message from {} with non-ASCII error message", log_pubkey!(their_node_id)); } }, wire::Message::Ping(msg) => { if msg.ponglen < 65532 { let resp = msgs::Pong { byteslen: msg.ponglen }; self.enqueue_message(&mut *peer_mutex.lock().unwrap(), &resp); } }, wire::Message::Pong(_msg) => { let mut peer_lock = peer_mutex.lock().unwrap(); peer_lock.awaiting_pong_timer_tick_intervals = 0; peer_lock.msgs_sent_since_pong = 0; }, // Channel messages: wire::Message::OpenChannel(msg) => { self.message_handler.chan_handler.handle_open_channel(&their_node_id, their_features.clone().unwrap(), &msg); }, wire::Message::AcceptChannel(msg) => { self.message_handler.chan_handler.handle_accept_channel(&their_node_id, their_features.clone().unwrap(), &msg); }, wire::Message::FundingCreated(msg) => { self.message_handler.chan_handler.handle_funding_created(&their_node_id, &msg); }, wire::Message::FundingSigned(msg) => { self.message_handler.chan_handler.handle_funding_signed(&their_node_id, &msg); }, wire::Message::FundingLocked(msg) => { self.message_handler.chan_handler.handle_funding_locked(&their_node_id, &msg); }, wire::Message::Shutdown(msg) => { self.message_handler.chan_handler.handle_shutdown(&their_node_id, their_features.as_ref().unwrap(), &msg); }, wire::Message::ClosingSigned(msg) => { self.message_handler.chan_handler.handle_closing_signed(&their_node_id, &msg); }, // Commitment messages: wire::Message::UpdateAddHTLC(msg) => { self.message_handler.chan_handler.handle_update_add_htlc(&their_node_id, &msg); }, wire::Message::UpdateFulfillHTLC(msg) => { self.message_handler.chan_handler.handle_update_fulfill_htlc(&their_node_id, &msg); }, wire::Message::UpdateFailHTLC(msg) => { self.message_handler.chan_handler.handle_update_fail_htlc(&their_node_id, &msg); }, wire::Message::UpdateFailMalformedHTLC(msg) => { self.message_handler.chan_handler.handle_update_fail_malformed_htlc(&their_node_id, &msg); }, wire::Message::CommitmentSigned(msg) => { self.message_handler.chan_handler.handle_commitment_signed(&their_node_id, &msg); }, wire::Message::RevokeAndACK(msg) => { self.message_handler.chan_handler.handle_revoke_and_ack(&their_node_id, &msg); }, wire::Message::UpdateFee(msg) => { self.message_handler.chan_handler.handle_update_fee(&their_node_id, &msg); }, wire::Message::ChannelReestablish(msg) => { self.message_handler.chan_handler.handle_channel_reestablish(&their_node_id, &msg); }, // Routing messages: wire::Message::AnnouncementSignatures(msg) => { self.message_handler.chan_handler.handle_announcement_signatures(&their_node_id, &msg); }, wire::Message::ChannelAnnouncement(msg) => { if self.message_handler.route_handler.handle_channel_announcement(&msg) .map_err(|e| -> MessageHandlingError { e.into() })? { should_forward = Some(wire::Message::ChannelAnnouncement(msg)); } }, wire::Message::NodeAnnouncement(msg) => { if self.message_handler.route_handler.handle_node_announcement(&msg) .map_err(|e| -> MessageHandlingError { e.into() })? { should_forward = Some(wire::Message::NodeAnnouncement(msg)); } }, wire::Message::ChannelUpdate(msg) => { self.message_handler.chan_handler.handle_channel_update(&their_node_id, &msg); if self.message_handler.route_handler.handle_channel_update(&msg) .map_err(|e| -> MessageHandlingError { e.into() })? 
{ should_forward = Some(wire::Message::ChannelUpdate(msg)); } }, wire::Message::QueryShortChannelIds(msg) => { self.message_handler.route_handler.handle_query_short_channel_ids(&their_node_id, msg)?; }, wire::Message::ReplyShortChannelIdsEnd(msg) => { self.message_handler.route_handler.handle_reply_short_channel_ids_end(&their_node_id, msg)?; }, wire::Message::QueryChannelRange(msg) => { self.message_handler.route_handler.handle_query_channel_range(&their_node_id, msg)?; }, wire::Message::ReplyChannelRange(msg) => { self.message_handler.route_handler.handle_reply_channel_range(&their_node_id, msg)?; }, // Unknown messages: wire::Message::Unknown(type_id) if message.is_even() => { log_debug!(self.logger, "Received unknown even message of type {}, disconnecting peer!", type_id); // Fail the channel if message is an even, unknown type as per BOLT #1. return Err(PeerHandleError{ no_connection_possible: true }.into()); }, wire::Message::Unknown(type_id) => { log_trace!(self.logger, "Received unknown odd message of type {}, ignoring", type_id); }, wire::Message::Custom(custom) => { self.custom_message_handler.handle_custom_message(custom, &their_node_id)?; }, }; Ok(should_forward) } fn forward_broadcast_msg(&self, peers: &PeerHolder, msg: &wire::Message<<::Target as wire::CustomMessageReader>::CustomMessage>, except_node: Option<&PublicKey>) { match msg { wire::Message::ChannelAnnouncement(ref msg) => { log_gossip!(self.logger, "Sending message to all peers except {:?} or the announced channel's counterparties: {:?}", except_node, msg); let encoded_msg = encode_msg!(msg); for (_, peer_mutex) in peers.peers.iter() { let mut peer = peer_mutex.lock().unwrap(); if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_features.is_none() || !peer.should_forward_channel_announcement(msg.contents.short_channel_id) { continue } if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP || peer.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO { log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id); continue; } if peer.their_node_id.as_ref() == Some(&msg.contents.node_id_1) || peer.their_node_id.as_ref() == Some(&msg.contents.node_id_2) { continue; } if except_node.is_some() && peer.their_node_id.as_ref() == except_node { continue; } self.enqueue_encoded_message(&mut *peer, &encoded_msg); } }, wire::Message::NodeAnnouncement(ref msg) => { log_gossip!(self.logger, "Sending message to all peers except {:?} or the announced node: {:?}", except_node, msg); let encoded_msg = encode_msg!(msg); for (_, peer_mutex) in peers.peers.iter() { let mut peer = peer_mutex.lock().unwrap(); if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_features.is_none() || !peer.should_forward_node_announcement(msg.contents.node_id) { continue } if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP || peer.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO { log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id); continue; } if peer.their_node_id.as_ref() == Some(&msg.contents.node_id) { continue; } if except_node.is_some() && peer.their_node_id.as_ref() == except_node { continue; } self.enqueue_encoded_message(&mut *peer, &encoded_msg); } }, wire::Message::ChannelUpdate(ref msg) => { log_gossip!(self.logger, "Sending message to all peers except {:?}: {:?}", except_node, msg); let 
encoded_msg = encode_msg!(msg); for (_, peer_mutex) in peers.peers.iter() { let mut peer = peer_mutex.lock().unwrap(); if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_features.is_none() || !peer.should_forward_channel_announcement(msg.contents.short_channel_id) { continue } if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP || peer.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO { log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id); continue; } if except_node.is_some() && peer.their_node_id.as_ref() == except_node { continue; } self.enqueue_encoded_message(&mut *peer, &encoded_msg); } }, _ => debug_assert!(false, "We shouldn't attempt to forward anything but gossip messages"), } } /// Checks for any events generated by our handlers and processes them. Includes sending most /// response messages as well as messages generated by calls to handler functions directly (eg /// functions like [`ChannelManager::process_pending_htlc_forwards`] or [`send_payment`]). /// /// May call [`send_data`] on [`SocketDescriptor`]s. Thus, be very careful with reentrancy /// issues! /// /// You don't have to call this function explicitly if you are using [`lightning-net-tokio`] /// or one of the other clients provided in our language bindings. /// /// [`send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment /// [`ChannelManager::process_pending_htlc_forwards`]: crate::ln::channelmanager::ChannelManager::process_pending_htlc_forwards /// [`send_data`]: SocketDescriptor::send_data pub fn process_events(&self) { let _single_processor_lock = self.event_processing_lock.lock().unwrap(); let mut peers_to_disconnect = HashMap::new(); let mut events_generated = self.message_handler.chan_handler.get_and_clear_pending_msg_events(); events_generated.append(&mut self.message_handler.route_handler.get_and_clear_pending_msg_events()); { // TODO: There are some DoS attacks here where you can flood someone's outbound send // buffer by doing things like announcing channels on another node. We should be willing to // drop optional-ish messages when send buffers get full! let peers_lock = self.peers.read().unwrap(); let peers = &*peers_lock; macro_rules! get_peer_for_forwarding { ($node_id: expr) => { { if peers_to_disconnect.get($node_id).is_some() { // If we've "disconnected" this peer, do not send to it. continue; } let descriptor_opt = self.node_id_to_descriptor.lock().unwrap().get($node_id).cloned(); match descriptor_opt { Some(descriptor) => match peers.peers.get(&descriptor) { Some(peer_mutex) => { let peer_lock = peer_mutex.lock().unwrap(); if peer_lock.their_features.is_none() { continue; } peer_lock }, None => { debug_assert!(false, "Inconsistent peers set state!"); continue; } }, None => { continue; }, } } } } for event in events_generated.drain(..) 
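			// Each MessageSendEvent is mapped onto one or more wire messages enqueued to the
			// target peer's outbound buffer (via the get_peer_for_forwarding! lookup above), while
			// Broadcast* events are re-validated with the route_handler and then fanned out to all
			// eligible peers through forward_broadcast_msg.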
{ match event { MessageSendEvent::SendAcceptChannel { ref node_id, ref msg } => { log_debug!(self.logger, "Handling SendAcceptChannel event in peer_handler for node {} for channel {}", log_pubkey!(node_id), log_bytes!(msg.temporary_channel_id)); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendOpenChannel { ref node_id, ref msg } => { log_debug!(self.logger, "Handling SendOpenChannel event in peer_handler for node {} for channel {}", log_pubkey!(node_id), log_bytes!(msg.temporary_channel_id)); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendFundingCreated { ref node_id, ref msg } => { log_debug!(self.logger, "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})", log_pubkey!(node_id), log_bytes!(msg.temporary_channel_id), log_funding_channel_id!(msg.funding_txid, msg.funding_output_index)); // TODO: If the peer is gone we should generate a DiscardFunding event // indicating to the wallet that they should just throw away this funding transaction self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendFundingSigned { ref node_id, ref msg } => { log_debug!(self.logger, "Handling SendFundingSigned event in peer_handler for node {} for channel {}", log_pubkey!(node_id), log_bytes!(msg.channel_id)); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => { log_debug!(self.logger, "Handling SendFundingLocked event in peer_handler for node {} for channel {}", log_pubkey!(node_id), log_bytes!(msg.channel_id)); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => { log_debug!(self.logger, "Handling SendAnnouncementSignatures event in peer_handler for node {} for channel {})", log_pubkey!(node_id), log_bytes!(msg.channel_id)); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { log_debug!(self.logger, "Handling UpdateHTLCs event in peer_handler for node {} with {} adds, {} fulfills, {} fails for channel {}", log_pubkey!(node_id), update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), log_bytes!(commitment_signed.channel_id)); let mut peer = get_peer_for_forwarding!(node_id); for msg in update_add_htlcs { self.enqueue_message(&mut *peer, msg); } for msg in update_fulfill_htlcs { self.enqueue_message(&mut *peer, msg); } for msg in update_fail_htlcs { self.enqueue_message(&mut *peer, msg); } for msg in update_fail_malformed_htlcs { self.enqueue_message(&mut *peer, msg); } if let &Some(ref msg) = update_fee { self.enqueue_message(&mut *peer, msg); } self.enqueue_message(&mut *peer, commitment_signed); }, MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { log_debug!(self.logger, "Handling SendRevokeAndACK event in peer_handler for node {} for channel {}", log_pubkey!(node_id), log_bytes!(msg.channel_id)); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => { log_debug!(self.logger, "Handling SendClosingSigned event in peer_handler for node {} for channel {}", log_pubkey!(node_id), log_bytes!(msg.channel_id)); 
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendShutdown { ref node_id, ref msg } => { log_debug!(self.logger, "Handling Shutdown event in peer_handler for node {} for channel {}", log_pubkey!(node_id), log_bytes!(msg.channel_id)); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { log_debug!(self.logger, "Handling SendChannelReestablish event in peer_handler for node {} for channel {}", log_pubkey!(node_id), log_bytes!(msg.channel_id)); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::BroadcastChannelAnnouncement { msg, update_msg } => { log_debug!(self.logger, "Handling BroadcastChannelAnnouncement event in peer_handler for short channel id {}", msg.contents.short_channel_id); match self.message_handler.route_handler.handle_channel_announcement(&msg) { Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) => self.forward_broadcast_msg(peers, &wire::Message::ChannelAnnouncement(msg), None), _ => {}, } match self.message_handler.route_handler.handle_channel_update(&update_msg) { Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) => self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(update_msg), None), _ => {}, } }, MessageSendEvent::BroadcastNodeAnnouncement { msg } => { log_debug!(self.logger, "Handling BroadcastNodeAnnouncement event in peer_handler"); match self.message_handler.route_handler.handle_node_announcement(&msg) { Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) => self.forward_broadcast_msg(peers, &wire::Message::NodeAnnouncement(msg), None), _ => {}, } }, MessageSendEvent::BroadcastChannelUpdate { msg } => { log_debug!(self.logger, "Handling BroadcastChannelUpdate event in peer_handler for short channel id {}", msg.contents.short_channel_id); match self.message_handler.route_handler.handle_channel_update(&msg) { Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) => self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(msg), None), _ => {}, } }, MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => { log_trace!(self.logger, "Handling SendChannelUpdate event in peer_handler for node {} for channel {}", log_pubkey!(node_id), msg.contents.short_channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::HandleError { ref node_id, ref action } => { match *action { msgs::ErrorAction::DisconnectPeer { ref msg } => { // We do not have the peers write lock, so we just store that we're // about to disconnect the peer and do it after we finish // processing most messages.
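// The actual disconnection (removing the peer from `node_id_to_descriptor` and the peers map,
// flushing a final error message if there is room in the send buffer, calling
// `disconnect_socket` and notifying the channel handler via `peer_disconnected`) happens at the
// bottom of this function, once the read lock has been dropped and the peers write lock can be
// taken.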
peers_to_disconnect.insert(*node_id, msg.clone()); }, msgs::ErrorAction::IgnoreAndLog(level) => { log_given_level!(self.logger, level, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id)); }, msgs::ErrorAction::IgnoreDuplicateGossip => {}, msgs::ErrorAction::IgnoreError => { log_debug!(self.logger, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id)); }, msgs::ErrorAction::SendErrorMessage { ref msg } => { log_trace!(self.logger, "Handling SendErrorMessage HandleError event in peer_handler for node {} with message {}", log_pubkey!(node_id), msg.data); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, msgs::ErrorAction::SendWarningMessage { ref msg, ref log_level } => { log_given_level!(self.logger, *log_level, "Handling SendWarningMessage HandleError event in peer_handler for node {} with message {}", log_pubkey!(node_id), msg.data); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, } }, MessageSendEvent::SendChannelRangeQuery { ref node_id, ref msg } => { self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendShortIdsQuery { ref node_id, ref msg } => { self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); } MessageSendEvent::SendReplyChannelRange { ref node_id, ref msg } => { log_gossip!(self.logger, "Handling SendReplyChannelRange event in peer_handler for node {} with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}", log_pubkey!(node_id), msg.short_channel_ids.len(), msg.first_blocknum, msg.number_of_blocks, msg.sync_complete); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); } MessageSendEvent::SendGossipTimestampFilter { ref node_id, ref msg } => { self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); } } } for (node_id, msg) in self.custom_message_handler.get_and_clear_pending_msg() { if peers_to_disconnect.get(&node_id).is_some() { continue; } self.enqueue_message(&mut *get_peer_for_forwarding!(&node_id), &msg); } for (descriptor, peer_mutex) in peers.peers.iter() { self.do_attempt_write_data(&mut (*descriptor).clone(), &mut *peer_mutex.lock().unwrap()); } } if !peers_to_disconnect.is_empty() { let mut peers_lock = self.peers.write().unwrap(); let peers = &mut *peers_lock; for (node_id, msg) in peers_to_disconnect.drain() { // Note that since we are holding the peers *write* lock we can // remove from node_id_to_descriptor immediately (as no other // thread can be holding the peer lock if we have the global write // lock). if let Some(mut descriptor) = self.node_id_to_descriptor.lock().unwrap().remove(&node_id) { if let Some(peer_mutex) = peers.peers.remove(&descriptor) { if let Some(msg) = msg { log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}", log_pubkey!(node_id), msg.data); let mut peer = peer_mutex.lock().unwrap(); self.enqueue_message(&mut *peer, &msg); // This isn't guaranteed to work, but if there is enough free // room in the send buffer, put the error message there... self.do_attempt_write_data(&mut descriptor, &mut *peer); } else { log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with no message", log_pubkey!(node_id)); } } descriptor.disconnect_socket(); self.message_handler.chan_handler.peer_disconnected(&node_id, false); } } } } /// Indicates that the given socket descriptor's connection is now closed. 
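// A minimal sketch (not part of this crate) of how a custom socket driver might report a closed
// connection; `MyPeerManager`, `MyDescriptor` and `on_socket_closed` are hypothetical names for
// your networking glue:
//
//     fn on_socket_closed(peer_manager: &MyPeerManager, descriptor: &MyDescriptor) {
//         // Afterwards the PeerManager forgets the descriptor and will not use it again.
//         peer_manager.socket_disconnected(descriptor);
//     }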
pub fn socket_disconnected(&self, descriptor: &Descriptor) { self.disconnect_event_internal(descriptor, false); } fn disconnect_event_internal(&self, descriptor: &Descriptor, no_connection_possible: bool) { let mut peers = self.peers.write().unwrap(); let peer_option = peers.peers.remove(descriptor); match peer_option { None => { // This is most likely a simple race condition where the user found that the socket // was disconnected, then we told the user to `disconnect_socket()`, then they // called this method. Either way we're disconnected, return. }, Some(peer_lock) => { let peer = peer_lock.lock().unwrap(); match peer.their_node_id { Some(node_id) => { log_trace!(self.logger, "Handling disconnection of peer {}, with {}future connection to the peer possible.", log_pubkey!(node_id), if no_connection_possible { "no " } else { "" }); self.node_id_to_descriptor.lock().unwrap().remove(&node_id); self.message_handler.chan_handler.peer_disconnected(&node_id, no_connection_possible); }, None => {} } } }; } /// Disconnect a peer given its node id. /// /// Set `no_connection_possible` to true to prevent any further connection with this peer, /// force-closing any channels we have with it. /// /// If a peer is connected, this will call [`disconnect_socket`] on the descriptor for the /// peer. Thus, be very careful about reentrancy issues. /// /// [`disconnect_socket`]: SocketDescriptor::disconnect_socket pub fn disconnect_by_node_id(&self, node_id: PublicKey, no_connection_possible: bool) { let mut peers_lock = self.peers.write().unwrap(); if let Some(mut descriptor) = self.node_id_to_descriptor.lock().unwrap().remove(&node_id) { log_trace!(self.logger, "Disconnecting peer with id {} due to client request", node_id); peers_lock.peers.remove(&descriptor); self.message_handler.chan_handler.peer_disconnected(&node_id, no_connection_possible); descriptor.disconnect_socket(); } } /// Disconnects all currently-connected peers. This is useful on platforms where there may be /// an indication that TCP sockets have stalled even if we weren't around to time them out /// using regular ping/pongs. pub fn disconnect_all_peers(&self) { let mut peers_lock = self.peers.write().unwrap(); self.node_id_to_descriptor.lock().unwrap().clear(); let peers = &mut *peers_lock; for (mut descriptor, peer) in peers.peers.drain() { if let Some(node_id) = peer.lock().unwrap().their_node_id { log_trace!(self.logger, "Disconnecting peer with id {} due to client request to disconnect all peers", node_id); self.message_handler.chan_handler.peer_disconnected(&node_id, false); } descriptor.disconnect_socket(); } } /// This is called when we're blocked on sending additional gossip messages until we receive a /// pong. If we aren't waiting on a pong, we take this opportunity to send a ping (setting /// `awaiting_pong_timer_tick_intervals` to a special flag value to indicate this). fn maybe_send_extra_ping(&self, peer: &mut Peer) { if peer.awaiting_pong_timer_tick_intervals == 0 { peer.awaiting_pong_timer_tick_intervals = -1; let ping = msgs::Ping { ponglen: 0, byteslen: 64, }; self.enqueue_message(peer, &ping); } } /// Send pings to each peer and disconnect those which did not respond to the last round of /// pings. /// /// This may be called on any timescale you want, however, roughly once every ten seconds is /// preferred. The call rate determines both how often we send a ping to our peers and how much /// time they have to respond before we disconnect them. /// /// May call [`send_data`] on all [`SocketDescriptor`]s. 
Thus, be very careful with reentrancy /// issues! /// /// [`send_data`]: SocketDescriptor::send_data pub fn timer_tick_occurred(&self) { let mut descriptors_needing_disconnect = Vec::new(); { let peers_lock = self.peers.read().unwrap(); for (descriptor, peer_mutex) in peers_lock.peers.iter() { let mut peer = peer_mutex.lock().unwrap(); if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_node_id.is_none() { // The peer needs to complete its handshake before we can exchange messages. We // give peers one timer tick to complete handshake, reusing // `awaiting_pong_timer_tick_intervals` to track number of timer ticks taken // for handshake completion. if peer.awaiting_pong_timer_tick_intervals != 0 { descriptors_needing_disconnect.push(descriptor.clone()); } else { peer.awaiting_pong_timer_tick_intervals = 1; } continue; } if peer.awaiting_pong_timer_tick_intervals == -1 { // Magic value set in `maybe_send_extra_ping`. peer.awaiting_pong_timer_tick_intervals = 1; peer.received_message_since_timer_tick = false; continue; } if (peer.awaiting_pong_timer_tick_intervals > 0 && !peer.received_message_since_timer_tick) || peer.awaiting_pong_timer_tick_intervals as u64 > MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER as u64 * peers_lock.peers.len() as u64 { descriptors_needing_disconnect.push(descriptor.clone()); continue; } peer.received_message_since_timer_tick = false; if peer.awaiting_pong_timer_tick_intervals > 0 { peer.awaiting_pong_timer_tick_intervals += 1; continue; } peer.awaiting_pong_timer_tick_intervals = 1; let ping = msgs::Ping { ponglen: 0, byteslen: 64, }; self.enqueue_message(&mut *peer, &ping); self.do_attempt_write_data(&mut (descriptor.clone()), &mut *peer); } } if !descriptors_needing_disconnect.is_empty() { { let mut peers_lock = self.peers.write().unwrap(); for descriptor in descriptors_needing_disconnect.iter() { if let Some(peer) = peers_lock.peers.remove(&descriptor) { if let Some(node_id) = peer.lock().unwrap().their_node_id { log_trace!(self.logger, "Disconnecting peer with id {} due to ping timeout", node_id); self.node_id_to_descriptor.lock().unwrap().remove(&node_id); self.message_handler.chan_handler.peer_disconnected(&node_id, false); } } } } for mut descriptor in descriptors_needing_disconnect.drain(..) 
{ descriptor.disconnect_socket(); } } } } fn is_gossip_msg(type_id: u16) -> bool { match type_id { msgs::ChannelAnnouncement::TYPE | msgs::ChannelUpdate::TYPE | msgs::NodeAnnouncement::TYPE | msgs::QueryChannelRange::TYPE | msgs::ReplyChannelRange::TYPE | msgs::QueryShortChannelIds::TYPE | msgs::ReplyShortChannelIdsEnd::TYPE => true, _ => false } } #[cfg(test)] mod tests { use ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler, filter_addresses}; use ln::msgs; use ln::msgs::NetAddress; use util::events; use util::test_utils; use bitcoin::secp256k1::Secp256k1; use bitcoin::secp256k1::key::{SecretKey, PublicKey}; use prelude::*; use sync::{Arc, Mutex}; use core::sync::atomic::Ordering; #[derive(Clone)] struct FileDescriptor { fd: u16, outbound_data: Arc<Mutex<Vec<u8>>>, } impl PartialEq for FileDescriptor { fn eq(&self, other: &Self) -> bool { self.fd == other.fd } } impl Eq for FileDescriptor { } impl core::hash::Hash for FileDescriptor { fn hash<H: core::hash::Hasher>(&self, hasher: &mut H) { self.fd.hash(hasher) } } impl SocketDescriptor for FileDescriptor { fn send_data(&mut self, data: &[u8], _resume_read: bool) -> usize { self.outbound_data.lock().unwrap().extend_from_slice(data); data.len() } fn disconnect_socket(&mut self) {} } struct PeerManagerCfg { chan_handler: test_utils::TestChannelMessageHandler, routing_handler: test_utils::TestRoutingMessageHandler, logger: test_utils::TestLogger, } fn create_peermgr_cfgs(peer_count: usize) -> Vec<PeerManagerCfg> { let mut cfgs = Vec::new(); for _ in 0..peer_count { cfgs.push( PeerManagerCfg{ chan_handler: test_utils::TestChannelMessageHandler::new(), logger: test_utils::TestLogger::new(), routing_handler: test_utils::TestRoutingMessageHandler::new(), } ); } cfgs } fn create_network<'a>(peer_count: usize, cfgs: &'a Vec<PeerManagerCfg>) -> Vec<PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler>> { let mut peers = Vec::new(); for i in 0..peer_count { let node_secret = SecretKey::from_slice(&[42 + i as u8; 32]).unwrap(); let ephemeral_bytes = [i as u8; 32]; let msg_handler = MessageHandler { chan_handler: &cfgs[i].chan_handler, route_handler: &cfgs[i].routing_handler }; let peer = PeerManager::new(msg_handler, node_secret, &ephemeral_bytes, &cfgs[i].logger, IgnoringMessageHandler {}); peers.push(peer); } peers } fn establish_connection<'a>(peer_a: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler>, peer_b: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler>) -> (FileDescriptor, FileDescriptor) { let secp_ctx = Secp256k1::new(); let a_id = PublicKey::from_secret_key(&secp_ctx, &peer_a.our_node_secret); let mut fd_a = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) }; let mut fd_b = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) }; let initial_data = peer_b.new_outbound_connection(a_id, fd_b.clone(), None).unwrap(); peer_a.new_inbound_connection(fd_a.clone(), None).unwrap(); assert_eq!(peer_a.read_event(&mut fd_a, &initial_data).unwrap(), false); peer_a.process_events(); assert_eq!(peer_b.read_event(&mut fd_b, &fd_a.outbound_data.lock().unwrap().split_off(0)).unwrap(), false); peer_b.process_events(); assert_eq!(peer_a.read_event(&mut fd_a, &fd_b.outbound_data.lock().unwrap().split_off(0)).unwrap(), false); peer_a.process_events(); assert_eq!(peer_b.read_event(&mut fd_b, &fd_a.outbound_data.lock().unwrap().split_off(0)).unwrap(), false); (fd_a.clone(), fd_b.clone()) } #[test] fn test_disconnect_peer() { // Simple test which builds a network of PeerManagers, connects them, brings them to NoiseState::Finished and // then pushes a DisconnectPeer event to remove the node flagged by id let cfgs = create_peermgr_cfgs(2); let chan_handler =
test_utils::TestChannelMessageHandler::new(); let mut peers = create_network(2, &cfgs); establish_connection(&peers[0], &peers[1]); assert_eq!(peers[0].peers.read().unwrap().peers.len(), 1); let secp_ctx = Secp256k1::new(); let their_id = PublicKey::from_secret_key(&secp_ctx, &peers[1].our_node_secret); chan_handler.pending_events.lock().unwrap().push(events::MessageSendEvent::HandleError { node_id: their_id, action: msgs::ErrorAction::DisconnectPeer { msg: None }, }); assert_eq!(chan_handler.pending_events.lock().unwrap().len(), 1); peers[0].message_handler.chan_handler = &chan_handler; peers[0].process_events(); assert_eq!(peers[0].peers.read().unwrap().peers.len(), 0); } #[test] fn test_timer_tick_occurred() { // Create peers, a vector of two peer managers, perform initial setup and check that peers[0] has one Peer. let cfgs = create_peermgr_cfgs(2); let peers = create_network(2, &cfgs); establish_connection(&peers[0], &peers[1]); assert_eq!(peers[0].peers.read().unwrap().peers.len(), 1); // peers[0] awaiting_pong is set to true, but the Peer is still connected peers[0].timer_tick_occurred(); peers[0].process_events(); assert_eq!(peers[0].peers.read().unwrap().peers.len(), 1); // Since timer_tick_occurred() is called again when awaiting_pong is true, all Peers are disconnected peers[0].timer_tick_occurred(); peers[0].process_events(); assert_eq!(peers[0].peers.read().unwrap().peers.len(), 0); } #[test] fn test_do_attempt_write_data() { // Create 2 peers with custom TestRoutingMessageHandlers and connect them. let cfgs = create_peermgr_cfgs(2); cfgs[0].routing_handler.request_full_sync.store(true, Ordering::Release); cfgs[1].routing_handler.request_full_sync.store(true, Ordering::Release); let peers = create_network(2, &cfgs); // By calling establish_connection, we trigger do_attempt_write_data between // the peers. Previously this function would mistakenly enter an infinite loop // when there were more channel messages available than could fit into a peer's // buffer. This issue would now be detected by this test (because we use custom // RoutingMessageHandlers that intentionally return more channel messages // than can fit into a peer's buffer). let (mut fd_a, mut fd_b) = establish_connection(&peers[0], &peers[1]); // Make each peer read the messages that the other peer just wrote to them. Note that // due to the max-message-before-ping limits this may take a few iterations to complete. for _ in 0..150/super::BUFFER_DRAIN_MSGS_PER_TICK + 1 { peers[1].process_events(); let a_read_data = fd_b.outbound_data.lock().unwrap().split_off(0); assert!(!a_read_data.is_empty()); peers[0].read_event(&mut fd_a, &a_read_data).unwrap(); peers[0].process_events(); let b_read_data = fd_a.outbound_data.lock().unwrap().split_off(0); assert!(!b_read_data.is_empty()); peers[1].read_event(&mut fd_b, &b_read_data).unwrap(); peers[0].process_events(); assert_eq!(fd_a.outbound_data.lock().unwrap().len(), 0, "Until A receives data, it shouldn't send more messages"); } // Check that each peer has received the expected number of channel updates and channel // announcements.
assert_eq!(cfgs[0].routing_handler.chan_upds_recvd.load(Ordering::Acquire), 100); assert_eq!(cfgs[0].routing_handler.chan_anns_recvd.load(Ordering::Acquire), 50); assert_eq!(cfgs[1].routing_handler.chan_upds_recvd.load(Ordering::Acquire), 100); assert_eq!(cfgs[1].routing_handler.chan_anns_recvd.load(Ordering::Acquire), 50); } #[test] fn test_handshake_timeout() { // Tests that we time out a peer still waiting on handshake completion after a full timer // tick. let cfgs = create_peermgr_cfgs(2); cfgs[0].routing_handler.request_full_sync.store(true, Ordering::Release); cfgs[1].routing_handler.request_full_sync.store(true, Ordering::Release); let peers = create_network(2, &cfgs); let secp_ctx = Secp256k1::new(); let a_id = PublicKey::from_secret_key(&secp_ctx, &peers[0].our_node_secret); let mut fd_a = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) }; let mut fd_b = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) }; let initial_data = peers[1].new_outbound_connection(a_id, fd_b.clone(), None).unwrap(); peers[0].new_inbound_connection(fd_a.clone(), None).unwrap(); // If we get a single timer tick before completion, that's fine assert_eq!(peers[0].peers.read().unwrap().peers.len(), 1); peers[0].timer_tick_occurred(); assert_eq!(peers[0].peers.read().unwrap().peers.len(), 1); assert_eq!(peers[0].read_event(&mut fd_a, &initial_data).unwrap(), false); peers[0].process_events(); assert_eq!(peers[1].read_event(&mut fd_b, &fd_a.outbound_data.lock().unwrap().split_off(0)).unwrap(), false); peers[1].process_events(); // ...but if we get a second timer tick, we should disconnect the peer peers[0].timer_tick_occurred(); assert_eq!(peers[0].peers.read().unwrap().peers.len(), 0); assert!(peers[0].read_event(&mut fd_a, &fd_b.outbound_data.lock().unwrap().split_off(0)).is_err()); } #[test] fn test_filter_addresses(){ // Tests the filter_addresses function. 
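// The cases below cover, in order: IPv4 ranges which should be filtered out (10/8, 0/8,
// 100.64/10, 127/8, 169.254/16, 172.16/12, 192.168/16 and 192.88.99/24), publicly routable IPv4
// addresses which should be kept, IPv6 addresses in 2000::/3 which should be kept, other IPv6
// addresses which should be filtered out, and finally `None`.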
// For (10/8) let ip_address = NetAddress::IPv4{addr: [10, 0, 0, 0], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), None); let ip_address = NetAddress::IPv4{addr: [10, 0, 255, 201], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), None); let ip_address = NetAddress::IPv4{addr: [10, 255, 255, 255], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), None); // For (0/8) let ip_address = NetAddress::IPv4{addr: [0, 0, 0, 0], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), None); let ip_address = NetAddress::IPv4{addr: [0, 0, 255, 187], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), None); let ip_address = NetAddress::IPv4{addr: [0, 255, 255, 255], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), None); // For (100.64/10) let ip_address = NetAddress::IPv4{addr: [100, 64, 0, 0], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), None); let ip_address = NetAddress::IPv4{addr: [100, 78, 255, 0], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), None); let ip_address = NetAddress::IPv4{addr: [100, 127, 255, 255], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), None); // For (127/8) let ip_address = NetAddress::IPv4{addr: [127, 0, 0, 0], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), None); let ip_address = NetAddress::IPv4{addr: [127, 65, 73, 0], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), None); let ip_address = NetAddress::IPv4{addr: [127, 255, 255, 255], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), None); // For (169.254/16) let ip_address = NetAddress::IPv4{addr: [169, 254, 0, 0], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), None); let ip_address = NetAddress::IPv4{addr: [169, 254, 221, 101], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), None); let ip_address = NetAddress::IPv4{addr: [169, 254, 255, 255], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), None); // For (172.16/12) let ip_address = NetAddress::IPv4{addr: [172, 16, 0, 0], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), None); let ip_address = NetAddress::IPv4{addr: [172, 27, 101, 23], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), None); let ip_address = NetAddress::IPv4{addr: [172, 31, 255, 255], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), None); // For (192.168/16) let ip_address = NetAddress::IPv4{addr: [192, 168, 0, 0], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), None); let ip_address = NetAddress::IPv4{addr: [192, 168, 205, 159], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), None); let ip_address = NetAddress::IPv4{addr: [192, 168, 255, 255], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), None); // For (192.88.99/24) let ip_address = NetAddress::IPv4{addr: [192, 88, 99, 0], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), None); let ip_address = NetAddress::IPv4{addr: [192, 88, 99, 140], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), None); let ip_address = NetAddress::IPv4{addr: [192, 88, 99, 255], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), None); // For other IPv4 addresses let ip_address = NetAddress::IPv4{addr: [188, 255, 99, 0], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone())); let 
ip_address = NetAddress::IPv4{addr: [123, 8, 129, 14], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone())); let ip_address = NetAddress::IPv4{addr: [2, 88, 9, 255], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone())); // For (2000::/3) let ip_address = NetAddress::IPv6{addr: [32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone())); let ip_address = NetAddress::IPv6{addr: [45, 34, 209, 190, 0, 123, 55, 34, 0, 0, 3, 27, 201, 0, 0, 0], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone())); let ip_address = NetAddress::IPv6{addr: [63, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone())); // For other IPv6 addresses let ip_address = NetAddress::IPv6{addr: [24, 240, 12, 32, 0, 0, 0, 0, 20, 97, 0, 32, 121, 254, 0, 0], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), None); let ip_address = NetAddress::IPv6{addr: [68, 23, 56, 63, 0, 0, 2, 7, 75, 109, 0, 39, 0, 0, 0, 0], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), None); let ip_address = NetAddress::IPv6{addr: [101, 38, 140, 230, 100, 0, 30, 98, 0, 26, 0, 0, 57, 96, 0, 0], port: 1000}; assert_eq!(filter_addresses(Some(ip_address.clone())), None); // For (None) assert_eq!(filter_addresses(None), None); } }
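// A rough sketch of how the API surface exercised above fits together in a custom (non-tokio)
// driver; `peer_manager` is a configured `PeerManager`, `descriptor` your `SocketDescriptor`
// implementation, the variable names are illustrative, and the surrounding event loop and error
// handling are omitted:
//
//     // Outbound connect: returns the initial handshake bytes to write to the new socket.
//     let initial_bytes = peer_manager.new_outbound_connection(their_node_id, descriptor.clone(), None)?;
//     // Inbound accept: register the descriptor before reading anything from the socket.
//     peer_manager.new_inbound_connection(descriptor.clone(), None)?;
//     // For every chunk of bytes read from a socket; `true` asks you to pause reading until a
//     // later send_data call resumes it.
//     let pause_read = peer_manager.read_event(&mut descriptor, &data)?;
//     peer_manager.process_events();
//     // Roughly every ten seconds, send pings and disconnect peers which did not respond.
//     peer_manager.timer_tick_occurred();
//     // When the OS reports a connection closed:
//     peer_manager.socket_disconnected(&descriptor);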