// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! The top-level network map tracking logic lives here.

use bitcoin::secp256k1::key::PublicKey;
use bitcoin::secp256k1::Secp256k1;
use bitcoin::secp256k1;

use bitcoin::hashes::sha256d::Hash as Sha256dHash;
use bitcoin::hashes::Hash;
use bitcoin::blockdata::script::Builder;
use bitcoin::blockdata::transaction::TxOut;
use bitcoin::blockdata::opcodes;
use bitcoin::hash_types::BlockHash;

use chain;
use chain::Access;
use ln::features::{ChannelFeatures, NodeFeatures};
use ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT};
use ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, OptionalField};
use ln::msgs::{QueryChannelRange, ReplyChannelRange, QueryShortChannelIds, ReplyShortChannelIdsEnd};
use ln::msgs;
use util::ser::{Writeable, Readable, Writer};
use util::logger::{Logger, Level};
use util::events::{Event, EventHandler, MessageSendEvent, MessageSendEventsProvider};
use util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK};

use io;
use prelude::*;
use alloc::collections::{BTreeMap, btree_map::Entry as BtreeEntry};
use core::{cmp, fmt};
use sync::{RwLock, RwLockReadGuard};
use core::sync::atomic::{AtomicUsize, Ordering};
use sync::Mutex;
use core::ops::Deref;
use bitcoin::hashes::hex::ToHex;

/// The maximum number of extra bytes which we do not understand in a gossip message before we will
/// refuse to relay the message.
const MAX_EXCESS_BYTES_FOR_RELAY: usize = 1024;

/// Maximum number of short_channel_ids that will be encoded in one gossip reply message.
/// This value ensures a reply fits within the 65k payload limit and is consistent with other implementations.
const MAX_SCIDS_PER_REPLY: usize = 8000;

/// Represents the network as nodes and channels between them
pub struct NetworkGraph {
	genesis_hash: BlockHash,
	// Lock order: channels -> nodes
	channels: RwLock<BTreeMap<u64, ChannelInfo>>,
	nodes: RwLock<BTreeMap<PublicKey, NodeInfo>>,
}

/// A read-only view of [`NetworkGraph`].
pub struct ReadOnlyNetworkGraph<'a> {
	channels: RwLockReadGuard<'a, BTreeMap<u64, ChannelInfo>>,
	nodes: RwLockReadGuard<'a, BTreeMap<PublicKey, NodeInfo>>,
}

/// Update to the [`NetworkGraph`] based on payment failure information conveyed via the Onion
/// return packet by a node along the route. See [BOLT #4] for details.
///
/// [BOLT #4]: https://github.com/lightningnetwork/lightning-rfc/blob/master/04-onion-routing.md
#[derive(Clone, Debug, PartialEq)]
pub enum NetworkUpdate {
	/// An error indicating a `channel_update` message should be applied via
	/// [`NetworkGraph::update_channel`].
	ChannelUpdateMessage {
		/// The update to apply via [`NetworkGraph::update_channel`].
		msg: ChannelUpdate,
	},
	/// An error indicating only that a channel has been closed, which should be applied via
	/// [`NetworkGraph::close_channel_from_update`].
	ChannelClosed {
		/// The short channel id of the closed channel.
		short_channel_id: u64,
		/// Whether the channel should be permanently removed or temporarily disabled until a new
		/// `channel_update` message is received.
		is_permanent: bool,
	},
	/// An error indicating only that a node has failed, which should be applied via
	/// [`NetworkGraph::fail_node`].
	NodeFailure {
		/// The node id of the failed node.
		node_id: PublicKey,
		/// Whether the node should be permanently removed from consideration or can be restored
		/// when a new `channel_update` message is received.
		is_permanent: bool,
	}
}

impl_writeable_tlv_based_enum_upgradable!(NetworkUpdate,
	(0, ChannelUpdateMessage) => {
		(0, msg, required),
	},
	(2, ChannelClosed) => {
		(0, short_channel_id, required),
		(2, is_permanent, required),
	},
	(4, NodeFailure) => {
		(0, node_id, required),
		(2, is_permanent, required),
	},
);
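
// As an illustrative sketch (not part of the API surface; the SCID value is made up), a payment
// failure that permanently closed short channel id 42 would surface to this module as:
//
//     let update = NetworkUpdate::ChannelClosed { short_channel_id: 42, is_permanent: true };
//
// which `NetGraphMsgHandler::handle_event` below forwards to `handle_network_update`.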

impl<C: Deref, L: Deref> EventHandler for NetGraphMsgHandler<C, L>
where C::Target: chain::Access, L::Target: Logger {
	fn handle_event(&self, event: &Event) {
		if let Event::PaymentFailed { payment_hash: _, rejected_by_dest: _, network_update, .. } = event {
			if let Some(network_update) = network_update {
				self.handle_network_update(network_update);
			}
		}
	}
}

/// Receives and validates network updates from peers,
/// stores authentic and relevant data as a network graph.
/// This network graph is then used for routing payments.
/// Provides an interface to help with initial routing sync by
/// serving historical announcements.
///
/// Serves as an [`EventHandler`] for applying updates from [`Event::PaymentFailed`] to the
/// [`NetworkGraph`].
pub struct NetGraphMsgHandler<C: Deref, L: Deref>
where C::Target: chain::Access, L::Target: Logger
{
	secp_ctx: Secp256k1<secp256k1::VerifyOnly>,
	/// Representation of the payment channel network
	pub network_graph: NetworkGraph,
	chain_access: Option<C>,
	full_syncs_requested: AtomicUsize,
	pending_events: Mutex<Vec<MessageSendEvent>>,
	logger: L,
}

impl<C: Deref, L: Deref> NetGraphMsgHandler<C, L>
where C::Target: chain::Access, L::Target: Logger
{
	/// Creates a new tracker of the actual state of the network of channels and nodes,
	/// assuming an existing Network Graph.
	/// Chain monitor is used to make sure announced channels exist on-chain,
	/// channel data is correct, and that the announcement is signed with
	/// channel owners' keys.
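	///
	/// # Example
	///
	/// A minimal construction sketch. `genesis_hash`, `my_chain_source` and `my_logger` are
	/// placeholders for the chain's genesis block hash and user-provided `chain::Access` /
	/// `Logger` implementations; the block is not compiled as a doctest:
	///
	/// ```ignore
	/// use std::sync::Arc;
	/// use lightning::routing::network_graph::{NetworkGraph, NetGraphMsgHandler};
	///
	/// let network_graph = NetworkGraph::new(genesis_hash);
	/// let handler = NetGraphMsgHandler::new(network_graph, Some(Arc::clone(&my_chain_source)), Arc::clone(&my_logger));
	/// ```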
	pub fn new(network_graph: NetworkGraph, chain_access: Option<C>, logger: L) -> Self {
		NetGraphMsgHandler {
			secp_ctx: Secp256k1::verification_only(),
			network_graph,
			full_syncs_requested: AtomicUsize::new(0),
			chain_access,
			pending_events: Mutex::new(vec![]),
			logger,
		}
	}

	/// Adds a provider used to check new announcements. Does not affect
	/// existing announcements unless they are updated.
	/// Adding, updating or removing the provider replaces the current one.
	pub fn add_chain_access(&mut self, chain_access: Option<C>) {
		self.chain_access = chain_access;
	}

	/// Returns true when a full routing table sync should be performed with a peer.
	fn should_request_full_sync(&self, _node_id: &PublicKey) -> bool {
		//TODO: Determine whether to request a full sync based on the network map.
		const FULL_SYNCS_TO_REQUEST: usize = 5;
		if self.full_syncs_requested.load(Ordering::Acquire) < FULL_SYNCS_TO_REQUEST {
			self.full_syncs_requested.fetch_add(1, Ordering::AcqRel);
			true
		} else {
			false
		}
	}

	/// Applies changes to the [`NetworkGraph`] from the given update.
	fn handle_network_update(&self, update: &NetworkUpdate) {
		match *update {
			NetworkUpdate::ChannelUpdateMessage { ref msg } => {
				let short_channel_id = msg.contents.short_channel_id;
				let is_enabled = msg.contents.flags & (1 << 1) != (1 << 1);
				let status = if is_enabled { "enabled" } else { "disabled" };
				log_debug!(self.logger, "Updating channel with channel_update from a payment failure. Channel {} is {}.", short_channel_id, status);
				let _ = self.network_graph.update_channel(msg, &self.secp_ctx);
			},
			NetworkUpdate::ChannelClosed { short_channel_id, is_permanent } => {
				let action = if is_permanent { "Removing" } else { "Disabling" };
				log_debug!(self.logger, "{} channel graph entry for {} due to a payment failure.", action, short_channel_id);
				self.network_graph.close_channel_from_update(short_channel_id, is_permanent);
			},
			NetworkUpdate::NodeFailure { ref node_id, is_permanent } => {
				let action = if is_permanent { "Removing" } else { "Disabling" };
				log_debug!(self.logger, "{} node graph entry for {} due to a payment failure.", action, node_id);
				self.network_graph.fail_node(node_id, is_permanent);
			},
		}
	}
}

macro_rules! secp_verify_sig {
	( $secp_ctx: expr, $msg: expr, $sig: expr, $pubkey: expr ) => {
		match $secp_ctx.verify($msg, $sig, $pubkey) {
			Ok(_) => {},
			Err(_) => return Err(LightningError { err: "Invalid signature from remote node".to_owned(), action: ErrorAction::IgnoreError }),
		}
	};
}

impl<C: Deref, L: Deref> RoutingMessageHandler for NetGraphMsgHandler<C, L>
where C::Target: chain::Access, L::Target: Logger
{
	fn handle_node_announcement(&self, msg: &msgs::NodeAnnouncement) -> Result<bool, LightningError> {
		self.network_graph.update_node_from_announcement(msg, &self.secp_ctx)?;
		Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY &&
		   msg.contents.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY &&
		   msg.contents.excess_data.len() + msg.contents.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY)
	}

	fn handle_channel_announcement(&self, msg: &msgs::ChannelAnnouncement) -> Result<bool, LightningError> {
		self.network_graph.update_channel_from_announcement(msg, &self.chain_access, &self.secp_ctx)?;
		log_trace!(self.logger, "Added channel_announcement for {}{}", msg.contents.short_channel_id, if !msg.contents.excess_data.is_empty() { " with excess uninterpreted data!" } else { "" });
		Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY)
	}

	fn handle_channel_update(&self, msg: &msgs::ChannelUpdate) -> Result<bool, LightningError> {
		self.network_graph.update_channel(msg, &self.secp_ctx)?;
		Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY)
	}

	fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> {
		let mut result = Vec::with_capacity(batch_amount as usize);
		let channels = self.network_graph.channels.read().unwrap();
		let mut iter = channels.range(starting_point..);
		while result.len() < batch_amount as usize {
			if let Some((_, ref chan)) = iter.next() {
				if chan.announcement_message.is_some() {
					let chan_announcement = chan.announcement_message.clone().unwrap();
					let mut one_to_two_announcement: Option<msgs::ChannelUpdate> = None;
					let mut two_to_one_announcement: Option<msgs::ChannelUpdate> = None;
					if let Some(one_to_two) = chan.one_to_two.as_ref() {
						one_to_two_announcement = one_to_two.last_update_message.clone();
					}
					if let Some(two_to_one) = chan.two_to_one.as_ref() {
						two_to_one_announcement = two_to_one.last_update_message.clone();
					}
					result.push((chan_announcement, one_to_two_announcement, two_to_one_announcement));
				} else {
					// TODO: We may end up sending un-announced channel_updates if we are sending
					// initial sync data while receiving announce/updates for this channel.
				}
			} else {
				return result;
			}
		}
		result
	}

	fn get_next_node_announcements(&self, starting_point: Option<&PublicKey>, batch_amount: u8) -> Vec<NodeAnnouncement> {
		let mut result = Vec::with_capacity(batch_amount as usize);
		let nodes = self.network_graph.nodes.read().unwrap();
		let mut iter = if let Some(pubkey) = starting_point {
				let mut iter = nodes.range((*pubkey)..);
				iter.next();
				iter
			} else {
				nodes.range(..)
			};
		while result.len() < batch_amount as usize {
			if let Some((_, ref node)) = iter.next() {
				if let Some(node_info) = node.announcement_info.as_ref() {
					if node_info.announcement_message.is_some() {
						result.push(node_info.announcement_message.clone().unwrap());
					}
				}
			} else {
				return result;
			}
		}
		result
	}

	/// Initiates a stateless sync of routing gossip information with a peer
	/// using gossip_queries. The default strategy used by this implementation
	/// is to sync the full block range with several peers.
	///
	/// We should expect one or more reply_channel_range messages in response
	/// to our query_channel_range. Each reply will enqueue a query_scid message
	/// to request gossip messages for each channel. The sync is considered complete
	/// when the final reply_scids_end message is received, though we are not
	/// tracking this directly.
	fn sync_routing_table(&self, their_node_id: &PublicKey, init_msg: &Init) {
		// We will only perform a sync with peers that support gossip_queries.
		if !init_msg.features.supports_gossip_queries() {
			return ();
		}

		// Check if we need to perform a full synchronization with this peer
		if !self.should_request_full_sync(their_node_id) {
			return ();
		}

		let first_blocknum = 0;
		let number_of_blocks = 0xffffffff;
		log_debug!(self.logger, "Sending query_channel_range peer={}, first_blocknum={}, number_of_blocks={}", log_pubkey!(their_node_id), first_blocknum, number_of_blocks);
		let mut pending_events = self.pending_events.lock().unwrap();
		pending_events.push(MessageSendEvent::SendChannelRangeQuery {
			node_id: their_node_id.clone(),
			msg: QueryChannelRange {
				chain_hash: self.network_graph.genesis_hash,
				first_blocknum,
				number_of_blocks,
			},
		});
	}

	/// Statelessly processes a reply to a channel range query by immediately
	/// sending an SCID query with SCIDs in the reply. To keep this handler
	/// stateless, it does not validate the sequencing of replies for multi-
	/// reply ranges. It does not validate whether the reply(ies) cover the
	/// queried range. It also does not filter SCIDs to only those in the
	/// original query range. We also do not validate that the chain_hash
	/// matches the chain_hash of the NetworkGraph. Any chan_ann message that
	/// does not match our chain_hash will be rejected when the announcement is
	/// processed.
	fn handle_reply_channel_range(&self, their_node_id: &PublicKey, msg: ReplyChannelRange) -> Result<(), LightningError> {
		log_debug!(self.logger, "Handling reply_channel_range peer={}, first_blocknum={}, number_of_blocks={}, sync_complete={}, scids={}", log_pubkey!(their_node_id), msg.first_blocknum, msg.number_of_blocks, msg.sync_complete, msg.short_channel_ids.len(),);

		log_debug!(self.logger, "Sending query_short_channel_ids peer={}, batch_size={}", log_pubkey!(their_node_id), msg.short_channel_ids.len());
		let mut pending_events = self.pending_events.lock().unwrap();
		pending_events.push(MessageSendEvent::SendShortIdsQuery {
			node_id: their_node_id.clone(),
			msg: QueryShortChannelIds {
				chain_hash: msg.chain_hash,
				short_channel_ids: msg.short_channel_ids,
			}
		});

		Ok(())
	}

	/// When an SCID query is initiated the remote peer will begin streaming
	/// gossip messages. In the event of a failure, we may have received
	/// some channel information. Before trying with another peer, the
	/// caller should update its set of SCIDs that need to be queried.
	fn handle_reply_short_channel_ids_end(&self, their_node_id: &PublicKey, msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> {
		log_debug!(self.logger, "Handling reply_short_channel_ids_end peer={}, full_information={}", log_pubkey!(their_node_id), msg.full_information);

		// If the remote node does not have up-to-date information for the
		// chain_hash they will set full_information=false. We can fail
		// the result and try again with a different peer.
		if !msg.full_information {
			return Err(LightningError {
				err: String::from("Received reply_short_channel_ids_end with no information"),
				action: ErrorAction::IgnoreError
			});
		}

		Ok(())
	}

	/// Processes a query from a peer by finding announced/public channels whose funding UTXOs
	/// are in the specified block range. Due to message size limits, large range
	/// queries may result in several reply messages. This implementation enqueues
	/// all reply messages into pending events. Each message will allocate just under 65KiB. A full
	/// sync of the public routing table with 128k channels will generate 16 messages and allocate ~1MB.
	/// Logic can be changed to reduce allocation if/when a full sync of the routing table impacts
	/// memory constrained systems.
	fn handle_query_channel_range(&self, their_node_id: &PublicKey, msg: QueryChannelRange) -> Result<(), LightningError> {
		log_debug!(self.logger, "Handling query_channel_range peer={}, first_blocknum={}, number_of_blocks={}", log_pubkey!(their_node_id), msg.first_blocknum, msg.number_of_blocks);
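
		// Note: a short_channel_id packs the funding block height into its most significant three
		// bytes (followed by the transaction index and output index), so `scid_from_parts(block, 0, 0)`
		// below is the smallest possible SCID confirmed in `block` and can serve as a range bound.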
		let inclusive_start_scid = scid_from_parts(msg.first_blocknum as u64, 0, 0);

		// We might receive valid queries with end_blocknum that would overflow SCID conversion.
		// If so, we manually cap the ending block to avoid this overflow.
		let exclusive_end_scid = scid_from_parts(cmp::min(msg.end_blocknum() as u64, MAX_SCID_BLOCK), 0, 0);

		// Per spec, we must reply to a query. Send an empty message when things are invalid.
		if msg.chain_hash != self.network_graph.genesis_hash || inclusive_start_scid.is_err() || exclusive_end_scid.is_err() || msg.number_of_blocks == 0 {
			let mut pending_events = self.pending_events.lock().unwrap();
			pending_events.push(MessageSendEvent::SendReplyChannelRange {
				node_id: their_node_id.clone(),
				msg: ReplyChannelRange {
					chain_hash: msg.chain_hash.clone(),
					first_blocknum: msg.first_blocknum,
					number_of_blocks: msg.number_of_blocks,
					sync_complete: true,
					short_channel_ids: vec![],
				}
			});
			return Err(LightningError {
				err: String::from("query_channel_range could not be processed"),
				action: ErrorAction::IgnoreError,
			});
		}

		// Creates channel batches. We are not checking if the channel is routable
		// (has at least one update). A peer may still want to know the channel
		// exists even if it's not yet routable.
		let mut batches: Vec<Vec<u64>> = vec![Vec::with_capacity(MAX_SCIDS_PER_REPLY)];
		let channels = self.network_graph.channels.read().unwrap();
		for (_, ref chan) in channels.range(inclusive_start_scid.unwrap()..exclusive_end_scid.unwrap()) {
			if let Some(chan_announcement) = &chan.announcement_message {
				// Construct a new batch if last one is full
				if batches.last().unwrap().len() == batches.last().unwrap().capacity() {
					batches.push(Vec::with_capacity(MAX_SCIDS_PER_REPLY));
				}

				let batch = batches.last_mut().unwrap();
				batch.push(chan_announcement.contents.short_channel_id);
			}
		}
		drop(channels);

		let mut pending_events = self.pending_events.lock().unwrap();
		let batch_count = batches.len();
		let mut prev_batch_endblock = msg.first_blocknum;
		for (batch_index, batch) in batches.into_iter().enumerate() {
			// Per spec, the initial `first_blocknum` needs to be <= the query's `first_blocknum`
			// and subsequent `first_blocknum`s must be >= the prior reply's `first_blocknum`.
			//
			// Additionally, c-lightning versions < 0.10 require that the `first_blocknum` of each
			// reply is >= the previous reply's `first_blocknum` and either exactly the previous
			// reply's `first_blocknum + number_of_blocks` or exactly one greater. This is a
			// significant diversion from the requirements set by the spec, and, in case of blocks
			// with no channel opens (e.g. empty blocks), requires that we use the previous value
			// and *not* derive the first_blocknum from the actual first block of the reply.
			let first_blocknum = prev_batch_endblock;

			// Each message carries the number of blocks (from the `first_blocknum`) its contents
			// fit in. Though there is no requirement that we use exactly the number of blocks its
			// contents are from, except for the bogus requirements c-lightning enforces, above.
			//
			// Per spec, the last end block (ie `first_blocknum + number_of_blocks`) needs to be
			// >= the query's end block. Thus, for the last reply, we calculate the difference
			// between the query's end block and the start of the reply.
			//
			// Overflow safe since end_blocknum = msg.first_blocknum + msg.number_of_blocks and
			// first_blocknum will be either msg.first_blocknum or a higher block height.
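			//
			// As a worked example (illustrative numbers only): for a query with
			// first_blocknum=100_000 and number_of_blocks=1_000 (so end_blocknum()=101_000)
			// answered in two batches, where the last SCID of the first batch is in block 100_500,
			// we send reply 1 with first_blocknum=100_000, number_of_blocks=500, sync_complete=false,
			// and reply 2 with first_blocknum=100_500, number_of_blocks=500, sync_complete=true.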
			let (sync_complete, number_of_blocks) = if batch_index == batch_count-1 {
				(true, msg.end_blocknum() - first_blocknum)
			}
			// Prior replies should use the number of blocks that fit into the reply. Overflow
			// safe since first_blocknum is always <= last SCID's block.
			else {
				(false, block_from_scid(batch.last().unwrap()) - first_blocknum)
			};

			prev_batch_endblock = first_blocknum + number_of_blocks;

			pending_events.push(MessageSendEvent::SendReplyChannelRange {
				node_id: their_node_id.clone(),
				msg: ReplyChannelRange {
					chain_hash: msg.chain_hash.clone(),
					first_blocknum,
					number_of_blocks,
					sync_complete,
					short_channel_ids: batch,
				}
			});
		}

		Ok(())
	}

	fn handle_query_short_channel_ids(&self, _their_node_id: &PublicKey, _msg: QueryShortChannelIds) -> Result<(), LightningError> {
		// TODO
		Err(LightningError {
			err: String::from("Not implemented"),
			action: ErrorAction::IgnoreError,
		})
	}
}

impl<C: Deref, L: Deref> MessageSendEventsProvider for NetGraphMsgHandler<C, L>
where
	C::Target: chain::Access,
	L::Target: Logger,
{
	fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
		let mut ret = Vec::new();
		let mut pending_events = self.pending_events.lock().unwrap();
		core::mem::swap(&mut ret, &mut pending_events);
		ret
	}
}

#[derive(Clone, Debug, PartialEq)]
/// Details about one direction of a channel. Received
/// within a channel update.
pub struct DirectionalChannelInfo {
	/// When the last update to the channel direction was issued.
	/// Value is opaque, as set in the announcement.
	pub last_update: u32,
	/// Whether the channel can be currently used for payments (in this one direction).
	pub enabled: bool,
	/// The difference in CLTV values that you must have when routing through this channel.
	pub cltv_expiry_delta: u16,
	/// The minimum value, which must be relayed to the next hop via the channel
	pub htlc_minimum_msat: u64,
	/// The maximum value which may be relayed to the next hop via the channel.
	pub htlc_maximum_msat: Option<u64>,
	/// Fees charged when the channel is used for routing
	pub fees: RoutingFees,
	/// Most recent update for the channel received from the network
	/// Mostly redundant with the data we store in fields explicitly.
	/// Everything else is useful only for sending out for initial routing sync.
	/// Not stored if contains excess data to prevent DoS.
	pub last_update_message: Option<ChannelUpdate>,
}

impl fmt::Display for DirectionalChannelInfo {
	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
		write!(f, "last_update {}, enabled {}, cltv_expiry_delta {}, htlc_minimum_msat {}, fees {:?}", self.last_update, self.enabled, self.cltv_expiry_delta, self.htlc_minimum_msat, self.fees)?;
		Ok(())
	}
}

impl_writeable_tlv_based!(DirectionalChannelInfo, {
	(0, last_update, required),
	(2, enabled, required),
	(4, cltv_expiry_delta, required),
	(6, htlc_minimum_msat, required),
	(8, htlc_maximum_msat, required),
	(10, fees, required),
	(12, last_update_message, required),
});

#[derive(Clone, Debug, PartialEq)]
/// Details about a channel (both directions).
/// Received within a channel announcement.
pub struct ChannelInfo {
	/// Protocol features of a channel communicated during its announcement
	pub features: ChannelFeatures,
	/// Source node of the first direction of a channel
	pub node_one: PublicKey,
	/// Details about the first direction of a channel
	pub one_to_two: Option<DirectionalChannelInfo>,
	/// Source node of the second direction of a channel
	pub node_two: PublicKey,
	/// Details about the second direction of a channel
	pub two_to_one: Option<DirectionalChannelInfo>,
	/// The channel capacity as seen on-chain, if chain lookup is available.
	pub capacity_sats: Option<u64>,
	/// An initial announcement of the channel
	/// Mostly redundant with the data we store in fields explicitly.
	/// Everything else is useful only for sending out for initial routing sync.
	/// Not stored if contains excess data to prevent DoS.
	pub announcement_message: Option<ChannelAnnouncement>,
}

impl fmt::Display for ChannelInfo {
	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
		write!(f, "features: {}, node_one: {}, one_to_two: {:?}, node_two: {}, two_to_one: {:?}",
		   log_bytes!(self.features.encode()), log_pubkey!(self.node_one), self.one_to_two, log_pubkey!(self.node_two), self.two_to_one)?;
		Ok(())
	}
}

impl_writeable_tlv_based!(ChannelInfo, {
	(0, features, required),
	(2, node_one, required),
	(4, one_to_two, required),
	(6, node_two, required),
	(8, two_to_one, required),
	(10, capacity_sats, required),
	(12, announcement_message, required),
});

/// Fees for routing via a given channel or a node
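///
/// As a rough sketch of how these fields are used (per BOLT #7), the fee to forward `amt_msat`
/// over a channel advertising these fees is (not compiled as a doctest):
///
/// ```ignore
/// let fee_msat = fees.base_msat as u64 + amt_msat * fees.proportional_millionths as u64 / 1_000_000;
/// ```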
#[derive(Eq, PartialEq, Copy, Clone, Debug, Hash)]
pub struct RoutingFees {
	/// Flat routing fee in millisatoshis
	pub base_msat: u32,
	/// Liquidity-based routing fee in millionths of a routed amount.
	/// In other words, 10000 is 1%.
	pub proportional_millionths: u32,
}

impl_writeable_tlv_based!(RoutingFees, {
	(0, base_msat, required),
	(2, proportional_millionths, required)
});

#[derive(Clone, Debug, PartialEq)]
/// Information received in the latest node_announcement from this node.
pub struct NodeAnnouncementInfo {
	/// Protocol features the node announced support for
	pub features: NodeFeatures,
	/// When the last known update to the node state was issued.
	/// Value is opaque, as set in the announcement.
	pub last_update: u32,
	/// Color assigned to the node
	pub rgb: [u8; 3],
	/// Moniker assigned to the node.
	/// May be invalid or malicious (eg control chars),
	/// should not be exposed to the user.
	pub alias: [u8; 32],
	/// Internet-level addresses via which one can connect to the node
	pub addresses: Vec<NetAddress>,
	/// An initial announcement of the node
	/// Mostly redundant with the data we store in fields explicitly.
	/// Everything else is useful only for sending out for initial routing sync.
	/// Not stored if contains excess data to prevent DoS.
	pub announcement_message: Option<NodeAnnouncement>
}

impl_writeable_tlv_based!(NodeAnnouncementInfo, {
	(0, features, required),
	(2, last_update, required),
	(4, rgb, required),
	(6, alias, required),
	(8, announcement_message, option),
	(10, addresses, vec_type),
});

#[derive(Clone, Debug, PartialEq)]
/// Details about a node in the network, known from the network announcement.
pub struct NodeInfo {
	/// All valid channels a node has announced
	pub channels: Vec<u64>,
	/// Lowest fees enabling routing via any of the enabled, known channels to a node.
	/// The two fields (flat and proportional fee) are independent,
	/// meaning they don't have to refer to the same channel.
	pub lowest_inbound_channel_fees: Option<RoutingFees>,
	/// More information about a node from node_announcement.
	/// Optional because we store a Node entry after learning about it from
	/// a channel announcement, but before receiving a node announcement.
	pub announcement_info: Option<NodeAnnouncementInfo>
}

impl fmt::Display for NodeInfo {
	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
		write!(f, "lowest_inbound_channel_fees: {:?}, channels: {:?}, announcement_info: {:?}",
		   self.lowest_inbound_channel_fees, &self.channels[..], self.announcement_info)?;
		Ok(())
	}
}

impl_writeable_tlv_based!(NodeInfo, {
	(0, lowest_inbound_channel_fees, option),
	(2, announcement_info, option),
	(4, channels, vec_type),
});

const SERIALIZATION_VERSION: u8 = 1;
const MIN_SERIALIZATION_VERSION: u8 = 1;
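
// `NetworkGraph` can be persisted via `Writeable` and restored via `Readable`. As a rough,
// illustrative sketch (error handling omitted; `graph` is some existing `NetworkGraph`):
//
//     let bytes = graph.encode();
//     let restored: NetworkGraph = Readable::read(&mut &bytes[..]).unwrap();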

impl Writeable for NetworkGraph {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);

		self.genesis_hash.write(writer)?;
		let channels = self.channels.read().unwrap();
		(channels.len() as u64).write(writer)?;
		for (ref chan_id, ref chan_info) in channels.iter() {
			(*chan_id).write(writer)?;
			chan_info.write(writer)?;
		}
		let nodes = self.nodes.read().unwrap();
		(nodes.len() as u64).write(writer)?;
		for (ref node_id, ref node_info) in nodes.iter() {
			node_id.write(writer)?;
			node_info.write(writer)?;
		}

		write_tlv_fields!(writer, {});
		Ok(())
	}
}

impl Readable for NetworkGraph {
	fn read<R: io::Read>(reader: &mut R) -> Result<NetworkGraph, DecodeError> {
		let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);

		let genesis_hash: BlockHash = Readable::read(reader)?;
		let channels_count: u64 = Readable::read(reader)?;
		let mut channels = BTreeMap::new();
		for _ in 0..channels_count {
			let chan_id: u64 = Readable::read(reader)?;
			let chan_info = Readable::read(reader)?;
			channels.insert(chan_id, chan_info);
		}
		let nodes_count: u64 = Readable::read(reader)?;
		let mut nodes = BTreeMap::new();
		for _ in 0..nodes_count {
			let node_id = Readable::read(reader)?;
			let node_info = Readable::read(reader)?;
			nodes.insert(node_id, node_info);
		}
		read_tlv_fields!(reader, {});

		Ok(NetworkGraph {
			genesis_hash,
			channels: RwLock::new(channels),
			nodes: RwLock::new(nodes),
		})
	}
}

impl fmt::Display for NetworkGraph {
	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
		writeln!(f, "Network map\n[Channels]")?;
		for (key, val) in self.channels.read().unwrap().iter() {
			writeln!(f, " {}: {}", key, val)?;
		}
		writeln!(f, "[Nodes]")?;
		for (key, val) in self.nodes.read().unwrap().iter() {
			writeln!(f, " {}: {}", log_pubkey!(key), val)?;
		}
		Ok(())
	}
}

impl PartialEq for NetworkGraph {
	fn eq(&self, other: &Self) -> bool {
		self.genesis_hash == other.genesis_hash &&
			*self.channels.read().unwrap() == *other.channels.read().unwrap() &&
			*self.nodes.read().unwrap() == *other.nodes.read().unwrap()
	}
}

impl NetworkGraph {
	/// Creates a new, empty, network graph.
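	///
	/// # Example
	///
	/// A minimal sketch; the `bitcoin` genesis-block helpers are assumed to be available to the
	/// caller, and the block is not compiled as a doctest:
	///
	/// ```ignore
	/// use bitcoin::blockdata::constants::genesis_block;
	/// use bitcoin::network::constants::Network;
	/// use lightning::routing::network_graph::NetworkGraph;
	///
	/// let genesis_hash = genesis_block(Network::Testnet).header.block_hash();
	/// let network_graph = NetworkGraph::new(genesis_hash);
	/// ```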
	pub fn new(genesis_hash: BlockHash) -> NetworkGraph {
		Self {
			genesis_hash,
			channels: RwLock::new(BTreeMap::new()),
			nodes: RwLock::new(BTreeMap::new()),
		}
	}

	/// Returns a read-only view of the network graph.
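	///
	/// For example, counting the currently-known channels might look like the following sketch
	/// (`network_graph` being an existing `NetworkGraph`; not compiled as a doctest):
	///
	/// ```ignore
	/// let num_channels = network_graph.read_only().channels().len();
	/// ```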
	pub fn read_only(&'_ self) -> ReadOnlyNetworkGraph<'_> {
		let channels = self.channels.read().unwrap();
		let nodes = self.nodes.read().unwrap();
		ReadOnlyNetworkGraph {
			channels,
			nodes,
		}
	}

	/// For an already known node (from channel announcements), update its stored properties from a
	/// given node announcement.
	///
	/// You probably don't want to call this directly, instead relying on a NetGraphMsgHandler's
	/// RoutingMessageHandler implementation to call it indirectly. This may be useful to accept
	/// routing messages from a source using a protocol other than the lightning P2P protocol.
	pub fn update_node_from_announcement<T: secp256k1::Verification>(&self, msg: &msgs::NodeAnnouncement, secp_ctx: &Secp256k1<T>) -> Result<(), LightningError> {
		let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]);
		secp_verify_sig!(secp_ctx, &msg_hash, &msg.signature, &msg.contents.node_id);
		self.update_node_from_announcement_intern(&msg.contents, Some(&msg))
	}

	/// For an already known node (from channel announcements), update its stored properties from a
	/// given node announcement without verifying the associated signatures. Because we aren't
	/// given the associated signatures here we cannot relay the node announcement to any of our
	/// peers.
	pub fn update_node_from_unsigned_announcement(&self, msg: &msgs::UnsignedNodeAnnouncement) -> Result<(), LightningError> {
		self.update_node_from_announcement_intern(msg, None)
	}

	fn update_node_from_announcement_intern(&self, msg: &msgs::UnsignedNodeAnnouncement, full_msg: Option<&msgs::NodeAnnouncement>) -> Result<(), LightningError> {
		match self.nodes.write().unwrap().get_mut(&msg.node_id) {
			None => Err(LightningError{err: "No existing channels for node_announcement".to_owned(), action: ErrorAction::IgnoreError}),
			Some(node) => {
				if let Some(node_info) = node.announcement_info.as_ref() {
					if node_info.last_update >= msg.timestamp {
						return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Trace)});
					}
				}

				let should_relay =
					msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY &&
					msg.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY &&
					msg.excess_data.len() + msg.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY;
				node.announcement_info = Some(NodeAnnouncementInfo {
					features: msg.features.clone(),
					last_update: msg.timestamp,
					rgb: msg.rgb,
					alias: msg.alias,
					addresses: msg.addresses.clone(),
					announcement_message: if should_relay { full_msg.cloned() } else { None },
				});

				Ok(())
			}
		}
	}

	/// Store or update channel info from a channel announcement.
	///
	/// You probably don't want to call this directly, instead relying on a NetGraphMsgHandler's
	/// RoutingMessageHandler implementation to call it indirectly. This may be useful to accept
	/// routing messages from a source using a protocol other than the lightning P2P protocol.
	///
	/// If a `chain::Access` object is provided via `chain_access`, it will be called to verify
	/// the corresponding UTXO exists on chain and is correctly-formatted.
	pub fn update_channel_from_announcement<T: secp256k1::Verification, C: Deref>(
		&self, msg: &msgs::ChannelAnnouncement, chain_access: &Option<C>, secp_ctx: &Secp256k1<T>
	) -> Result<(), LightningError>
	where
		C::Target: chain::Access,
	{
		let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]);
		secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_1, &msg.contents.node_id_1);
		secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_2, &msg.contents.node_id_2);
		secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_1, &msg.contents.bitcoin_key_1);
		secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_2, &msg.contents.bitcoin_key_2);
		self.update_channel_from_unsigned_announcement_intern(&msg.contents, Some(msg), chain_access)
	}

	/// Store or update channel info from a channel announcement without verifying the associated
	/// signatures. Because we aren't given the associated signatures here we cannot relay the
	/// channel announcement to any of our peers.
	///
	/// If a `chain::Access` object is provided via `chain_access`, it will be called to verify
	/// the corresponding UTXO exists on chain and is correctly-formatted.
	pub fn update_channel_from_unsigned_announcement<C: Deref>(
		&self, msg: &msgs::UnsignedChannelAnnouncement, chain_access: &Option<C>
	) -> Result<(), LightningError>
	where
		C::Target: chain::Access,
	{
		self.update_channel_from_unsigned_announcement_intern(msg, None, chain_access)
	}

	fn update_channel_from_unsigned_announcement_intern<C: Deref>(
		&self, msg: &msgs::UnsignedChannelAnnouncement, full_msg: Option<&msgs::ChannelAnnouncement>, chain_access: &Option<C>
	) -> Result<(), LightningError>
	where
		C::Target: chain::Access,
	{
		if msg.node_id_1 == msg.node_id_2 || msg.bitcoin_key_1 == msg.bitcoin_key_2 {
			return Err(LightningError{err: "Channel announcement node had a channel with itself".to_owned(), action: ErrorAction::IgnoreError});
		}

		let utxo_value = match &chain_access {
			&None => {
				// Tentatively accept, potentially exposing us to DoS attacks
				None
			},
			&Some(ref chain_access) => {
				match chain_access.get_utxo(&msg.chain_hash, msg.short_channel_id) {
					Ok(TxOut { value, script_pubkey }) => {
						let expected_script = Builder::new().push_opcode(opcodes::all::OP_PUSHNUM_2)
							.push_slice(&msg.bitcoin_key_1.serialize())
							.push_slice(&msg.bitcoin_key_2.serialize())
							.push_opcode(opcodes::all::OP_PUSHNUM_2)
							.push_opcode(opcodes::all::OP_CHECKMULTISIG).into_script().to_v0_p2wsh();
						if script_pubkey != expected_script {
							return Err(LightningError{err: format!("Channel announcement key ({}) didn't match on-chain script ({})", script_pubkey.to_hex(), expected_script.to_hex()), action: ErrorAction::IgnoreError});
						}
						//TODO: Check if value is worth storing, use it to inform routing, and compare it
						//to the new HTLC max field in channel_update
						Some(value)
					},
					Err(chain::AccessError::UnknownChain) => {
						return Err(LightningError{err: format!("Channel announced on an unknown chain ({})", msg.chain_hash.encode().to_hex()), action: ErrorAction::IgnoreError});
					},
					Err(chain::AccessError::UnknownTx) => {
						return Err(LightningError{err: "Channel announced without corresponding UTXO entry".to_owned(), action: ErrorAction::IgnoreError});
					},
				}
			},
		};

		let chan_info = ChannelInfo {
			features: msg.features.clone(),
			node_one: msg.node_id_1.clone(),
			one_to_two: None,
			node_two: msg.node_id_2.clone(),
			two_to_one: None,
			capacity_sats: utxo_value,
			announcement_message: if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
				{ full_msg.cloned() } else { None },
		};

		let mut channels = self.channels.write().unwrap();
		let mut nodes = self.nodes.write().unwrap();
		match channels.entry(msg.short_channel_id) {
			BtreeEntry::Occupied(mut entry) => {
				//TODO: because asking the blockchain if short_channel_id is valid is only optional
				//in the blockchain API, we need to handle it smartly here, though it's unclear
				//exactly how...
				if utxo_value.is_some() {
					// Either our UTXO provider is busted, there was a reorg, or the UTXO provider
					// only sometimes returns results. In any case remove the previous entry. Note
					// that the spec expects us to "blacklist" the node_ids involved, but we can't
					// do that because
					// a) we don't *require* a UTXO provider that always returns results.
					// b) we don't track UTXOs of channels we know about and remove them if they
					//    get reorg'd out.
					// c) it's unclear how to do so without exposing ourselves to massive DoS risk.
					Self::remove_channel_in_nodes(&mut nodes, &entry.get(), msg.short_channel_id);
					*entry.get_mut() = chan_info;
				} else {
					return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Trace)})
				}
			},
			BtreeEntry::Vacant(entry) => {
				entry.insert(chan_info);
			}
		};

		macro_rules! add_channel_to_node {
			( $node_id: expr ) => {
				match nodes.entry($node_id) {
					BtreeEntry::Occupied(node_entry) => {
						node_entry.into_mut().channels.push(msg.short_channel_id);
					},
					BtreeEntry::Vacant(node_entry) => {
						node_entry.insert(NodeInfo {
							channels: vec!(msg.short_channel_id),
							lowest_inbound_channel_fees: None,
							announcement_info: None,
						});
					}
				}
			};
		}

		add_channel_to_node!(msg.node_id_1);
		add_channel_to_node!(msg.node_id_2);

		Ok(())
	}

	/// Close a channel if a corresponding HTLC fail was sent.
	/// If permanent, removes a channel from the local storage.
	/// May cause the removal of nodes too, if this was their last channel.
	/// If not permanent, makes channels unavailable for routing.
	pub fn close_channel_from_update(&self, short_channel_id: u64, is_permanent: bool) {
		let mut channels = self.channels.write().unwrap();
		if is_permanent {
			if let Some(chan) = channels.remove(&short_channel_id) {
				let mut nodes = self.nodes.write().unwrap();
				Self::remove_channel_in_nodes(&mut nodes, &chan, short_channel_id);
			}
		} else {
			if let Some(chan) = channels.get_mut(&short_channel_id) {
				if let Some(one_to_two) = chan.one_to_two.as_mut() {
					one_to_two.enabled = false;
				}
				if let Some(two_to_one) = chan.two_to_one.as_mut() {
					two_to_one.enabled = false;
				}
			}
		}
	}

	/// Marks a node in the graph as failed.
	pub fn fail_node(&self, _node_id: &PublicKey, is_permanent: bool) {
		if is_permanent {
			// TODO: Wholly remove the node
		} else {
			// TODO: downgrade the node
		}
	}

	/// For an already known (from announcement) channel, update info about one of the directions
	/// of the channel.
	///
	/// You probably don't want to call this directly, instead relying on a NetGraphMsgHandler's
	/// RoutingMessageHandler implementation to call it indirectly. This may be useful to accept
	/// routing messages from a source using a protocol other than the lightning P2P protocol.
	pub fn update_channel<T: secp256k1::Verification>(&self, msg: &msgs::ChannelUpdate, secp_ctx: &Secp256k1<T>) -> Result<(), LightningError> {
		self.update_channel_intern(&msg.contents, Some(&msg), Some((&msg.signature, secp_ctx)))
	}

	/// For an already known (from announcement) channel, update info about one of the directions
	/// of the channel without verifying the associated signatures. Because we aren't given the
	/// associated signatures here we cannot relay the channel update to any of our peers.
	pub fn update_channel_unsigned(&self, msg: &msgs::UnsignedChannelUpdate) -> Result<(), LightningError> {
		self.update_channel_intern(msg, None, None::<(&secp256k1::Signature, &Secp256k1<secp256k1::VerifyOnly>)>)
	}

	fn update_channel_intern<T: secp256k1::Verification>(&self, msg: &msgs::UnsignedChannelUpdate, full_msg: Option<&msgs::ChannelUpdate>, sig_info: Option<(&secp256k1::Signature, &Secp256k1<T>)>) -> Result<(), LightningError> {
		let dest_node_id;
		let chan_enabled = msg.flags & (1 << 1) != (1 << 1);
		let chan_was_enabled;

		let mut channels = self.channels.write().unwrap();
		match channels.get_mut(&msg.short_channel_id) {
			None => return Err(LightningError{err: "Couldn't find channel for update".to_owned(), action: ErrorAction::IgnoreError}),
			Some(channel) => {
				if let OptionalField::Present(htlc_maximum_msat) = msg.htlc_maximum_msat {
					if htlc_maximum_msat > MAX_VALUE_MSAT {
						return Err(LightningError{err: "htlc_maximum_msat is larger than maximum possible msats".to_owned(), action: ErrorAction::IgnoreError});
					}

					if let Some(capacity_sats) = channel.capacity_sats {
						// It's possible channel capacity is available now, although it wasn't available at announcement (so the field is None).
						// Don't query UTXO set here to reduce DoS risks.
						if capacity_sats > MAX_VALUE_MSAT / 1000 || htlc_maximum_msat > capacity_sats * 1000 {
							return Err(LightningError{err: "htlc_maximum_msat is larger than channel capacity or capacity is bogus".to_owned(), action: ErrorAction::IgnoreError});
						}
					}
				}
				macro_rules! maybe_update_channel_info {
					( $target: expr, $src_node: expr ) => {
						if let Some(existing_chan_info) = $target.as_ref() {
							if existing_chan_info.last_update >= msg.timestamp {
								return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Trace)});
							}
							chan_was_enabled = existing_chan_info.enabled;
						} else {
							chan_was_enabled = false;
						}

						let last_update_message = if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
							{ full_msg.cloned() } else { None };

						let updated_channel_dir_info = DirectionalChannelInfo {
							enabled: chan_enabled,
							last_update: msg.timestamp,
							cltv_expiry_delta: msg.cltv_expiry_delta,
							htlc_minimum_msat: msg.htlc_minimum_msat,
							htlc_maximum_msat: if let OptionalField::Present(max_value) = msg.htlc_maximum_msat { Some(max_value) } else { None },
							fees: RoutingFees {
								base_msat: msg.fee_base_msat,
								proportional_millionths: msg.fee_proportional_millionths,
							},
							last_update_message
						};
						$target = Some(updated_channel_dir_info);
					}
				}

				let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.encode()[..])[..]);
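				// The least-significant bit of `flags` identifies which direction (and therefore
				// whose signature) this update describes: 0 for the `node_one` -> `node_two`
				// direction, 1 for `node_two` -> `node_one`.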
				if msg.flags & 1 == 1 {
					dest_node_id = channel.node_one.clone();
					if let Some((sig, ctx)) = sig_info {
						secp_verify_sig!(ctx, &msg_hash, &sig, &channel.node_two);
					}
					maybe_update_channel_info!(channel.two_to_one, channel.node_two);
				} else {
					dest_node_id = channel.node_two.clone();
					if let Some((sig, ctx)) = sig_info {
						secp_verify_sig!(ctx, &msg_hash, &sig, &channel.node_one);
					}
					maybe_update_channel_info!(channel.one_to_two, channel.node_one);
				}
			}
		}
2021-08-09 22:24:41 -05:00
let mut nodes = self . nodes . write ( ) . unwrap ( ) ;
2020-05-02 15:05:04 -04:00
if chan_enabled {
2021-08-09 22:24:41 -05:00
let node = nodes . get_mut ( & dest_node_id ) . unwrap ( ) ;
2020-11-24 13:38:39 -05:00
let mut base_msat = msg . fee_base_msat ;
let mut proportional_millionths = msg . fee_proportional_millionths ;
2020-05-02 15:05:04 -04:00
if let Some ( fees ) = node . lowest_inbound_channel_fees {
base_msat = cmp ::min ( base_msat , fees . base_msat ) ;
proportional_millionths = cmp ::min ( proportional_millionths , fees . proportional_millionths ) ;
}
node . lowest_inbound_channel_fees = Some ( RoutingFees {
base_msat ,
proportional_millionths
} ) ;
} else if chan_was_enabled {
2021-08-09 22:24:41 -05:00
let node = nodes . get_mut ( & dest_node_id ) . unwrap ( ) ;
2020-05-11 21:09:44 -04:00
let mut lowest_inbound_channel_fees = None ;
for chan_id in node . channels . iter ( ) {
2021-08-09 22:24:41 -05:00
let chan = channels . get ( chan_id ) . unwrap ( ) ;
2020-05-11 21:09:44 -04:00
let chan_info_opt ;
if chan . node_one = = dest_node_id {
chan_info_opt = chan . two_to_one . as_ref ( ) ;
} else {
chan_info_opt = chan . one_to_two . as_ref ( ) ;
}
if let Some ( chan_info ) = chan_info_opt {
if chan_info . enabled {
let fees = lowest_inbound_channel_fees . get_or_insert ( RoutingFees {
base_msat : u32 ::max_value ( ) , proportional_millionths : u32 ::max_value ( ) } ) ;
fees . base_msat = cmp ::min ( fees . base_msat , chan_info . fees . base_msat ) ;
fees . proportional_millionths = cmp ::min ( fees . proportional_millionths , chan_info . fees . proportional_millionths ) ;
2020-05-02 15:05:04 -04:00
}
}
}
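				// Note this is a per-field minimum across the node's enabled inbound channels: e.g. fees of
				// (base 1000, proportional 100) and (base 500, proportional 200) yield a cached minimum of
				// (base 500, proportional 100), which may not correspond to any single channel.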
2020-05-11 21:09:44 -04:00
node . lowest_inbound_channel_fees = lowest_inbound_channel_fees ;
2020-05-02 15:05:04 -04:00
}
2020-11-24 13:38:39 -05:00
Ok ( ( ) )
2020-05-02 15:05:04 -04:00
}
fn remove_channel_in_nodes ( nodes : & mut BTreeMap < PublicKey , NodeInfo > , chan : & ChannelInfo , short_channel_id : u64 ) {
macro_rules ! remove_from_node {
( $node_id : expr ) = > {
if let BtreeEntry ::Occupied ( mut entry ) = nodes . entry ( $node_id ) {
entry . get_mut ( ) . channels . retain ( | chan_id | {
short_channel_id ! = * chan_id
} ) ;
if entry . get ( ) . channels . is_empty ( ) {
entry . remove_entry ( ) ;
}
} else {
panic! ( " Had channel that pointed to unknown node (ie inconsistent network map)! " ) ;
}
}
}
2020-05-03 16:06:59 -04:00
remove_from_node! ( chan . node_one ) ;
remove_from_node! ( chan . node_two ) ;
2020-05-02 15:05:04 -04:00
}
}
2021-08-16 18:40:19 -05:00
impl ReadOnlyNetworkGraph < '_ > {
/// Returns all known valid channels' short ids along with announced channel info.
///
/// (C-not exported) because we have no mapping for `BTreeMap`s
pub fn channels ( & self ) -> & BTreeMap < u64 , ChannelInfo > {
& * self . channels
}
/// Returns all known nodes' public keys along with announced node info.
///
/// (C-not exported) because we have no mapping for `BTreeMap`s
pub fn nodes ( & self ) -> & BTreeMap < PublicKey , NodeInfo > {
& * self . nodes
}
/// Get network addresses by node id.
/// Returns None if the requested node is completely unknown,
/// or if the node's announcement has never been received.
///
/// (C-not exported) as there is no practical way to track lifetimes of returned values.
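///
/// A minimal usage sketch (hypothetical `graph` and `node_pubkey` values; marked `ignore` so
/// it is not compiled as a doctest):
///
/// ```ignore
/// let read_only_graph = graph.read_only();
/// if let Some(addresses) = read_only_graph.get_addresses(&node_pubkey) {
///     for address in addresses {
///         // e.g. try connecting to each announced address
///     }
/// }
/// ```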
pub fn get_addresses ( & self , pubkey : & PublicKey ) -> Option < & Vec < NetAddress > > {
if let Some ( node ) = self . nodes . get ( pubkey ) {
if let Some ( node_info ) = node . announcement_info . as_ref ( ) {
return Some ( & node_info . addresses )
}
}
None
}
}
2020-05-02 15:05:04 -04:00
#[ cfg(test) ]
mod tests {
2020-07-17 22:08:34 -07:00
use chain ;
2021-08-12 16:02:42 -05:00
use ln ::PaymentHash ;
2020-12-03 12:00:36 -05:00
use ln ::features ::{ ChannelFeatures , InitFeatures , NodeFeatures } ;
2021-08-12 16:02:42 -05:00
use routing ::network_graph ::{ NetGraphMsgHandler , NetworkGraph , NetworkUpdate , MAX_EXCESS_BYTES_FOR_RELAY } ;
2020-12-03 12:00:36 -05:00
use ln ::msgs ::{ Init , OptionalField , RoutingMessageHandler , UnsignedNodeAnnouncement , NodeAnnouncement ,
2021-08-12 15:30:53 -05:00
UnsignedChannelAnnouncement , ChannelAnnouncement , UnsignedChannelUpdate , ChannelUpdate ,
2020-10-22 12:44:53 -04:00
ReplyChannelRange , ReplyShortChannelIdsEnd , QueryChannelRange , QueryShortChannelIds , MAX_VALUE_MSAT } ;
2020-05-02 15:05:04 -04:00
use util ::test_utils ;
use util ::logger ::Logger ;
use util ::ser ::{ Readable , Writeable } ;
2021-08-12 16:02:42 -05:00
use util ::events ::{ Event , EventHandler , MessageSendEvent , MessageSendEventsProvider } ;
2021-03-13 14:51:36 -05:00
use util ::scid_utils ::scid_from_parts ;
2020-05-02 15:05:04 -04:00
use bitcoin ::hashes ::sha256d ::Hash as Sha256dHash ;
use bitcoin ::hashes ::Hash ;
use bitcoin ::network ::constants ::Network ;
use bitcoin ::blockdata ::constants ::genesis_block ;
use bitcoin ::blockdata ::script ::Builder ;
2020-07-17 22:08:34 -07:00
use bitcoin ::blockdata ::transaction ::TxOut ;
2020-05-02 15:05:04 -04:00
use bitcoin ::blockdata ::opcodes ;
use hex ;
use bitcoin ::secp256k1 ::key ::{ PublicKey , SecretKey } ;
use bitcoin ::secp256k1 ::{ All , Secp256k1 } ;
2021-08-01 18:22:06 +02:00
use io ;
2021-05-19 04:21:39 +00:00
use prelude ::* ;
2021-07-19 15:01:58 +02:00
use sync ::Arc ;
2020-05-02 15:05:04 -04:00
2020-07-17 22:08:34 -07:00
fn create_net_graph_msg_handler ( ) -> ( Secp256k1 < All > , NetGraphMsgHandler < Arc < test_utils ::TestChainSource > , Arc < test_utils ::TestLogger > > ) {
2020-05-02 15:05:04 -04:00
let secp_ctx = Secp256k1 ::new ( ) ;
2020-03-02 12:55:53 -05:00
let logger = Arc ::new ( test_utils ::TestLogger ::new ( ) ) ;
2020-11-29 15:20:35 -05:00
let genesis_hash = genesis_block ( Network ::Testnet ) . header . block_hash ( ) ;
2021-08-12 16:02:42 -05:00
let network_graph = NetworkGraph ::new ( genesis_hash ) ;
let net_graph_msg_handler = NetGraphMsgHandler ::new ( network_graph , None , Arc ::clone ( & logger ) ) ;
2020-05-02 15:05:04 -04:00
( secp_ctx , net_graph_msg_handler )
}
#[ test ]
fn request_full_sync_finite_times ( ) {
let ( secp_ctx , net_graph_msg_handler ) = create_net_graph_msg_handler ( ) ;
let node_id = PublicKey ::from_secret_key ( & secp_ctx , & SecretKey ::from_slice ( & hex ::decode ( " 0202020202020202020202020202020202020202020202020202020202020202 " ) . unwrap ( ) [ .. ] ) . unwrap ( ) ) ;
assert! ( net_graph_msg_handler . should_request_full_sync ( & node_id ) ) ;
assert! ( net_graph_msg_handler . should_request_full_sync ( & node_id ) ) ;
assert! ( net_graph_msg_handler . should_request_full_sync ( & node_id ) ) ;
assert! ( net_graph_msg_handler . should_request_full_sync ( & node_id ) ) ;
assert! ( net_graph_msg_handler . should_request_full_sync ( & node_id ) ) ;
assert! ( ! net_graph_msg_handler . should_request_full_sync ( & node_id ) ) ;
}
#[ test ]
fn handling_node_announcements ( ) {
let ( secp_ctx , net_graph_msg_handler ) = create_net_graph_msg_handler ( ) ;
let node_1_privkey = & SecretKey ::from_slice ( & [ 42 ; 32 ] ) . unwrap ( ) ;
let node_2_privkey = & SecretKey ::from_slice ( & [ 41 ; 32 ] ) . unwrap ( ) ;
let node_id_1 = PublicKey ::from_secret_key ( & secp_ctx , node_1_privkey ) ;
let node_id_2 = PublicKey ::from_secret_key ( & secp_ctx , node_2_privkey ) ;
let node_1_btckey = & SecretKey ::from_slice ( & [ 40 ; 32 ] ) . unwrap ( ) ;
let node_2_btckey = & SecretKey ::from_slice ( & [ 39 ; 32 ] ) . unwrap ( ) ;
let zero_hash = Sha256dHash ::hash ( & [ 0 ; 32 ] ) ;
let first_announcement_time = 500 ;
let mut unsigned_announcement = UnsignedNodeAnnouncement {
features : NodeFeatures ::known ( ) ,
timestamp : first_announcement_time ,
node_id : node_id_1 ,
rgb : [ 0 ; 3 ] ,
alias : [ 0 ; 32 ] ,
addresses : Vec ::new ( ) ,
excess_address_data : Vec ::new ( ) ,
excess_data : Vec ::new ( ) ,
} ;
let mut msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_announcement . encode ( ) [ .. ] ) [ .. ] ) ;
let valid_announcement = NodeAnnouncement {
signature : secp_ctx . sign ( & msghash , node_1_privkey ) ,
contents : unsigned_announcement . clone ( )
} ;
match net_graph_msg_handler . handle_node_announcement ( & valid_announcement ) {
Ok ( _ ) = > panic! ( ) ,
Err ( e ) = > assert_eq! ( " No existing channels for node_announcement " , e . err )
} ;
{
// Announce a channel to add a corresponding node.
let unsigned_announcement = UnsignedChannelAnnouncement {
features : ChannelFeatures ::known ( ) ,
2020-08-25 17:12:00 -04:00
chain_hash : genesis_block ( Network ::Testnet ) . header . block_hash ( ) ,
2020-05-02 15:05:04 -04:00
short_channel_id : 0 ,
node_id_1 ,
node_id_2 ,
bitcoin_key_1 : PublicKey ::from_secret_key ( & secp_ctx , node_1_btckey ) ,
bitcoin_key_2 : PublicKey ::from_secret_key ( & secp_ctx , node_2_btckey ) ,
excess_data : Vec ::new ( ) ,
} ;
let msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_announcement . encode ( ) [ .. ] ) [ .. ] ) ;
let valid_announcement = ChannelAnnouncement {
node_signature_1 : secp_ctx . sign ( & msghash , node_1_privkey ) ,
node_signature_2 : secp_ctx . sign ( & msghash , node_2_privkey ) ,
bitcoin_signature_1 : secp_ctx . sign ( & msghash , node_1_btckey ) ,
bitcoin_signature_2 : secp_ctx . sign ( & msghash , node_2_btckey ) ,
contents : unsigned_announcement . clone ( ) ,
} ;
match net_graph_msg_handler . handle_channel_announcement ( & valid_announcement ) {
Ok ( res ) = > assert! ( res ) ,
_ = > panic! ( )
} ;
}
match net_graph_msg_handler . handle_node_announcement ( & valid_announcement ) {
Ok ( res ) = > assert! ( res ) ,
Err ( _ ) = > panic! ( )
} ;
let fake_msghash = hash_to_message! ( & zero_hash ) ;
match net_graph_msg_handler . handle_node_announcement (
& NodeAnnouncement {
signature : secp_ctx . sign ( & fake_msghash , node_1_privkey ) ,
contents : unsigned_announcement . clone ( )
} ) {
Ok ( _ ) = > panic! ( ) ,
Err ( e ) = > assert_eq! ( e . err , " Invalid signature from remote node " )
} ;
unsigned_announcement . timestamp + = 1000 ;
2021-02-15 15:25:37 -05:00
unsigned_announcement . excess_data . resize ( MAX_EXCESS_BYTES_FOR_RELAY + 1 , 0 ) ;
2020-05-02 15:05:04 -04:00
msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_announcement . encode ( ) [ .. ] ) [ .. ] ) ;
let announcement_with_data = NodeAnnouncement {
signature : secp_ctx . sign ( & msghash , node_1_privkey ) ,
contents : unsigned_announcement . clone ( )
} ;
// The handler returns false because the announcement contains excess data.
match net_graph_msg_handler . handle_node_announcement ( & announcement_with_data ) {
Ok ( res ) = > assert! ( ! res ) ,
Err ( _ ) = > panic! ( )
} ;
unsigned_announcement . excess_data = Vec ::new ( ) ;
// Even though the previous announcement was not relayed further, we still accepted it,
// so we now won't accept announcements older than the previous one.
unsigned_announcement . timestamp - = 10 ;
msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_announcement . encode ( ) [ .. ] ) [ .. ] ) ;
let outdated_announcement = NodeAnnouncement {
signature : secp_ctx . sign ( & msghash , node_1_privkey ) ,
contents : unsigned_announcement . clone ( )
} ;
match net_graph_msg_handler . handle_node_announcement ( & outdated_announcement ) {
Ok ( _ ) = > panic! ( ) ,
Err ( e ) = > assert_eq! ( e . err , " Update older than last processed update " )
} ;
}
#[ test ]
fn handling_channel_announcements ( ) {
let secp_ctx = Secp256k1 ::new ( ) ;
let logger : Arc < Logger > = Arc ::new ( test_utils ::TestLogger ::new ( ) ) ;
let node_1_privkey = & SecretKey ::from_slice ( & [ 42 ; 32 ] ) . unwrap ( ) ;
let node_2_privkey = & SecretKey ::from_slice ( & [ 41 ; 32 ] ) . unwrap ( ) ;
let node_id_1 = PublicKey ::from_secret_key ( & secp_ctx , node_1_privkey ) ;
let node_id_2 = PublicKey ::from_secret_key ( & secp_ctx , node_2_privkey ) ;
let node_1_btckey = & SecretKey ::from_slice ( & [ 40 ; 32 ] ) . unwrap ( ) ;
let node_2_btckey = & SecretKey ::from_slice ( & [ 39 ; 32 ] ) . unwrap ( ) ;
let good_script = Builder ::new ( ) . push_opcode ( opcodes ::all ::OP_PUSHNUM_2 )
. push_slice ( & PublicKey ::from_secret_key ( & secp_ctx , node_1_btckey ) . serialize ( ) )
. push_slice ( & PublicKey ::from_secret_key ( & secp_ctx , node_2_btckey ) . serialize ( ) )
. push_opcode ( opcodes ::all ::OP_PUSHNUM_2 )
. push_opcode ( opcodes ::all ::OP_CHECKMULTISIG ) . into_script ( ) . to_v0_p2wsh ( ) ;
let mut unsigned_announcement = UnsignedChannelAnnouncement {
features : ChannelFeatures ::known ( ) ,
2020-08-25 17:12:00 -04:00
chain_hash : genesis_block ( Network ::Testnet ) . header . block_hash ( ) ,
2020-05-02 15:05:04 -04:00
short_channel_id : 0 ,
node_id_1 ,
node_id_2 ,
bitcoin_key_1 : PublicKey ::from_secret_key ( & secp_ctx , node_1_btckey ) ,
bitcoin_key_2 : PublicKey ::from_secret_key ( & secp_ctx , node_2_btckey ) ,
excess_data : Vec ::new ( ) ,
} ;
let mut msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_announcement . encode ( ) [ .. ] ) [ .. ] ) ;
let valid_announcement = ChannelAnnouncement {
node_signature_1 : secp_ctx . sign ( & msghash , node_1_privkey ) ,
node_signature_2 : secp_ctx . sign ( & msghash , node_2_privkey ) ,
bitcoin_signature_1 : secp_ctx . sign ( & msghash , node_1_btckey ) ,
bitcoin_signature_2 : secp_ctx . sign ( & msghash , node_2_btckey ) ,
contents : unsigned_announcement . clone ( ) ,
} ;
// Test the case where UTXO lookups are not supported (no chain source is provided).
2021-08-12 16:02:42 -05:00
let network_graph = NetworkGraph ::new ( genesis_block ( Network ::Testnet ) . header . block_hash ( ) ) ;
let mut net_graph_msg_handler = NetGraphMsgHandler ::new ( network_graph , None , Arc ::clone ( & logger ) ) ;
2020-05-02 15:05:04 -04:00
match net_graph_msg_handler . handle_channel_announcement ( & valid_announcement ) {
Ok ( res ) = > assert! ( res ) ,
_ = > panic! ( )
} ;
{
2021-08-10 09:47:27 -05:00
let network = & net_graph_msg_handler . network_graph ;
2021-08-16 18:40:19 -05:00
match network . read_only ( ) . channels ( ) . get ( & unsigned_announcement . short_channel_id ) {
2020-05-02 15:05:04 -04:00
None = > panic! ( ) ,
Some ( _ ) = > ( )
2021-08-09 22:24:41 -05:00
} ;
2020-05-02 15:05:04 -04:00
}
// If we receive an announcement for the same channel (with UTXO lookups disabled),
// drop the new one on the floor, since we can't see any changes.
match net_graph_msg_handler . handle_channel_announcement ( & valid_announcement ) {
Ok ( _ ) = > panic! ( ) ,
Err ( e ) = > assert_eq! ( e . err , " Already have knowledge of channel " )
} ;
// Test the case where the associated transaction is not on-chain (or not confirmed).
2020-07-17 22:08:34 -07:00
let chain_source = Arc ::new ( test_utils ::TestChainSource ::new ( Network ::Testnet ) ) ;
* chain_source . utxo_ret . lock ( ) . unwrap ( ) = Err ( chain ::AccessError ::UnknownTx ) ;
2021-08-12 16:02:42 -05:00
let network_graph = NetworkGraph ::new ( genesis_block ( Network ::Testnet ) . header . block_hash ( ) ) ;
net_graph_msg_handler = NetGraphMsgHandler ::new ( network_graph , Some ( chain_source . clone ( ) ) , Arc ::clone ( & logger ) ) ;
2020-05-02 15:05:04 -04:00
unsigned_announcement . short_channel_id + = 1 ;
msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_announcement . encode ( ) [ .. ] ) [ .. ] ) ;
let valid_announcement = ChannelAnnouncement {
node_signature_1 : secp_ctx . sign ( & msghash , node_1_privkey ) ,
node_signature_2 : secp_ctx . sign ( & msghash , node_2_privkey ) ,
bitcoin_signature_1 : secp_ctx . sign ( & msghash , node_1_btckey ) ,
bitcoin_signature_2 : secp_ctx . sign ( & msghash , node_2_btckey ) ,
contents : unsigned_announcement . clone ( ) ,
} ;
match net_graph_msg_handler . handle_channel_announcement ( & valid_announcement ) {
Ok ( _ ) = > panic! ( ) ,
Err ( e ) = > assert_eq! ( e . err , " Channel announced without corresponding UTXO entry " )
} ;
// Now test the case where the transaction is found in the UTXO set and the script is correct.
unsigned_announcement . short_channel_id + = 1 ;
2020-07-17 22:08:34 -07:00
* chain_source . utxo_ret . lock ( ) . unwrap ( ) = Ok ( TxOut { value : 0 , script_pubkey : good_script . clone ( ) } ) ;
2020-05-02 15:05:04 -04:00
msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_announcement . encode ( ) [ .. ] ) [ .. ] ) ;
let valid_announcement = ChannelAnnouncement {
node_signature_1 : secp_ctx . sign ( & msghash , node_1_privkey ) ,
node_signature_2 : secp_ctx . sign ( & msghash , node_2_privkey ) ,
bitcoin_signature_1 : secp_ctx . sign ( & msghash , node_1_btckey ) ,
bitcoin_signature_2 : secp_ctx . sign ( & msghash , node_2_btckey ) ,
contents : unsigned_announcement . clone ( ) ,
} ;
match net_graph_msg_handler . handle_channel_announcement ( & valid_announcement ) {
Ok ( res ) = > assert! ( res ) ,
_ = > panic! ( )
} ;
{
2021-08-10 09:47:27 -05:00
let network = & net_graph_msg_handler . network_graph ;
2021-08-16 18:40:19 -05:00
match network . read_only ( ) . channels ( ) . get ( & unsigned_announcement . short_channel_id ) {
2020-05-02 15:05:04 -04:00
None = > panic! ( ) ,
Some ( _ ) = > ( )
2021-08-09 22:24:41 -05:00
} ;
2020-05-02 15:05:04 -04:00
}
// If we receive an announcement for the same channel (but the TX is not confirmed),
// drop the new one on the floor, since we can't see any changes.
2020-07-17 22:08:34 -07:00
* chain_source . utxo_ret . lock ( ) . unwrap ( ) = Err ( chain ::AccessError ::UnknownTx ) ;
2020-05-02 15:05:04 -04:00
match net_graph_msg_handler . handle_channel_announcement ( & valid_announcement ) {
Ok ( _ ) = > panic! ( ) ,
Err ( e ) = > assert_eq! ( e . err , " Channel announced without corresponding UTXO entry " )
} ;
// But if it is confirmed, replace the channel
2020-07-17 22:08:34 -07:00
* chain_source . utxo_ret . lock ( ) . unwrap ( ) = Ok ( TxOut { value : 0 , script_pubkey : good_script } ) ;
2020-05-02 15:05:04 -04:00
unsigned_announcement . features = ChannelFeatures ::empty ( ) ;
msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_announcement . encode ( ) [ .. ] ) [ .. ] ) ;
let valid_announcement = ChannelAnnouncement {
node_signature_1 : secp_ctx . sign ( & msghash , node_1_privkey ) ,
node_signature_2 : secp_ctx . sign ( & msghash , node_2_privkey ) ,
bitcoin_signature_1 : secp_ctx . sign ( & msghash , node_1_btckey ) ,
bitcoin_signature_2 : secp_ctx . sign ( & msghash , node_2_btckey ) ,
contents : unsigned_announcement . clone ( ) ,
} ;
match net_graph_msg_handler . handle_channel_announcement ( & valid_announcement ) {
Ok ( res ) = > assert! ( res ) ,
_ = > panic! ( )
} ;
{
2021-08-10 09:47:27 -05:00
let network = & net_graph_msg_handler . network_graph ;
2021-08-16 18:40:19 -05:00
match network . read_only ( ) . channels ( ) . get ( & unsigned_announcement . short_channel_id ) {
2020-05-02 15:05:04 -04:00
Some ( channel_entry ) = > {
assert_eq! ( channel_entry . features , ChannelFeatures ::empty ( ) ) ;
} ,
_ = > panic! ( )
2021-08-09 22:24:41 -05:00
} ;
2020-05-02 15:05:04 -04:00
}
// Don't relay valid channels with excess data
unsigned_announcement . short_channel_id + = 1 ;
2021-02-15 15:25:37 -05:00
unsigned_announcement . excess_data . resize ( MAX_EXCESS_BYTES_FOR_RELAY + 1 , 0 ) ;
2020-05-02 15:05:04 -04:00
msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_announcement . encode ( ) [ .. ] ) [ .. ] ) ;
let valid_announcement = ChannelAnnouncement {
node_signature_1 : secp_ctx . sign ( & msghash , node_1_privkey ) ,
node_signature_2 : secp_ctx . sign ( & msghash , node_2_privkey ) ,
bitcoin_signature_1 : secp_ctx . sign ( & msghash , node_1_btckey ) ,
bitcoin_signature_2 : secp_ctx . sign ( & msghash , node_2_btckey ) ,
contents : unsigned_announcement . clone ( ) ,
} ;
match net_graph_msg_handler . handle_channel_announcement ( & valid_announcement ) {
Ok ( res ) = > assert! ( ! res ) ,
_ = > panic! ( )
} ;
unsigned_announcement . excess_data = Vec ::new ( ) ;
let invalid_sig_announcement = ChannelAnnouncement {
node_signature_1 : secp_ctx . sign ( & msghash , node_1_privkey ) ,
node_signature_2 : secp_ctx . sign ( & msghash , node_2_privkey ) ,
bitcoin_signature_1 : secp_ctx . sign ( & msghash , node_1_btckey ) ,
bitcoin_signature_2 : secp_ctx . sign ( & msghash , node_1_btckey ) ,
contents : unsigned_announcement . clone ( ) ,
} ;
match net_graph_msg_handler . handle_channel_announcement ( & invalid_sig_announcement ) {
Ok ( _ ) = > panic! ( ) ,
Err ( e ) = > assert_eq! ( e . err , " Invalid signature from remote node " )
} ;
unsigned_announcement . node_id_1 = PublicKey ::from_secret_key ( & secp_ctx , node_2_privkey ) ;
msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_announcement . encode ( ) [ .. ] ) [ .. ] ) ;
let channel_to_itself_announcement = ChannelAnnouncement {
2020-11-24 13:38:39 -05:00
node_signature_1 : secp_ctx . sign ( & msghash , node_2_privkey ) ,
node_signature_2 : secp_ctx . sign ( & msghash , node_2_privkey ) ,
2020-05-02 15:05:04 -04:00
bitcoin_signature_1 : secp_ctx . sign ( & msghash , node_1_btckey ) ,
bitcoin_signature_2 : secp_ctx . sign ( & msghash , node_2_btckey ) ,
contents : unsigned_announcement . clone ( ) ,
} ;
match net_graph_msg_handler . handle_channel_announcement ( & channel_to_itself_announcement ) {
Ok ( _ ) = > panic! ( ) ,
Err ( e ) = > assert_eq! ( e . err , " Channel announcement node had a channel with itself " )
} ;
}
#[ test ]
fn handling_channel_update ( ) {
2020-06-29 21:28:15 +03:00
let secp_ctx = Secp256k1 ::new ( ) ;
let logger : Arc < Logger > = Arc ::new ( test_utils ::TestLogger ::new ( ) ) ;
2020-07-17 22:08:34 -07:00
let chain_source = Arc ::new ( test_utils ::TestChainSource ::new ( Network ::Testnet ) ) ;
2021-08-12 16:02:42 -05:00
let network_graph = NetworkGraph ::new ( genesis_block ( Network ::Testnet ) . header . block_hash ( ) ) ;
let net_graph_msg_handler = NetGraphMsgHandler ::new ( network_graph , Some ( chain_source . clone ( ) ) , Arc ::clone ( & logger ) ) ;
2020-06-29 21:28:15 +03:00
2020-05-02 15:05:04 -04:00
let node_1_privkey = & SecretKey ::from_slice ( & [ 42 ; 32 ] ) . unwrap ( ) ;
let node_2_privkey = & SecretKey ::from_slice ( & [ 41 ; 32 ] ) . unwrap ( ) ;
let node_id_1 = PublicKey ::from_secret_key ( & secp_ctx , node_1_privkey ) ;
let node_id_2 = PublicKey ::from_secret_key ( & secp_ctx , node_2_privkey ) ;
let node_1_btckey = & SecretKey ::from_slice ( & [ 40 ; 32 ] ) . unwrap ( ) ;
let node_2_btckey = & SecretKey ::from_slice ( & [ 39 ; 32 ] ) . unwrap ( ) ;
let zero_hash = Sha256dHash ::hash ( & [ 0 ; 32 ] ) ;
let short_channel_id = 0 ;
2020-08-25 17:12:00 -04:00
let chain_hash = genesis_block ( Network ::Testnet ) . header . block_hash ( ) ;
2020-06-29 21:28:15 +03:00
let amount_sats = 1000_000 ;
2020-05-02 15:05:04 -04:00
{
// Announce a channel we will update
2020-06-29 21:28:15 +03:00
let good_script = Builder ::new ( ) . push_opcode ( opcodes ::all ::OP_PUSHNUM_2 )
. push_slice ( & PublicKey ::from_secret_key ( & secp_ctx , node_1_btckey ) . serialize ( ) )
. push_slice ( & PublicKey ::from_secret_key ( & secp_ctx , node_2_btckey ) . serialize ( ) )
. push_opcode ( opcodes ::all ::OP_PUSHNUM_2 )
. push_opcode ( opcodes ::all ::OP_CHECKMULTISIG ) . into_script ( ) . to_v0_p2wsh ( ) ;
2020-07-17 22:08:34 -07:00
* chain_source . utxo_ret . lock ( ) . unwrap ( ) = Ok ( TxOut { value : amount_sats , script_pubkey : good_script . clone ( ) } ) ;
2020-05-02 15:05:04 -04:00
let unsigned_announcement = UnsignedChannelAnnouncement {
features : ChannelFeatures ::empty ( ) ,
chain_hash ,
short_channel_id ,
node_id_1 ,
node_id_2 ,
bitcoin_key_1 : PublicKey ::from_secret_key ( & secp_ctx , node_1_btckey ) ,
bitcoin_key_2 : PublicKey ::from_secret_key ( & secp_ctx , node_2_btckey ) ,
excess_data : Vec ::new ( ) ,
} ;
let msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_announcement . encode ( ) [ .. ] ) [ .. ] ) ;
let valid_channel_announcement = ChannelAnnouncement {
node_signature_1 : secp_ctx . sign ( & msghash , node_1_privkey ) ,
node_signature_2 : secp_ctx . sign ( & msghash , node_2_privkey ) ,
bitcoin_signature_1 : secp_ctx . sign ( & msghash , node_1_btckey ) ,
bitcoin_signature_2 : secp_ctx . sign ( & msghash , node_2_btckey ) ,
contents : unsigned_announcement . clone ( ) ,
} ;
match net_graph_msg_handler . handle_channel_announcement ( & valid_channel_announcement ) {
Ok ( _ ) = > ( ) ,
Err ( _ ) = > panic! ( )
} ;
}
let mut unsigned_channel_update = UnsignedChannelUpdate {
chain_hash ,
short_channel_id ,
timestamp : 100 ,
flags : 0 ,
cltv_expiry_delta : 144 ,
htlc_minimum_msat : 1000000 ,
2020-06-28 14:43:10 +03:00
htlc_maximum_msat : OptionalField ::Absent ,
2020-05-02 15:05:04 -04:00
fee_base_msat : 10000 ,
fee_proportional_millionths : 20 ,
excess_data : Vec ::new ( )
} ;
let msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_channel_update . encode ( ) [ .. ] ) [ .. ] ) ;
let valid_channel_update = ChannelUpdate {
signature : secp_ctx . sign ( & msghash , node_1_privkey ) ,
contents : unsigned_channel_update . clone ( )
} ;
match net_graph_msg_handler . handle_channel_update ( & valid_channel_update ) {
Ok ( res ) = > assert! ( res ) ,
_ = > panic! ( )
} ;
{
2021-08-10 09:47:27 -05:00
let network = & net_graph_msg_handler . network_graph ;
2021-08-16 18:40:19 -05:00
match network . read_only ( ) . channels ( ) . get ( & short_channel_id ) {
2020-05-02 15:05:04 -04:00
None = > panic! ( ) ,
Some ( channel_info ) = > {
2020-05-03 16:06:59 -04:00
assert_eq! ( channel_info . one_to_two . as_ref ( ) . unwrap ( ) . cltv_expiry_delta , 144 ) ;
assert! ( channel_info . two_to_one . is_none ( ) ) ;
2020-05-02 15:05:04 -04:00
}
2021-08-09 22:24:41 -05:00
} ;
2020-05-02 15:05:04 -04:00
}
unsigned_channel_update . timestamp + = 100 ;
2021-02-15 15:25:37 -05:00
unsigned_channel_update . excess_data . resize ( MAX_EXCESS_BYTES_FOR_RELAY + 1 , 0 ) ;
2020-05-02 15:05:04 -04:00
let msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_channel_update . encode ( ) [ .. ] ) [ .. ] ) ;
let valid_channel_update = ChannelUpdate {
signature : secp_ctx . sign ( & msghash , node_1_privkey ) ,
contents : unsigned_channel_update . clone ( )
} ;
// The handler returns false because the update contains excess data.
match net_graph_msg_handler . handle_channel_update ( & valid_channel_update ) {
Ok ( res ) = > assert! ( ! res ) ,
_ = > panic! ( )
} ;
2020-06-29 20:48:01 +03:00
unsigned_channel_update . timestamp + = 10 ;
2020-05-02 15:05:04 -04:00
unsigned_channel_update . short_channel_id + = 1 ;
let msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_channel_update . encode ( ) [ .. ] ) [ .. ] ) ;
let valid_channel_update = ChannelUpdate {
signature : secp_ctx . sign ( & msghash , node_1_privkey ) ,
contents : unsigned_channel_update . clone ( )
} ;
match net_graph_msg_handler . handle_channel_update ( & valid_channel_update ) {
Ok ( _ ) = > panic! ( ) ,
Err ( e ) = > assert_eq! ( e . err , " Couldn't find channel for update " )
} ;
unsigned_channel_update . short_channel_id = short_channel_id ;
2020-06-29 21:28:15 +03:00
unsigned_channel_update . htlc_maximum_msat = OptionalField ::Present ( MAX_VALUE_MSAT + 1 ) ;
let msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_channel_update . encode ( ) [ .. ] ) [ .. ] ) ;
let valid_channel_update = ChannelUpdate {
signature : secp_ctx . sign ( & msghash , node_1_privkey ) ,
contents : unsigned_channel_update . clone ( )
} ;
match net_graph_msg_handler . handle_channel_update ( & valid_channel_update ) {
Ok ( _ ) = > panic! ( ) ,
Err ( e ) = > assert_eq! ( e . err , " htlc_maximum_msat is larger than maximum possible msats " )
} ;
unsigned_channel_update . htlc_maximum_msat = OptionalField ::Absent ;
unsigned_channel_update . htlc_maximum_msat = OptionalField ::Present ( amount_sats * 1000 + 1 ) ;
let msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_channel_update . encode ( ) [ .. ] ) [ .. ] ) ;
let valid_channel_update = ChannelUpdate {
signature : secp_ctx . sign ( & msghash , node_1_privkey ) ,
contents : unsigned_channel_update . clone ( )
} ;
match net_graph_msg_handler . handle_channel_update ( & valid_channel_update ) {
Ok ( _ ) = > panic! ( ) ,
2020-11-12 18:59:06 -05:00
Err ( e ) = > assert_eq! ( e . err , " htlc_maximum_msat is larger than channel capacity or capacity is bogus " )
2020-06-29 21:28:15 +03:00
} ;
unsigned_channel_update . htlc_maximum_msat = OptionalField ::Absent ;
2020-05-02 15:05:04 -04:00
// Even though the previous update was not relayed further, we still accepted it,
// so we now won't accept an update older than the previous one.
unsigned_channel_update . timestamp - = 10 ;
let msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_channel_update . encode ( ) [ .. ] ) [ .. ] ) ;
let valid_channel_update = ChannelUpdate {
signature : secp_ctx . sign ( & msghash , node_1_privkey ) ,
contents : unsigned_channel_update . clone ( )
} ;
match net_graph_msg_handler . handle_channel_update ( & valid_channel_update ) {
Ok ( _ ) = > panic! ( ) ,
Err ( e ) = > assert_eq! ( e . err , " Update older than last processed update " )
} ;
unsigned_channel_update . timestamp + = 500 ;
let fake_msghash = hash_to_message! ( & zero_hash ) ;
let invalid_sig_channel_update = ChannelUpdate {
signature : secp_ctx . sign ( & fake_msghash , node_1_privkey ) ,
contents : unsigned_channel_update . clone ( )
} ;
match net_graph_msg_handler . handle_channel_update ( & invalid_sig_channel_update ) {
Ok ( _ ) = > panic! ( ) ,
Err ( e ) = > assert_eq! ( e . err , " Invalid signature from remote node " )
} ;
}
#[ test ]
2021-08-12 16:02:42 -05:00
fn handling_network_update ( ) {
let logger = test_utils ::TestLogger ::new ( ) ;
let chain_source = Arc ::new ( test_utils ::TestChainSource ::new ( Network ::Testnet ) ) ;
let genesis_hash = genesis_block ( Network ::Testnet ) . header . block_hash ( ) ;
let network_graph = NetworkGraph ::new ( genesis_hash ) ;
let net_graph_msg_handler = NetGraphMsgHandler ::new ( network_graph , Some ( chain_source . clone ( ) ) , & logger ) ;
let secp_ctx = Secp256k1 ::new ( ) ;
2020-05-02 15:05:04 -04:00
let node_1_privkey = & SecretKey ::from_slice ( & [ 42 ; 32 ] ) . unwrap ( ) ;
let node_2_privkey = & SecretKey ::from_slice ( & [ 41 ; 32 ] ) . unwrap ( ) ;
let node_id_1 = PublicKey ::from_secret_key ( & secp_ctx , node_1_privkey ) ;
let node_id_2 = PublicKey ::from_secret_key ( & secp_ctx , node_2_privkey ) ;
let node_1_btckey = & SecretKey ::from_slice ( & [ 40 ; 32 ] ) . unwrap ( ) ;
let node_2_btckey = & SecretKey ::from_slice ( & [ 39 ; 32 ] ) . unwrap ( ) ;
let short_channel_id = 0 ;
2020-08-25 17:12:00 -04:00
let chain_hash = genesis_block ( Network ::Testnet ) . header . block_hash ( ) ;
2021-08-12 16:02:42 -05:00
let network_graph = & net_graph_msg_handler . network_graph ;
2020-05-02 15:05:04 -04:00
{
// There are no nodes in the table at the beginning.
2021-08-12 16:02:42 -05:00
assert_eq! ( network_graph . read_only ( ) . nodes ( ) . len ( ) , 0 ) ;
2020-05-02 15:05:04 -04:00
}
{
// Announce a channel we will update
let unsigned_announcement = UnsignedChannelAnnouncement {
features : ChannelFeatures ::empty ( ) ,
chain_hash ,
short_channel_id ,
node_id_1 ,
node_id_2 ,
bitcoin_key_1 : PublicKey ::from_secret_key ( & secp_ctx , node_1_btckey ) ,
bitcoin_key_2 : PublicKey ::from_secret_key ( & secp_ctx , node_2_btckey ) ,
excess_data : Vec ::new ( ) ,
} ;
let msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_announcement . encode ( ) [ .. ] ) [ .. ] ) ;
let valid_channel_announcement = ChannelAnnouncement {
node_signature_1 : secp_ctx . sign ( & msghash , node_1_privkey ) ,
node_signature_2 : secp_ctx . sign ( & msghash , node_2_privkey ) ,
bitcoin_signature_1 : secp_ctx . sign ( & msghash , node_1_btckey ) ,
bitcoin_signature_2 : secp_ctx . sign ( & msghash , node_2_btckey ) ,
contents : unsigned_announcement . clone ( ) ,
} ;
2021-08-12 16:02:42 -05:00
let chain_source : Option < & test_utils ::TestChainSource > = None ;
assert! ( network_graph . update_channel_from_announcement ( & valid_channel_announcement , & chain_source , & secp_ctx ) . is_ok ( ) ) ;
assert! ( network_graph . read_only ( ) . channels ( ) . get ( & short_channel_id ) . is_some ( ) ) ;
2020-05-02 15:05:04 -04:00
2020-05-03 16:06:59 -04:00
let unsigned_channel_update = UnsignedChannelUpdate {
chain_hash ,
short_channel_id ,
timestamp : 100 ,
flags : 0 ,
cltv_expiry_delta : 144 ,
htlc_minimum_msat : 1000000 ,
2020-06-28 14:43:10 +03:00
htlc_maximum_msat : OptionalField ::Absent ,
2020-05-03 16:06:59 -04:00
fee_base_msat : 10000 ,
fee_proportional_millionths : 20 ,
excess_data : Vec ::new ( )
} ;
let msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_channel_update . encode ( ) [ .. ] ) [ .. ] ) ;
let valid_channel_update = ChannelUpdate {
signature : secp_ctx . sign ( & msghash , node_1_privkey ) ,
contents : unsigned_channel_update . clone ( )
} ;
2021-08-12 16:02:42 -05:00
assert! ( network_graph . read_only ( ) . channels ( ) . get ( & short_channel_id ) . unwrap ( ) . one_to_two . is_none ( ) ) ;
net_graph_msg_handler . handle_event ( & Event ::PaymentFailed {
payment_hash : PaymentHash ( [ 0 ; 32 ] ) ,
rejected_by_dest : false ,
network_update : Some ( NetworkUpdate ::ChannelUpdateMessage {
msg : valid_channel_update ,
} ) ,
error_code : None ,
error_data : None ,
} ) ;
assert! ( network_graph . read_only ( ) . channels ( ) . get ( & short_channel_id ) . unwrap ( ) . one_to_two . is_some ( ) ) ;
2020-05-03 16:06:59 -04:00
}
// Non-permanent closing just disables a channel
{
2021-08-12 16:02:42 -05:00
match network_graph . read_only ( ) . channels ( ) . get ( & short_channel_id ) {
2020-05-03 16:06:59 -04:00
None = > panic! ( ) ,
Some ( channel_info ) = > {
2021-08-12 16:02:42 -05:00
assert! ( channel_info . one_to_two . as_ref ( ) . unwrap ( ) . enabled ) ;
2020-05-03 16:06:59 -04:00
}
2021-08-09 22:24:41 -05:00
} ;
2020-05-02 15:05:04 -04:00
2021-08-12 16:02:42 -05:00
net_graph_msg_handler . handle_event ( & Event ::PaymentFailed {
payment_hash : PaymentHash ( [ 0 ; 32 ] ) ,
rejected_by_dest : false ,
network_update : Some ( NetworkUpdate ::ChannelClosed {
short_channel_id ,
is_permanent : false ,
} ) ,
error_code : None ,
error_data : None ,
} ) ;
2020-05-02 15:05:04 -04:00
2021-08-12 16:02:42 -05:00
match network_graph . read_only ( ) . channels ( ) . get ( & short_channel_id ) {
2020-05-02 15:05:04 -04:00
None = > panic! ( ) ,
Some ( channel_info ) = > {
2020-05-03 16:06:59 -04:00
assert! ( ! channel_info . one_to_two . as_ref ( ) . unwrap ( ) . enabled ) ;
2020-05-02 15:05:04 -04:00
}
2021-08-09 22:24:41 -05:00
} ;
2020-05-02 15:05:04 -04:00
}
// Permanent closing deletes a channel
{
2021-08-12 16:02:42 -05:00
net_graph_msg_handler . handle_event ( & Event ::PaymentFailed {
payment_hash : PaymentHash ( [ 0 ; 32 ] ) ,
rejected_by_dest : false ,
network_update : Some ( NetworkUpdate ::ChannelClosed {
short_channel_id ,
is_permanent : true ,
} ) ,
error_code : None ,
error_data : None ,
} ) ;
assert_eq! ( network_graph . read_only ( ) . channels ( ) . len ( ) , 0 ) ;
2020-05-02 15:05:04 -04:00
// Nodes are also deleted because there are no associated channels anymore
2021-08-12 16:02:42 -05:00
assert_eq! ( network_graph . read_only ( ) . nodes ( ) . len ( ) , 0 ) ;
2020-05-02 15:05:04 -04:00
}
2021-08-12 15:30:53 -05:00
// TODO: Test NetworkUpdate::NodeFailure, which is not implemented yet.
2020-05-02 15:05:04 -04:00
}
#[ test ]
fn getting_next_channel_announcements ( ) {
let ( secp_ctx , net_graph_msg_handler ) = create_net_graph_msg_handler ( ) ;
let node_1_privkey = & SecretKey ::from_slice ( & [ 42 ; 32 ] ) . unwrap ( ) ;
let node_2_privkey = & SecretKey ::from_slice ( & [ 41 ; 32 ] ) . unwrap ( ) ;
let node_id_1 = PublicKey ::from_secret_key ( & secp_ctx , node_1_privkey ) ;
let node_id_2 = PublicKey ::from_secret_key ( & secp_ctx , node_2_privkey ) ;
let node_1_btckey = & SecretKey ::from_slice ( & [ 40 ; 32 ] ) . unwrap ( ) ;
let node_2_btckey = & SecretKey ::from_slice ( & [ 39 ; 32 ] ) . unwrap ( ) ;
let short_channel_id = 1 ;
2020-08-25 17:12:00 -04:00
let chain_hash = genesis_block ( Network ::Testnet ) . header . block_hash ( ) ;
2020-05-02 15:05:04 -04:00
// Channels were not announced yet.
let channels_with_announcements = net_graph_msg_handler . get_next_channel_announcements ( 0 , 1 ) ;
assert_eq! ( channels_with_announcements . len ( ) , 0 ) ;
{
// Announce a channel we will update
let unsigned_announcement = UnsignedChannelAnnouncement {
features : ChannelFeatures ::empty ( ) ,
chain_hash ,
short_channel_id ,
node_id_1 ,
node_id_2 ,
bitcoin_key_1 : PublicKey ::from_secret_key ( & secp_ctx , node_1_btckey ) ,
bitcoin_key_2 : PublicKey ::from_secret_key ( & secp_ctx , node_2_btckey ) ,
excess_data : Vec ::new ( ) ,
} ;
let msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_announcement . encode ( ) [ .. ] ) [ .. ] ) ;
let valid_channel_announcement = ChannelAnnouncement {
node_signature_1 : secp_ctx . sign ( & msghash , node_1_privkey ) ,
node_signature_2 : secp_ctx . sign ( & msghash , node_2_privkey ) ,
bitcoin_signature_1 : secp_ctx . sign ( & msghash , node_1_btckey ) ,
bitcoin_signature_2 : secp_ctx . sign ( & msghash , node_2_btckey ) ,
contents : unsigned_announcement . clone ( ) ,
} ;
match net_graph_msg_handler . handle_channel_announcement ( & valid_channel_announcement ) {
Ok ( _ ) = > ( ) ,
Err ( _ ) = > panic! ( )
} ;
}
// Contains the initial channel announcement now.
let channels_with_announcements = net_graph_msg_handler . get_next_channel_announcements ( short_channel_id , 1 ) ;
assert_eq! ( channels_with_announcements . len ( ) , 1 ) ;
if let Some ( channel_announcements ) = channels_with_announcements . first ( ) {
let & ( _ , ref update_1 , ref update_2 ) = channel_announcements ;
assert_eq! ( update_1 , & None ) ;
assert_eq! ( update_2 , & None ) ;
} else {
panic! ( ) ;
}
{
// Valid channel update
let unsigned_channel_update = UnsignedChannelUpdate {
chain_hash ,
short_channel_id ,
timestamp : 101 ,
flags : 0 ,
cltv_expiry_delta : 144 ,
htlc_minimum_msat : 1000000 ,
2020-06-28 14:43:10 +03:00
htlc_maximum_msat : OptionalField ::Absent ,
2020-05-02 15:05:04 -04:00
fee_base_msat : 10000 ,
fee_proportional_millionths : 20 ,
excess_data : Vec ::new ( )
} ;
let msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_channel_update . encode ( ) [ .. ] ) [ .. ] ) ;
let valid_channel_update = ChannelUpdate {
signature : secp_ctx . sign ( & msghash , node_1_privkey ) ,
contents : unsigned_channel_update . clone ( )
} ;
match net_graph_msg_handler . handle_channel_update ( & valid_channel_update ) {
Ok ( _ ) = > ( ) ,
Err ( _ ) = > panic! ( )
} ;
}
// Now contains an initial announcement and an update.
let channels_with_announcements = net_graph_msg_handler . get_next_channel_announcements ( short_channel_id , 1 ) ;
assert_eq! ( channels_with_announcements . len ( ) , 1 ) ;
if let Some ( channel_announcements ) = channels_with_announcements . first ( ) {
let & ( _ , ref update_1 , ref update_2 ) = channel_announcements ;
assert_ne! ( update_1 , & None ) ;
assert_eq! ( update_2 , & None ) ;
} else {
panic! ( ) ;
}
{
// Channel update with excess data.
let unsigned_channel_update = UnsignedChannelUpdate {
chain_hash ,
short_channel_id ,
timestamp : 102 ,
flags : 0 ,
cltv_expiry_delta : 144 ,
htlc_minimum_msat : 1000000 ,
2020-06-28 14:43:10 +03:00
htlc_maximum_msat : OptionalField ::Absent ,
2020-05-02 15:05:04 -04:00
fee_base_msat : 10000 ,
fee_proportional_millionths : 20 ,
2021-02-15 15:25:37 -05:00
excess_data : [ 1 ; MAX_EXCESS_BYTES_FOR_RELAY + 1 ] . to_vec ( )
2020-05-02 15:05:04 -04:00
} ;
let msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_channel_update . encode ( ) [ .. ] ) [ .. ] ) ;
let valid_channel_update = ChannelUpdate {
signature : secp_ctx . sign ( & msghash , node_1_privkey ) ,
contents : unsigned_channel_update . clone ( )
} ;
match net_graph_msg_handler . handle_channel_update ( & valid_channel_update ) {
Ok ( _ ) = > ( ) ,
Err ( _ ) = > panic! ( )
} ;
}
// Test that announcements with excess data won't be returned
let channels_with_announcements = net_graph_msg_handler . get_next_channel_announcements ( short_channel_id , 1 ) ;
assert_eq! ( channels_with_announcements . len ( ) , 1 ) ;
if let Some ( channel_announcements ) = channels_with_announcements . first ( ) {
let & ( _ , ref update_1 , ref update_2 ) = channel_announcements ;
assert_eq! ( update_1 , & None ) ;
assert_eq! ( update_2 , & None ) ;
} else {
panic! ( ) ;
}
// A further starting point has no channels after it.
let channels_with_announcements = net_graph_msg_handler . get_next_channel_announcements ( short_channel_id + 1000 , 1 ) ;
assert_eq! ( channels_with_announcements . len ( ) , 0 ) ;
}
#[ test ]
fn getting_next_node_announcements ( ) {
let ( secp_ctx , net_graph_msg_handler ) = create_net_graph_msg_handler ( ) ;
let node_1_privkey = & SecretKey ::from_slice ( & [ 42 ; 32 ] ) . unwrap ( ) ;
let node_2_privkey = & SecretKey ::from_slice ( & [ 41 ; 32 ] ) . unwrap ( ) ;
let node_id_1 = PublicKey ::from_secret_key ( & secp_ctx , node_1_privkey ) ;
let node_id_2 = PublicKey ::from_secret_key ( & secp_ctx , node_2_privkey ) ;
let node_1_btckey = & SecretKey ::from_slice ( & [ 40 ; 32 ] ) . unwrap ( ) ;
let node_2_btckey = & SecretKey ::from_slice ( & [ 39 ; 32 ] ) . unwrap ( ) ;
let short_channel_id = 1 ;
2020-08-25 17:12:00 -04:00
let chain_hash = genesis_block ( Network ::Testnet ) . header . block_hash ( ) ;
2020-05-02 15:05:04 -04:00
// No nodes yet.
let next_announcements = net_graph_msg_handler . get_next_node_announcements ( None , 10 ) ;
assert_eq! ( next_announcements . len ( ) , 0 ) ;
{
// Announce a channel to add 2 nodes
let unsigned_announcement = UnsignedChannelAnnouncement {
features : ChannelFeatures ::empty ( ) ,
chain_hash ,
short_channel_id ,
node_id_1 ,
node_id_2 ,
bitcoin_key_1 : PublicKey ::from_secret_key ( & secp_ctx , node_1_btckey ) ,
bitcoin_key_2 : PublicKey ::from_secret_key ( & secp_ctx , node_2_btckey ) ,
excess_data : Vec ::new ( ) ,
} ;
let msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_announcement . encode ( ) [ .. ] ) [ .. ] ) ;
let valid_channel_announcement = ChannelAnnouncement {
node_signature_1 : secp_ctx . sign ( & msghash , node_1_privkey ) ,
node_signature_2 : secp_ctx . sign ( & msghash , node_2_privkey ) ,
bitcoin_signature_1 : secp_ctx . sign ( & msghash , node_1_btckey ) ,
bitcoin_signature_2 : secp_ctx . sign ( & msghash , node_2_btckey ) ,
contents : unsigned_announcement . clone ( ) ,
} ;
match net_graph_msg_handler . handle_channel_announcement ( & valid_channel_announcement ) {
Ok ( _ ) = > ( ) ,
Err ( _ ) = > panic! ( )
} ;
}
// Nodes were never announced
let next_announcements = net_graph_msg_handler . get_next_node_announcements ( None , 3 ) ;
assert_eq! ( next_announcements . len ( ) , 0 ) ;
{
let mut unsigned_announcement = UnsignedNodeAnnouncement {
features : NodeFeatures ::known ( ) ,
timestamp : 1000 ,
node_id : node_id_1 ,
rgb : [ 0 ; 3 ] ,
alias : [ 0 ; 32 ] ,
addresses : Vec ::new ( ) ,
excess_address_data : Vec ::new ( ) ,
excess_data : Vec ::new ( ) ,
} ;
let msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_announcement . encode ( ) [ .. ] ) [ .. ] ) ;
let valid_announcement = NodeAnnouncement {
signature : secp_ctx . sign ( & msghash , node_1_privkey ) ,
contents : unsigned_announcement . clone ( )
} ;
match net_graph_msg_handler . handle_node_announcement ( & valid_announcement ) {
Ok ( _ ) = > ( ) ,
Err ( _ ) = > panic! ( )
} ;
unsigned_announcement . node_id = node_id_2 ;
let msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_announcement . encode ( ) [ .. ] ) [ .. ] ) ;
let valid_announcement = NodeAnnouncement {
signature : secp_ctx . sign ( & msghash , node_2_privkey ) ,
contents : unsigned_announcement . clone ( )
} ;
match net_graph_msg_handler . handle_node_announcement ( & valid_announcement ) {
Ok ( _ ) = > ( ) ,
Err ( _ ) = > panic! ( )
} ;
}
let next_announcements = net_graph_msg_handler . get_next_node_announcements ( None , 3 ) ;
assert_eq! ( next_announcements . len ( ) , 2 ) ;
// Skip the first node.
let next_announcements = net_graph_msg_handler . get_next_node_announcements ( Some ( & node_id_1 ) , 2 ) ;
assert_eq! ( next_announcements . len ( ) , 1 ) ;
{
// A later announcement which should not be relayed (excess data) prevents us from sharing the node.
let unsigned_announcement = UnsignedNodeAnnouncement {
features : NodeFeatures ::known ( ) ,
timestamp : 1010 ,
node_id : node_id_2 ,
rgb : [ 0 ; 3 ] ,
alias : [ 0 ; 32 ] ,
addresses : Vec ::new ( ) ,
excess_address_data : Vec ::new ( ) ,
2021-02-15 15:25:37 -05:00
excess_data : [ 1 ; MAX_EXCESS_BYTES_FOR_RELAY + 1 ] . to_vec ( ) ,
2020-05-02 15:05:04 -04:00
} ;
let msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_announcement . encode ( ) [ .. ] ) [ .. ] ) ;
let valid_announcement = NodeAnnouncement {
signature : secp_ctx . sign ( & msghash , node_2_privkey ) ,
contents : unsigned_announcement . clone ( )
} ;
match net_graph_msg_handler . handle_node_announcement ( & valid_announcement ) {
Ok ( res ) = > assert! ( ! res ) ,
Err ( _ ) = > panic! ( )
} ;
}
let next_announcements = net_graph_msg_handler . get_next_node_announcements ( Some ( & node_id_1 ) , 2 ) ;
assert_eq! ( next_announcements . len ( ) , 0 ) ;
}
#[ test ]
fn network_graph_serialization ( ) {
let ( secp_ctx , net_graph_msg_handler ) = create_net_graph_msg_handler ( ) ;
let node_1_privkey = & SecretKey ::from_slice ( & [ 42 ; 32 ] ) . unwrap ( ) ;
let node_2_privkey = & SecretKey ::from_slice ( & [ 41 ; 32 ] ) . unwrap ( ) ;
let node_1_btckey = & SecretKey ::from_slice ( & [ 40 ; 32 ] ) . unwrap ( ) ;
let node_2_btckey = & SecretKey ::from_slice ( & [ 39 ; 32 ] ) . unwrap ( ) ;
// Announce a channel to add a corresponding node.
let node_id_1 = PublicKey ::from_secret_key ( & secp_ctx , node_1_privkey ) ;
let node_id_2 = PublicKey ::from_secret_key ( & secp_ctx , node_2_privkey ) ;
let unsigned_announcement = UnsignedChannelAnnouncement {
features : ChannelFeatures ::known ( ) ,
2020-08-25 17:12:00 -04:00
chain_hash : genesis_block ( Network ::Testnet ) . header . block_hash ( ) ,
2020-05-02 15:05:04 -04:00
short_channel_id : 0 ,
node_id_1 ,
node_id_2 ,
bitcoin_key_1 : PublicKey ::from_secret_key ( & secp_ctx , node_1_btckey ) ,
bitcoin_key_2 : PublicKey ::from_secret_key ( & secp_ctx , node_2_btckey ) ,
excess_data : Vec ::new ( ) ,
} ;
let msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_announcement . encode ( ) [ .. ] ) [ .. ] ) ;
let valid_announcement = ChannelAnnouncement {
node_signature_1 : secp_ctx . sign ( & msghash , node_1_privkey ) ,
node_signature_2 : secp_ctx . sign ( & msghash , node_2_privkey ) ,
bitcoin_signature_1 : secp_ctx . sign ( & msghash , node_1_btckey ) ,
bitcoin_signature_2 : secp_ctx . sign ( & msghash , node_2_btckey ) ,
contents : unsigned_announcement . clone ( ) ,
} ;
match net_graph_msg_handler . handle_channel_announcement ( & valid_announcement ) {
Ok ( res ) = > assert! ( res ) ,
_ = > panic! ( )
} ;
let node_id = PublicKey ::from_secret_key ( & secp_ctx , node_1_privkey ) ;
let unsigned_announcement = UnsignedNodeAnnouncement {
features : NodeFeatures ::known ( ) ,
timestamp : 100 ,
node_id ,
rgb : [ 0 ; 3 ] ,
alias : [ 0 ; 32 ] ,
addresses : Vec ::new ( ) ,
excess_address_data : Vec ::new ( ) ,
excess_data : Vec ::new ( ) ,
} ;
let msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_announcement . encode ( ) [ .. ] ) [ .. ] ) ;
let valid_announcement = NodeAnnouncement {
signature : secp_ctx . sign ( & msghash , node_1_privkey ) ,
contents : unsigned_announcement . clone ( )
} ;
match net_graph_msg_handler . handle_node_announcement ( & valid_announcement ) {
Ok ( _ ) = > ( ) ,
Err ( _ ) = > panic! ( )
} ;
2021-08-10 09:47:27 -05:00
let network = & net_graph_msg_handler . network_graph ;
2020-05-02 15:05:04 -04:00
let mut w = test_utils ::TestVecWriter ( Vec ::new ( ) ) ;
2021-08-16 18:40:19 -05:00
assert! ( ! network . read_only ( ) . nodes ( ) . is_empty ( ) ) ;
assert! ( ! network . read_only ( ) . channels ( ) . is_empty ( ) ) ;
2020-05-02 15:05:04 -04:00
network . write ( & mut w ) . unwrap ( ) ;
2021-08-01 18:22:06 +02:00
assert! ( < NetworkGraph > ::read ( & mut io ::Cursor ::new ( & w . 0 ) ) . unwrap ( ) = = * network ) ;
2020-05-02 15:05:04 -04:00
}
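	// A minimal illustrative sketch: deserializing a NetworkGraph from a truncated buffer
	// should fail cleanly rather than yield a bogus graph. This relies only on the same
	// `Readable` impl and `io::Cursor` pattern exercised in the round-trip test above.
	#[test]
	fn sketch_network_graph_read_truncated() {
		let truncated = vec![0u8; 3];
		assert!(<NetworkGraph>::read(&mut io::Cursor::new(&truncated)).is_err());
	}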
2020-10-22 12:44:53 -04:00
#[ test ]
2020-11-18 13:32:55 -05:00
fn calling_sync_routing_table ( ) {
2020-10-22 12:44:53 -04:00
let ( secp_ctx , net_graph_msg_handler ) = create_net_graph_msg_handler ( ) ;
let node_privkey_1 = & SecretKey ::from_slice ( & [ 42 ; 32 ] ) . unwrap ( ) ;
let node_id_1 = PublicKey ::from_secret_key ( & secp_ctx , node_privkey_1 ) ;
let chain_hash = genesis_block ( Network ::Testnet ) . header . block_hash ( ) ;
let first_blocknum = 0 ;
let number_of_blocks = 0xffff_ffff ;
2020-12-03 12:00:36 -05:00
// The handler should ignore the peer if the gossip_queries feature is not enabled.
{
let init_msg = Init { features : InitFeatures ::known ( ) . clear_gossip_queries ( ) } ;
net_graph_msg_handler . sync_routing_table ( & node_id_1 , & init_msg ) ;
let events = net_graph_msg_handler . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( events . len ( ) , 0 ) ;
}
2020-11-18 13:32:55 -05:00
// It should send a query_channel_range message with the correct information.
2020-12-03 12:00:36 -05:00
{
let init_msg = Init { features : InitFeatures ::known ( ) } ;
net_graph_msg_handler . sync_routing_table ( & node_id_1 , & init_msg ) ;
let events = net_graph_msg_handler . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
match & events [ 0 ] {
MessageSendEvent ::SendChannelRangeQuery { node_id , msg } = > {
assert_eq! ( node_id , & node_id_1 ) ;
assert_eq! ( msg . chain_hash , chain_hash ) ;
assert_eq! ( msg . first_blocknum , first_blocknum ) ;
assert_eq! ( msg . number_of_blocks , number_of_blocks ) ;
} ,
_ = > panic! ( " Expected MessageSendEvent::SendChannelRangeQuery " )
} ;
}
2020-12-03 12:48:40 -05:00
// It should not enqueue a query when should_request_full_sync returns false.
// The initial implementation allows syncing with the first 5 peers, after
// which should_request_full_sync will return false.
{
let ( secp_ctx , net_graph_msg_handler ) = create_net_graph_msg_handler ( ) ;
let init_msg = Init { features : InitFeatures ::known ( ) } ;
for n in 1 .. 7 {
let node_privkey = & SecretKey ::from_slice ( & [ n ; 32 ] ) . unwrap ( ) ;
let node_id = PublicKey ::from_secret_key ( & secp_ctx , node_privkey ) ;
net_graph_msg_handler . sync_routing_table ( & node_id , & init_msg ) ;
let events = net_graph_msg_handler . get_and_clear_pending_msg_events ( ) ;
if n < = 5 {
assert_eq! ( events . len ( ) , 1 ) ;
} else {
assert_eq! ( events . len ( ) , 0 ) ;
}
}
}
2020-10-22 12:44:53 -04:00
}
#[ test ]
fn handling_reply_channel_range ( ) {
let ( secp_ctx , net_graph_msg_handler ) = create_net_graph_msg_handler ( ) ;
let node_privkey_1 = & SecretKey ::from_slice ( & [ 42 ; 32 ] ) . unwrap ( ) ;
let node_id_1 = PublicKey ::from_secret_key ( & secp_ctx , node_privkey_1 ) ;
let chain_hash = genesis_block ( Network ::Testnet ) . header . block_hash ( ) ;
2020-11-18 13:32:55 -05:00
// Test receipt of a single reply that should enqueue an SCID query
// matching the SCIDs in the reply
2020-10-22 12:44:53 -04:00
{
2020-12-03 11:52:54 -05:00
let result = net_graph_msg_handler . handle_reply_channel_range ( & node_id_1 , ReplyChannelRange {
2020-10-22 12:44:53 -04:00
chain_hash ,
2021-02-03 11:34:48 -05:00
sync_complete : true ,
2020-10-22 12:44:53 -04:00
first_blocknum : 0 ,
number_of_blocks : 2000 ,
short_channel_ids : vec ! [
0x0003e0_000000_0000 , // 992x0x0
0x0003e8_000000_0000 , // 1000x0x0
0x0003e9_000000_0000 , // 1001x0x0
0x0003f0_000000_0000 , // 1008x0x0
0x00044c_000000_0000 , // 1100x0x0
0x0006e0_000000_0000 , // 1760x0x0
] ,
} ) ;
assert! ( result . is_ok ( ) ) ;
2020-11-18 13:32:55 -05:00
// We expect to emit a query_short_channel_ids message with the received scids
2020-10-22 12:44:53 -04:00
let events = net_graph_msg_handler . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
match & events [ 0 ] {
MessageSendEvent ::SendShortIdsQuery { node_id , msg } = > {
assert_eq! ( node_id , & node_id_1 ) ;
assert_eq! ( msg . chain_hash , chain_hash ) ;
2020-11-18 13:32:55 -05:00
assert_eq! ( msg . short_channel_ids , vec! [
0x0003e0_000000_0000 , // 992x0x0
0x0003e8_000000_0000 , // 1000x0x0
0x0003e9_000000_0000 , // 1001x0x0
0x0003f0_000000_0000 , // 1008x0x0
0x00044c_000000_0000 , // 1100x0x0
0x0006e0_000000_0000 , // 1760x0x0
] ) ;
2020-10-22 12:44:53 -04:00
} ,
_ = > panic! ( " expected MessageSendEvent::SendShortIdsQuery " ) ,
}
}
}
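	// A minimal illustrative sketch of the short_channel_id encoding used in the constants
	// above: the block height occupies the top three bytes, the transaction index the next
	// three, and the output index the final two, which is what `scid_from_parts` computes.
	#[test]
	fn sketch_scid_encoding() {
		assert_eq!(scid_from_parts(992, 0, 0).unwrap(), 0x0003e0_000000_0000); // 992x0x0
		assert_eq!(scid_from_parts(1000, 0, 0).unwrap(), 0x0003e8_000000_0000); // 1000x0x0
		assert_eq!(scid_from_parts(1100, 0, 0).unwrap(), 0x00044c_000000_0000); // 1100x0x0
	}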
#[ test ]
fn handling_reply_short_channel_ids ( ) {
let ( secp_ctx , net_graph_msg_handler ) = create_net_graph_msg_handler ( ) ;
let node_privkey = & SecretKey ::from_slice ( & [ 41 ; 32 ] ) . unwrap ( ) ;
let node_id = PublicKey ::from_secret_key ( & secp_ctx , node_privkey ) ;
let chain_hash = genesis_block ( Network ::Testnet ) . header . block_hash ( ) ;
2020-11-18 13:32:55 -05:00
// Test receipt of a successful reply
2020-10-22 12:44:53 -04:00
{
2020-12-03 11:52:54 -05:00
let result = net_graph_msg_handler . handle_reply_short_channel_ids_end ( & node_id , ReplyShortChannelIdsEnd {
2020-10-22 12:44:53 -04:00
chain_hash ,
full_information : true ,
} ) ;
assert! ( result . is_ok ( ) ) ;
}
// Test receipt of a reply that indicates the peer does not maintain up-to-date information
2020-11-18 13:32:55 -05:00
// for the chain_hash requested in the query.
2020-10-22 12:44:53 -04:00
{
2020-12-03 11:52:54 -05:00
let result = net_graph_msg_handler . handle_reply_short_channel_ids_end ( & node_id , ReplyShortChannelIdsEnd {
2020-10-22 12:44:53 -04:00
chain_hash ,
full_information : false ,
} ) ;
assert! ( result . is_err ( ) ) ;
assert_eq! ( result . err ( ) . unwrap ( ) . err , " Received reply_short_channel_ids_end with no information " ) ;
}
}
#[ test ]
fn handling_query_channel_range ( ) {
2021-03-13 14:51:36 -05:00
let ( secp_ctx , net_graph_msg_handler ) = create_net_graph_msg_handler ( ) ;
2020-10-22 12:44:53 -04:00
let chain_hash = genesis_block ( Network ::Testnet ) . header . block_hash ( ) ;
2021-03-03 16:48:19 -05:00
let node_1_privkey = & SecretKey ::from_slice ( & [ 42 ; 32 ] ) . unwrap ( ) ;
let node_2_privkey = & SecretKey ::from_slice ( & [ 41 ; 32 ] ) . unwrap ( ) ;
let node_1_btckey = & SecretKey ::from_slice ( & [ 40 ; 32 ] ) . unwrap ( ) ;
let node_2_btckey = & SecretKey ::from_slice ( & [ 39 ; 32 ] ) . unwrap ( ) ;
let node_id_1 = PublicKey ::from_secret_key ( & secp_ctx , node_1_privkey ) ;
let node_id_2 = PublicKey ::from_secret_key ( & secp_ctx , node_2_privkey ) ;
let bitcoin_key_1 = PublicKey ::from_secret_key ( & secp_ctx , node_1_btckey ) ;
let bitcoin_key_2 = PublicKey ::from_secret_key ( & secp_ctx , node_2_btckey ) ;
2021-03-13 14:51:36 -05:00
let mut scids : Vec < u64 > = vec! [
scid_from_parts ( 0xfffffe , 0xffffff , 0xffff ) . unwrap ( ) , // max
scid_from_parts ( 0xffffff , 0xffffff , 0xffff ) . unwrap ( ) , // never
2021-03-03 16:48:19 -05:00
] ;
2021-03-13 14:51:36 -05:00
// Used for testing a multipart reply that spans multiple blocks.
for block in 100000 ..= 108001 {
scids . push ( scid_from_parts ( block , 0 , 0 ) . unwrap ( ) ) ;
}
// Used for testing resumption on the same block.
scids . push ( scid_from_parts ( 108001 , 1 , 0 ) . unwrap ( ) ) ;
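		// Blocks 100000..=108001 contribute one SCID per block (8002 SCIDs) plus the extra
		// SCID on block 108001, i.e. 8003 SCIDs in this range. A reply carries at most
		// MAX_SCIDS_PER_REPLY (8000) SCIDs, so a query spanning the whole range has to be
		// split across replies, which the multipart cases below exercise.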
2021-03-03 16:48:19 -05:00
for scid in scids {
let unsigned_announcement = UnsignedChannelAnnouncement {
features : ChannelFeatures ::known ( ) ,
chain_hash : chain_hash . clone ( ) ,
short_channel_id : scid ,
node_id_1 ,
node_id_2 ,
bitcoin_key_1 ,
bitcoin_key_2 ,
excess_data : Vec ::new ( ) ,
} ;
2020-10-22 12:44:53 -04:00
2021-03-03 16:48:19 -05:00
let msghash = hash_to_message! ( & Sha256dHash ::hash ( & unsigned_announcement . encode ( ) [ .. ] ) [ .. ] ) ;
let valid_announcement = ChannelAnnouncement {
node_signature_1 : secp_ctx . sign ( & msghash , node_1_privkey ) ,
node_signature_2 : secp_ctx . sign ( & msghash , node_2_privkey ) ,
bitcoin_signature_1 : secp_ctx . sign ( & msghash , node_1_btckey ) ,
bitcoin_signature_2 : secp_ctx . sign ( & msghash , node_2_btckey ) ,
contents : unsigned_announcement . clone ( ) ,
} ;
match net_graph_msg_handler . handle_channel_announcement ( & valid_announcement ) {
Ok ( _ ) = > ( ) ,
_ = > panic! ( )
} ;
}
2021-03-14 15:45:45 -04:00
// Error when number_of_blocks=0
2021-03-13 14:51:36 -05:00
do_handling_query_channel_range (
2021-03-03 16:48:19 -05:00
& net_graph_msg_handler ,
& node_id_2 ,
QueryChannelRange {
chain_hash : chain_hash . clone ( ) ,
first_blocknum : 0 ,
number_of_blocks : 0 ,
} ,
2021-03-14 15:45:45 -04:00
false ,
2021-03-03 16:48:19 -05:00
vec! [ ReplyChannelRange {
chain_hash : chain_hash . clone ( ) ,
first_blocknum : 0 ,
number_of_blocks : 0 ,
sync_complete : true ,
short_channel_ids : vec ! [ ]
} ]
) ;
		// Error when wrong chain
		do_handling_query_channel_range(
			&net_graph_msg_handler,
			&node_id_2,
			QueryChannelRange {
				chain_hash: genesis_block(Network::Bitcoin).header.block_hash(),
				first_blocknum: 0,
				number_of_blocks: 0xffff_ffff,
			},
			false,
			vec![ReplyChannelRange {
				chain_hash: genesis_block(Network::Bitcoin).header.block_hash(),
				first_blocknum: 0,
				number_of_blocks: 0xffff_ffff,
				sync_complete: true,
				short_channel_ids: vec![],
			}]
		);
		// Error when first_blocknum > 0xffffff
		do_handling_query_channel_range(
			&net_graph_msg_handler,
			&node_id_2,
			QueryChannelRange {
				chain_hash: chain_hash.clone(),
				first_blocknum: 0x01000000,
				number_of_blocks: 0xffff_ffff,
			},
			false,
			vec![ReplyChannelRange {
				chain_hash: chain_hash.clone(),
				first_blocknum: 0x01000000,
				number_of_blocks: 0xffff_ffff,
				sync_complete: true,
				short_channel_ids: vec![]
			}]
		);
		// Empty reply when max valid SCID block num
		do_handling_query_channel_range(
			&net_graph_msg_handler,
			&node_id_2,
			QueryChannelRange {
				chain_hash: chain_hash.clone(),
				first_blocknum: 0xffffff,
				number_of_blocks: 1,
			},
			true,
			vec![
				ReplyChannelRange {
					chain_hash: chain_hash.clone(),
					first_blocknum: 0xffffff,
					number_of_blocks: 1,
					sync_complete: true,
					short_channel_ids: vec![]
				},
			]
		);
		// No results in valid query range
		do_handling_query_channel_range(
			&net_graph_msg_handler,
			&node_id_2,
			QueryChannelRange {
				chain_hash: chain_hash.clone(),
				first_blocknum: 1000,
				number_of_blocks: 1000,
			},
			true,
			vec![
				ReplyChannelRange {
					chain_hash: chain_hash.clone(),
					first_blocknum: 1000,
					number_of_blocks: 1000,
					sync_complete: true,
					short_channel_ids: vec![],
				}
			]
		);
		// Overflow first_blocknum + number_of_blocks
		do_handling_query_channel_range(
			&net_graph_msg_handler,
			&node_id_2,
			QueryChannelRange {
				chain_hash: chain_hash.clone(),
				first_blocknum: 0xfe0000,
				number_of_blocks: 0xffffffff,
			},
			true,
			vec![
				ReplyChannelRange {
					chain_hash: chain_hash.clone(),
					first_blocknum: 0xfe0000,
					number_of_blocks: 0xffffffff - 0xfe0000,
					sync_complete: true,
					short_channel_ids: vec![
						0xfffffe_ffffff_ffff, // max
					]
				}
			]
		);
		// Single block exactly full
		do_handling_query_channel_range(
			&net_graph_msg_handler,
			&node_id_2,
			QueryChannelRange {
				chain_hash: chain_hash.clone(),
				first_blocknum: 100000,
				number_of_blocks: 8000,
			},
			true,
			vec![
				ReplyChannelRange {
					chain_hash: chain_hash.clone(),
					first_blocknum: 100000,
					number_of_blocks: 8000,
					sync_complete: true,
					short_channel_ids: (100000..=107999)
						.map(|block| scid_from_parts(block, 0, 0).unwrap())
						.collect(),
				},
			]
		);
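		// The remaining queries cover more SCIDs than fit in a single reply (8,000 per message),
		// so the handler must split its response across multiple reply_channel_range messages.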
		// Multiple split on new block
		do_handling_query_channel_range(
			&net_graph_msg_handler,
			&node_id_2,
			QueryChannelRange {
				chain_hash: chain_hash.clone(),
				first_blocknum: 100000,
				number_of_blocks: 8001,
			},
			true,
			vec![
				ReplyChannelRange {
					chain_hash: chain_hash.clone(),
					first_blocknum: 100000,
					number_of_blocks: 7999,
					sync_complete: false,
					short_channel_ids: (100000..=107999)
						.map(|block| scid_from_parts(block, 0, 0).unwrap())
						.collect(),
				},
				ReplyChannelRange {
					chain_hash: chain_hash.clone(),
					first_blocknum: 107999,
					number_of_blocks: 2,
					sync_complete: true,
					short_channel_ids: vec![
						scid_from_parts(108000, 0, 0).unwrap(),
					],
				}
			]
		);
		// Multiple split on same block
		do_handling_query_channel_range(
			&net_graph_msg_handler,
			&node_id_2,
			QueryChannelRange {
				chain_hash: chain_hash.clone(),
				first_blocknum: 100002,
				number_of_blocks: 8000,
			},
			true,
			vec![
				ReplyChannelRange {
					chain_hash: chain_hash.clone(),
					first_blocknum: 100002,
					number_of_blocks: 7999,
					sync_complete: false,
					short_channel_ids: (100002..=108001)
						.map(|block| scid_from_parts(block, 0, 0).unwrap())
						.collect(),
				},
				ReplyChannelRange {
					chain_hash: chain_hash.clone(),
					first_blocknum: 108001,
					number_of_blocks: 1,
					sync_complete: true,
					short_channel_ids: vec![
						scid_from_parts(108001, 1, 0).unwrap(),
					],
				}
			]
		);
	}
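	// Helper: sends `msg` to the handler, asserts whether handling succeeded, and checks that the
	// resulting SendReplyChannelRange events match `expected_replies`, including the reply
	// sequencing expected by c-lightning v0.9.3.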
	fn do_handling_query_channel_range(
		net_graph_msg_handler: &NetGraphMsgHandler<Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>,
		test_node_id: &PublicKey,
		msg: QueryChannelRange,
		expected_ok: bool,
		expected_replies: Vec<ReplyChannelRange>
	) {
		let mut max_firstblocknum = msg.first_blocknum.saturating_sub(1);
		let mut c_lightning_0_9_prev_end_blocknum = max_firstblocknum;
		let query_end_blocknum = msg.end_blocknum();
		let result = net_graph_msg_handler.handle_query_channel_range(test_node_id, msg);

		if expected_ok {
			assert!(result.is_ok());
		} else {
			assert!(result.is_err());
		}

		let events = net_graph_msg_handler.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), expected_replies.len());

		for i in 0..events.len() {
			let expected_reply = &expected_replies[i];
			match &events[i] {
				MessageSendEvent::SendReplyChannelRange { node_id, msg } => {
					assert_eq!(node_id, test_node_id);
					assert_eq!(msg.chain_hash, expected_reply.chain_hash);
					assert_eq!(msg.first_blocknum, expected_reply.first_blocknum);
					assert_eq!(msg.number_of_blocks, expected_reply.number_of_blocks);
					assert_eq!(msg.sync_complete, expected_reply.sync_complete);
					assert_eq!(msg.short_channel_ids, expected_reply.short_channel_ids);

					// Enforce exactly the sequencing requirements present on c-lightning v0.9.3
					assert!(msg.first_blocknum == c_lightning_0_9_prev_end_blocknum || msg.first_blocknum == c_lightning_0_9_prev_end_blocknum.saturating_add(1));
					assert!(msg.first_blocknum >= max_firstblocknum);
					max_firstblocknum = msg.first_blocknum;
					c_lightning_0_9_prev_end_blocknum = msg.first_blocknum.saturating_add(msg.number_of_blocks);

					// Check that the last reply covers at least the query's end_blocknum
					if i == events.len() - 1 {
						assert!(msg.first_blocknum.saturating_add(msg.number_of_blocks) >= query_end_blocknum);
					}
				},
				_ => panic!("expected MessageSendEvent::SendReplyChannelRange"),
			}
		}
	}
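	// handle_query_short_channel_ids is expected to reject the query below with an error.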
	#[test]
	fn handling_query_short_channel_ids() {
		let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler();
		let node_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
		let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey);
		let chain_hash = genesis_block(Network::Testnet).header.block_hash();

		let result = net_graph_msg_handler.handle_query_short_channel_ids(&node_id, QueryShortChannelIds {
			chain_hash,
			short_channel_ids: vec![0x0003e8_000000_0000],
		});
		assert!(result.is_err());
	}
}
// Benchmarks of (de)serializing a NetworkGraph. NetworkGraph is one of the largest structures we
// generally deserialize, so it makes for a good benchmark, even if it isn't the most complicated
// one. On an Intel 2687W v3 these benchmarks measured roughly:
//   routing::network_graph::benches::read_network_graph  ... bench: 2,101,420,078 ns/iter (+/- 6,649,020)
//   routing::network_graph::benches::write_network_graph ... bench:   344,696,835 ns/iter (+/-   229,061)
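// Note: these benches only build with the nightly-only `test` crate and the `unstable` feature
// enabled; something like `cargo +nightly bench --features unstable` is the assumed invocation.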
#[cfg(all(test, feature = "unstable"))]
mod benches {
	use super::*;

	use test::Bencher;
	use std::io::Read;

	#[bench]
	fn read_network_graph(bench: &mut Bencher) {
		let mut d = ::routing::router::test_utils::get_route_file().unwrap();
		let mut v = Vec::new();
		d.read_to_end(&mut v).unwrap();
		bench.iter(|| {
			let _ = NetworkGraph::read(&mut std::io::Cursor::new(&v)).unwrap();
		});
	}

	#[bench]
	fn write_network_graph(bench: &mut Bencher) {
		let mut d = ::routing::router::test_utils::get_route_file().unwrap();
		let net_graph = NetworkGraph::read(&mut d).unwrap();
		bench.iter(|| {
			let _ = net_graph.encode();
		});
	}
}