diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f7cf8c4e6..f1e4ce517 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -100,6 +100,10 @@ comment suggesting that you're working on it. If someone is already assigned, don't hesitate to ask if the assigned party or previous commenters are still working on it if it has been awhile. +Any changes that have nontrivial backwards compatibility considerations should +have an entry added in the `pending_changelog` folder which includes the +CHANGELOG entries that should be added in the next release. + Peer review ----------- diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs index f506acc9f..9f1580288 100644 --- a/fuzz/src/full_stack.rs +++ b/fuzz/src/full_stack.rs @@ -415,7 +415,7 @@ pub fn do_test(data: &[u8], logger: &Arc) { chan_handler: channelmanager.clone(), route_handler: gossip_sync.clone(), onion_message_handler: IgnoringMessageHandler {}, - }, our_network_key, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 0], Arc::clone(&logger), IgnoringMessageHandler{})); + }, our_network_key, 0, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 0], Arc::clone(&logger), IgnoringMessageHandler{})); let mut should_forward = false; let mut payments_received: Vec = Vec::new(); diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index e1fa8144a..275d170f8 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -746,7 +746,7 @@ mod tests { let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone())); let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone())); let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new()), onion_message_handler: IgnoringMessageHandler{}}; - let peer_manager = Arc::new(PeerManager::new(msg_handler, keys_manager.get_node_secret(Recipient::Node).unwrap(), &seed, logger.clone(), IgnoringMessageHandler{})); + let peer_manager = Arc::new(PeerManager::new(msg_handler, keys_manager.get_node_secret(Recipient::Node).unwrap(), 0, &seed, logger.clone(), IgnoringMessageHandler{})); let scorer = Arc::new(Mutex::new(test_utils::TestScorer::with_penalty(0))); let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer }; nodes.push(node); diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs index 7dfa38e95..022b67fca 100644 --- a/lightning-net-tokio/src/lib.rs +++ b/lightning-net-tokio/src/lib.rs @@ -546,6 +546,7 @@ mod tests { use lightning::ln::features::*; use lightning::ln::msgs::*; use lightning::ln::peer_handler::{MessageHandler, PeerManager}; + use lightning::ln::features::NodeFeatures; use lightning::util::events::*; use bitcoin::secp256k1::{Secp256k1, SecretKey, PublicKey}; @@ -612,6 +613,7 @@ mod tests { } fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, _msg: &ChannelReestablish) {} fn handle_error(&self, _their_node_id: &PublicKey, _msg: &ErrorMessage) {} + fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::known() } } impl MessageSendEventsProvider for MsgHandler { fn get_and_clear_pending_msg_events(&self) -> Vec { @@ -657,7 +659,7 @@ mod tests { chan_handler: 
Arc::clone(&a_handler), route_handler: Arc::clone(&a_handler), onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}), - }, a_key.clone(), &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}))); + }, a_key.clone(), 0, &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}))); let (b_connected_sender, mut b_connected) = mpsc::channel(1); let (b_disconnected_sender, mut b_disconnected) = mpsc::channel(1); @@ -672,7 +674,7 @@ mod tests { chan_handler: Arc::clone(&b_handler), route_handler: Arc::clone(&b_handler), onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}), - }, b_key.clone(), &[2; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}))); + }, b_key.clone(), 0, &[2; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}))); // We bind on localhost, hoping the environment is properly configured with a local // address. This may not always be the case in containers and the like, so if this test is @@ -725,7 +727,7 @@ mod tests { chan_handler: Arc::new(lightning::ln::peer_handler::ErroringMessageHandler::new()), onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}), route_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}), - }, a_key, &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}))); + }, a_key, 0, &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}))); // Make two connections, one for an inbound and one for an outbound connection let conn_a = { diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 794ac0db7..008075333 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -1126,8 +1126,8 @@ fn test_monitor_update_fail_reestablish() { nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None }); nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None }); - let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); - let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); + let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); + let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish); @@ -1145,8 +1145,8 @@ fn test_monitor_update_fail_reestablish() { nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None }); nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None }); - assert!(as_reestablish == get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id())); - assert!(bs_reestablish == get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id())); + assert_eq!(get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(), 
as_reestablish); + assert_eq!(get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(), bs_reestablish); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish); assert_eq!( @@ -1319,8 +1319,8 @@ fn claim_while_disconnected_monitor_update_fail() { nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None }); nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None }); - let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); - let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); + let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); + let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect); let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); @@ -1451,8 +1451,8 @@ fn monitor_failed_no_reestablish_response() { nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None }); nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None }); - let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); - let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); + let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); + let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect); let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()); @@ -2032,9 +2032,9 @@ fn test_pending_update_fee_ack_on_reconnect() { nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None }); - let as_connect_msg = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); + let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None }); - let bs_connect_msg = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); + let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg); let bs_resend_msgs = nodes[1].node.get_and_clear_pending_msg_events(); @@ -2160,9 +2160,9 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None }); - let as_connect_msg = get_event_msg!(nodes[0], 
MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); + let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None }); - let bs_connect_msg = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); + let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg); get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index b75eefb13..afbd040b6 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -54,7 +54,6 @@ use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner, Rec use util::config::{UserConfig, ChannelConfig}; use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination}; use util::{byte_utils, events}; -use util::crypto::sign; use util::wakers::{Future, Notifier}; use util::scid_utils::fake_scid; use util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter}; @@ -764,10 +763,6 @@ pub struct ChannelManager ChannelMana probing_cookie_secret: keys_manager.get_secure_random_bytes(), - last_node_announcement_serial: AtomicUsize::new(0), highest_seen_timestamp: AtomicUsize::new(0), per_peer_state: RwLock::new(HashMap::new()), @@ -2928,89 +2922,6 @@ impl ChannelMana }) } - #[allow(dead_code)] - // Messages of up to 64KB should never end up more than half full with addresses, as that would - // be absurd. We ensure this by checking that at least 100 (our stated public contract on when - // broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB - // message... - const HALF_MESSAGE_IS_ADDRS: u32 = ::core::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2; - #[deny(const_err)] - #[allow(dead_code)] - // ...by failing to compile if the number of addresses that would be half of a message is - // smaller than 100: - const STATIC_ASSERT: u32 = Self::HALF_MESSAGE_IS_ADDRS - 100; - - /// Regenerates channel_announcements and generates a signed node_announcement from the given - /// arguments, providing them in corresponding events via - /// [`get_and_clear_pending_msg_events`], if at least one public channel has been confirmed - /// on-chain. This effectively re-broadcasts all channel announcements and sends our node - /// announcement to ensure that the lightning P2P network is aware of the channels we have and - /// our network addresses. - /// - /// `rgb` is a node "color" and `alias` is a printable human-readable string to describe this - /// node to humans. They carry no in-protocol meaning. - /// - /// `addresses` represent the set (possibly empty) of socket addresses on which this node - /// accepts incoming connections. These will be included in the node_announcement, publicly - /// tying these addresses together and to this node. If you wish to preserve user privacy, - /// addresses should likely contain only Tor Onion addresses. - /// - /// Panics if `addresses` is absurdly large (more than 100). 
- /// - /// [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events - pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], mut addresses: Vec) { - let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - - if addresses.len() > 100 { - panic!("More than half the message size was taken up by public addresses!"); - } - - // While all existing nodes handle unsorted addresses just fine, the spec requires that - // addresses be sorted for future compatibility. - addresses.sort_by_key(|addr| addr.get_id()); - - let announcement = msgs::UnsignedNodeAnnouncement { - features: NodeFeatures::known(), - timestamp: self.last_node_announcement_serial.fetch_add(1, Ordering::AcqRel) as u32, - node_id: self.get_our_node_id(), - rgb, alias, addresses, - excess_address_data: Vec::new(), - excess_data: Vec::new(), - }; - let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]); - let node_announce_sig = sign(&self.secp_ctx, &msghash, &self.our_network_key); - - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; - - let mut announced_chans = false; - for (_, chan) in channel_state.by_id.iter() { - if let Some(msg) = chan.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height()) { - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { - msg, - update_msg: match self.get_channel_update_for_broadcast(chan) { - Ok(msg) => msg, - Err(_) => continue, - }, - }); - announced_chans = true; - } else { - // If the channel is not public or has not yet reached channel_ready, check the - // next channel. If we don't yet have any public channels, we'll skip the broadcast - // below as peers may not accept it without channels on chain first. - } - } - - if announced_chans { - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastNodeAnnouncement { - msg: msgs::NodeAnnouncement { - signature: node_announce_sig, - contents: announcement - }, - }); - } - } - /// Atomically updates the [`ChannelConfig`] for the given channels. /// /// Once the updates are applied, each eligible channel (advertised with a known short channel @@ -5754,7 +5665,6 @@ where } } } - max_time!(self.last_node_announcement_serial); max_time!(self.highest_seen_timestamp); let mut payment_secrets = self.pending_inbound_payments.lock().unwrap(); payment_secrets.retain(|_, inbound_payment| { @@ -6104,8 +6014,8 @@ impl &events::MessageSendEvent::SendClosingSigned { ref node_id, .. } => node_id != counterparty_node_id, &events::MessageSendEvent::SendShutdown { ref node_id, .. } => node_id != counterparty_node_id, &events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => node_id != counterparty_node_id, + &events::MessageSendEvent::SendChannelAnnouncement { ref node_id, .. } => node_id != counterparty_node_id, &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true, - &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true, &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true, &events::MessageSendEvent::SendChannelUpdate { ref node_id, .. } => node_id != counterparty_node_id, &events::MessageSendEvent::HandleError { ref node_id, .. 
} => node_id != counterparty_node_id, @@ -6148,7 +6058,7 @@ impl let channel_state = &mut *channel_state_lock; let pending_msg_events = &mut channel_state.pending_msg_events; channel_state.by_id.retain(|_, chan| { - if chan.get_counterparty_node_id() == *counterparty_node_id { + let retain = if chan.get_counterparty_node_id() == *counterparty_node_id { if !chan.have_received_message() { // If we created this (outbound) channel while we were disconnected from the // peer we probably failed to send the open_channel message, which is now @@ -6162,7 +6072,18 @@ impl }); true } - } else { true } + } else { true }; + if retain && chan.get_counterparty_node_id() != *counterparty_node_id { + if let Some(msg) = chan.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height()) { + if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) { + pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement { + node_id: *counterparty_node_id, + msg, update_msg, + }); + } + } + } + retain }); //TODO: Also re-broadcast announcement_signatures } @@ -6199,6 +6120,10 @@ impl let _ = self.force_close_channel_with_peer(&msg.channel_id, counterparty_node_id, Some(&msg.data), true); } } + + fn provided_node_features(&self) -> NodeFeatures { + NodeFeatures::known() + } } const SERIALIZATION_VERSION: u8 = 1; @@ -6621,7 +6546,10 @@ impl Writeable f } } - (self.last_node_announcement_serial.load(Ordering::Acquire) as u32).write(writer)?; + // Prior to 0.0.111 we tracked node_announcement serials here, however that now happens in + // `PeerManager`, and thus we simply write the `highest_seen_timestamp` twice, which is + // likely to be identical. + (self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?; (self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?; (pending_inbound_payments.len() as u64).write(writer)?; @@ -6940,7 +6868,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> } } - let last_node_announcement_serial: u32 = Readable::read(reader)?; + let _last_node_announcement_serial: u32 = Readable::read(reader)?; // Only used < 0.0.111 let highest_seen_timestamp: u32 = Readable::read(reader)?; let pending_inbound_payment_count: u64 = Readable::read(reader)?; @@ -7201,7 +7129,6 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> our_network_pubkey, secp_ctx, - last_node_announcement_serial: AtomicUsize::new(last_node_announcement_serial as usize), highest_seen_timestamp: AtomicUsize::new(highest_seen_timestamp as usize), per_peer_state: RwLock::new(per_peer_state), diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index d8c1b75fa..554e60ba7 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -901,60 +901,10 @@ pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>(nodes: & } pub fn update_nodes_with_chan_announce<'a, 'b, 'c, 'd>(nodes: &'a Vec>, a: usize, b: usize, ann: &msgs::ChannelAnnouncement, upd_1: &msgs::ChannelUpdate, upd_2: &msgs::ChannelUpdate) { - nodes[a].node.broadcast_node_announcement([0, 0, 0], [0; 32], Vec::new()); - let a_events = nodes[a].node.get_and_clear_pending_msg_events(); - assert!(a_events.len() >= 2); - - // ann should be re-generated by broadcast_node_announcement - check that we have it. 
- let mut found_ann_1 = false; - for event in a_events.iter() { - match event { - MessageSendEvent::BroadcastChannelAnnouncement { ref msg, .. } => { - if msg == ann { found_ann_1 = true; } - }, - MessageSendEvent::BroadcastNodeAnnouncement { .. } => {}, - _ => panic!("Unexpected event {:?}", event), - } - } - assert!(found_ann_1); - - let a_node_announcement = match a_events.last().unwrap() { - MessageSendEvent::BroadcastNodeAnnouncement { ref msg } => { - (*msg).clone() - }, - _ => panic!("Unexpected event"), - }; - - nodes[b].node.broadcast_node_announcement([1, 1, 1], [1; 32], Vec::new()); - let b_events = nodes[b].node.get_and_clear_pending_msg_events(); - assert!(b_events.len() >= 2); - - // ann should be re-generated by broadcast_node_announcement - check that we have it. - let mut found_ann_2 = false; - for event in b_events.iter() { - match event { - MessageSendEvent::BroadcastChannelAnnouncement { ref msg, .. } => { - if msg == ann { found_ann_2 = true; } - }, - MessageSendEvent::BroadcastNodeAnnouncement { .. } => {}, - _ => panic!("Unexpected event"), - } - } - assert!(found_ann_2); - - let b_node_announcement = match b_events.last().unwrap() { - MessageSendEvent::BroadcastNodeAnnouncement { ref msg } => { - (*msg).clone() - }, - _ => panic!("Unexpected event"), - }; - for node in nodes { assert!(node.gossip_sync.handle_channel_announcement(ann).unwrap()); node.gossip_sync.handle_channel_update(upd_1).unwrap(); node.gossip_sync.handle_channel_update(upd_2).unwrap(); - node.gossip_sync.handle_node_announcement(&a_node_announcement).unwrap(); - node.gossip_sync.handle_node_announcement(&b_node_announcement).unwrap(); // Note that channel_updates are also delivered to ChannelManagers to ensure we have // forwarding info for local channels even if its not accepted in the network graph. @@ -2333,15 +2283,27 @@ macro_rules! get_channel_value_stat { macro_rules! get_chan_reestablish_msgs { ($src_node: expr, $dst_node: expr) => { { + let mut announcements = $crate::prelude::HashSet::new(); let mut res = Vec::with_capacity(1); for msg in $src_node.node.get_and_clear_pending_msg_events() { if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg { assert_eq!(*node_id, $dst_node.node.get_our_node_id()); res.push(msg.clone()); + } else if let MessageSendEvent::SendChannelAnnouncement { ref node_id, ref msg, .. 
} = msg { + assert_eq!(*node_id, $dst_node.node.get_our_node_id()); + announcements.insert(msg.contents.short_channel_id); } else { panic!("Unexpected event") } } + for chan in $src_node.node.list_channels() { + if chan.is_public && chan.counterparty.node_id != $dst_node.node.get_our_node_id() { + if let Some(scid) = chan.short_channel_id { + assert!(announcements.remove(&scid)); + } + } + } + assert!(announcements.is_empty()); res } } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 2efeb8852..21b7d9a7d 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -3895,9 +3895,9 @@ fn test_funding_peer_disconnect() { assert!(events_2.is_empty()); nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None }); - let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); + let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None }); - let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); + let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); // nodes[0] hasn't yet received a channel_ready, so it only sends that on reconnect. nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish); @@ -4038,21 +4038,6 @@ fn test_funding_peer_disconnect() { check_added_monitors!(nodes[0], 1); reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); - - // The channel announcement should be re-generated exactly by broadcast_node_announcement. - nodes[0].node.broadcast_node_announcement([0, 0, 0], [0; 32], Vec::new()); - let msgs = nodes[0].node.get_and_clear_pending_msg_events(); - let mut found_announcement = false; - for event in msgs.iter() { - match event { - MessageSendEvent::BroadcastChannelAnnouncement { ref msg, .. } => { - if *msg == chan_announcement { found_announcement = true; } - }, - MessageSendEvent::BroadcastNodeAnnouncement { .. } => {}, - _ => panic!("Unexpected event"), - } - } - assert!(found_announcement); } #[test] @@ -4737,19 +4722,23 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage); nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None }); - let reestablish = get_event_msg!(nodes[3], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); + let reestablish = get_chan_reestablish_msgs!(nodes[3], nodes[0]).pop().unwrap(); nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None }); nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish); - let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), 1); - if let MessageSendEvent::HandleError { ref action, .. 
} = msg_events[0] { - match action { - &ErrorAction::SendErrorMessage { ref msg } => { - assert_eq!(msg.channel_id, channel_id); - }, - _ => panic!("Unexpected event!"), + let mut found_err = false; + for msg_event in nodes[0].node.get_and_clear_pending_msg_events() { + if let MessageSendEvent::HandleError { ref action, .. } = msg_event { + match action { + &ErrorAction::SendErrorMessage { ref msg } => { + assert_eq!(msg.channel_id, channel_id); + assert!(!found_err); + found_err = true; + }, + _ => panic!("Unexpected event!"), + } } } + assert!(found_err); } macro_rules! check_spendable_outputs { diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index a32b17b9f..a810731c6 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -896,6 +896,12 @@ pub trait ChannelMessageHandler : MessageSendEventsProvider { // Error: /// Handle an incoming error message from the given peer. fn handle_error(&self, their_node_id: &PublicKey, msg: &ErrorMessage); + + // Handler information: + /// Gets the node feature flags which this handler itself supports. All available handlers are + /// queried similarly and their feature flags are OR'd together to form the [`NodeFeatures`] + /// which are broadcasted in our node_announcement message. + fn provided_node_features(&self) -> NodeFeatures; } /// A trait to describe an object which can receive routing messages. diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 3666f3e79..dcad041f4 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -457,7 +457,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { // Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an // error, as the channel has hit the chain. nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None }); - let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); + let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish); let as_err = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(as_err.len(), 1); @@ -676,7 +676,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { // Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an // error, as the channel has hit the chain. 
nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None }); - let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); + let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish); let as_err = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(as_err.len(), 1); diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index f80c8984c..9d3b7aca8 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -17,7 +17,7 @@ use bitcoin::secp256k1::{self, Secp256k1, SecretKey, PublicKey}; -use ln::features::InitFeatures; +use ln::features::{InitFeatures, NodeFeatures}; use ln::msgs; use ln::msgs::{ChannelMessageHandler, LightningError, NetAddress, OnionMessageHandler, RoutingMessageHandler}; use ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager}; @@ -27,6 +27,7 @@ use ln::wire; use ln::wire::Encode; use routing::gossip::{NetworkGraph, P2PGossipSync}; use util::atomic_counter::AtomicCounter; +use util::crypto::sign; use util::events::{MessageSendEvent, MessageSendEventsProvider, OnionMessageProvider}; use util::logger::Logger; @@ -34,13 +35,14 @@ use prelude::*; use io; use alloc::collections::LinkedList; use sync::{Arc, Mutex, MutexGuard, FairRwLock}; -use core::sync::atomic::{AtomicBool, Ordering}; +use core::sync::atomic::{AtomicBool, AtomicU64, Ordering}; use core::{cmp, hash, fmt, mem}; use core::ops::Deref; use core::convert::Infallible; #[cfg(feature = "std")] use std::error; use bitcoin::hashes::sha256::Hash as Sha256; +use bitcoin::hashes::sha256d::Hash as Sha256dHash; use bitcoin::hashes::sha256::HashEngine as Sha256Engine; use bitcoin::hashes::{HashEngine, Hash}; @@ -200,6 +202,7 @@ impl ChannelMessageHandler for ErroringMessageHandler { fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {} fn peer_connected(&self, _their_node_id: &PublicKey, _msg: &msgs::Init) {} fn handle_error(&self, _their_node_id: &PublicKey, _msg: &msgs::ErrorMessage) {} + fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() } } impl Deref for ErroringMessageHandler { type Target = ErroringMessageHandler; @@ -505,6 +508,11 @@ pub struct PeerManager PeerManager Self { + pub fn new_channel_only(channel_message_handler: CM, onion_message_handler: OM, our_node_secret: SecretKey, current_time: u64, ephemeral_random_data: &[u8; 32], logger: L) -> Self { Self::new(MessageHandler { chan_handler: channel_message_handler, route_handler: IgnoringMessageHandler{}, onion_message_handler, - }, our_node_secret, ephemeral_random_data, logger, IgnoringMessageHandler{}) + }, our_node_secret, current_time, ephemeral_random_data, logger, IgnoringMessageHandler{}) } } @@ -569,16 +582,21 @@ impl PeerManager Self { + pub fn new_routing_only(routing_message_handler: RM, our_node_secret: SecretKey, current_time: u64, ephemeral_random_data: &[u8; 32], logger: L) -> Self { Self::new(MessageHandler { chan_handler: ErroringMessageHandler::new(), route_handler: routing_message_handler, onion_message_handler: IgnoringMessageHandler{}, - }, our_node_secret, ephemeral_random_data, logger, IgnoringMessageHandler{}) + }, our_node_secret, current_time, ephemeral_random_data, logger, IgnoringMessageHandler{}) } } @@ -632,7 +650,12 @@ impl, 
our_node_secret: SecretKey, ephemeral_random_data: &[u8; 32], logger: L, custom_message_handler: CMH) -> Self { + /// + /// `current_time` is used as an always-increasing counter that survives across restarts and is + /// incremented irregularly internally. In general it is best to simply use the current UNIX + /// timestamp, however if it is not available a persistent counter that increases once per + /// minute should suffice. + pub fn new(message_handler: MessageHandler, our_node_secret: SecretKey, current_time: u64, ephemeral_random_data: &[u8; 32], logger: L, custom_message_handler: CMH) -> Self { let mut ephemeral_key_midstate = Sha256::engine(); ephemeral_key_midstate.input(ephemeral_random_data); @@ -649,6 +672,7 @@ impl { + log_debug!(self.logger, "Handling SendChannelAnnouncement event in peer_handler for node {} for short channel id {}", + log_pubkey!(node_id), + msg.contents.short_channel_id); + self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); + self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), update_msg); + }, MessageSendEvent::BroadcastChannelAnnouncement { msg, update_msg } => { log_debug!(self.logger, "Handling BroadcastChannelAnnouncement event in peer_handler for short channel id {}", msg.contents.short_channel_id); match self.message_handler.route_handler.handle_channel_announcement(&msg) { @@ -1626,14 +1657,6 @@ impl {}, } }, - MessageSendEvent::BroadcastNodeAnnouncement { msg } => { - log_debug!(self.logger, "Handling BroadcastNodeAnnouncement event in peer_handler"); - match self.message_handler.route_handler.handle_node_announcement(&msg) { - Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) => - self.forward_broadcast_msg(peers, &wire::Message::NodeAnnouncement(msg), None), - _ => {}, - } - }, MessageSendEvent::BroadcastChannelUpdate { msg } => { log_debug!(self.logger, "Handling BroadcastChannelUpdate event in peer_handler for short channel id {}", msg.contents.short_channel_id); match self.message_handler.route_handler.handle_channel_update(&msg) { @@ -1899,6 +1922,63 @@ impl) { + if addresses.len() > 100 { + panic!("More than half the message size was taken up by public addresses!"); + } + + // While all existing nodes handle unsorted addresses just fine, the spec requires that + // addresses be sorted for future compatibility. 
+ addresses.sort_by_key(|addr| addr.get_id()); + + let announcement = msgs::UnsignedNodeAnnouncement { + features: self.message_handler.chan_handler.provided_node_features(), + timestamp: self.last_node_announcement_serial.fetch_add(1, Ordering::AcqRel) as u32, + node_id: PublicKey::from_secret_key(&self.secp_ctx, &self.our_node_secret), + rgb, alias, addresses, + excess_address_data: Vec::new(), + excess_data: Vec::new(), + }; + let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]); + let node_announce_sig = sign(&self.secp_ctx, &msghash, &self.our_node_secret); + + let msg = msgs::NodeAnnouncement { + signature: node_announce_sig, + contents: announcement + }; + + log_debug!(self.logger, "Broadcasting NodeAnnouncement after passing it to our own RoutingMessageHandler."); + let _ = self.message_handler.route_handler.handle_node_announcement(&msg); + self.forward_broadcast_msg(&*self.peers.read().unwrap(), &wire::Message::NodeAnnouncement(msg), None); + } } fn is_gossip_msg(type_id: u16) -> bool { @@ -1982,7 +2062,7 @@ mod tests { let node_secret = SecretKey::from_slice(&[42 + i as u8; 32]).unwrap(); let ephemeral_bytes = [i as u8; 32]; let msg_handler = MessageHandler { chan_handler: &cfgs[i].chan_handler, route_handler: &cfgs[i].routing_handler, onion_message_handler: IgnoringMessageHandler {} }; - let peer = PeerManager::new(msg_handler, node_secret, &ephemeral_bytes, &cfgs[i].logger, IgnoringMessageHandler {}); + let peer = PeerManager::new(msg_handler, node_secret, 0, &ephemeral_bytes, &cfgs[i].logger, IgnoringMessageHandler {}); peers.push(peer); } diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index 525908a40..acceb4395 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -144,8 +144,8 @@ fn test_priv_forwarding_rejection() { nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None }); nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None }); - let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); - let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); + let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); + let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish); get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); @@ -153,8 +153,8 @@ fn test_priv_forwarding_rejection() { nodes[1].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None }); nodes[2].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None }); - let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[2].node.get_our_node_id()); - let cs_reestablish = get_event_msg!(nodes[2], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); + let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], 
nodes[2]).pop().unwrap(); + let cs_reestablish = get_chan_reestablish_msgs!(nodes[2], nodes[1]).pop().unwrap(); nodes[2].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish); nodes[1].node.handle_channel_reestablish(&nodes[2].node.get_our_node_id(), &cs_reestablish); get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[2].node.get_our_node_id()); diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index 3e9f7e343..4f0675182 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -248,9 +248,9 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None }); - let node_0_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); + let node_0_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None }); - let node_1_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); + let node_1_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_reestablish); let node_1_2nd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); @@ -310,11 +310,11 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None }); - let node_1_2nd_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); + let node_1_2nd_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None }); if recv_count == 0 { // If all closing_signeds weren't delivered we can just resume where we left off... - let node_0_2nd_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); + let node_0_2nd_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_2nd_reestablish); let node_0_msgs = nodes[0].node.get_and_clear_pending_msg_events(); diff --git a/lightning/src/util/events.rs b/lightning/src/util/events.rs index be1979c77..8ddd762e9 100644 --- a/lightning/src/util/events.rs +++ b/lightning/src/util/events.rs @@ -1137,24 +1137,31 @@ pub enum MessageSendEvent { /// The message which should be sent. msg: msgs::ChannelReestablish, }, - /// Used to indicate that a channel_announcement and channel_update should be broadcast to all - /// peers (except the peer with node_id either msg.contents.node_id_1 or msg.contents.node_id_2). - /// - /// Note that after doing so, you very likely (unless you did so very recently) want to call - /// ChannelManager::broadcast_node_announcement to trigger a BroadcastNodeAnnouncement event. 
-	/// This ensures that any nodes which see our channel_announcement also have a relevant
-	/// node_announcement, including relevant feature flags which may be important for routing
-	/// through or to us.
-	BroadcastChannelAnnouncement {
+	/// Used to send a channel_announcement and channel_update to a specific peer, likely on
+	/// initial connection to ensure our peers know about our channels.
+	SendChannelAnnouncement {
+		/// The node_id of the node which should receive this message
+		node_id: PublicKey,
 		/// The channel_announcement which should be sent.
 		msg: msgs::ChannelAnnouncement,
 		/// The followup channel_update which should be sent.
 		update_msg: msgs::ChannelUpdate,
 	},
-	/// Used to indicate that a node_announcement should be broadcast to all peers.
-	BroadcastNodeAnnouncement {
-		/// The node_announcement which should be sent.
-		msg: msgs::NodeAnnouncement,
+	/// Used to indicate that a channel_announcement and channel_update should be broadcast to all
+	/// peers (except the peer with node_id either msg.contents.node_id_1 or msg.contents.node_id_2).
+	///
+	/// Note that after doing so, you very likely (unless you did so very recently) want to
+	/// broadcast a node_announcement (e.g. via [`PeerManager::broadcast_node_announcement`]). This
+	/// ensures that any nodes which see our channel_announcement also have a relevant
+	/// node_announcement, including relevant feature flags which may be important for routing
+	/// through or to us.
+	///
+	/// [`PeerManager::broadcast_node_announcement`]: crate::ln::peer_handler::PeerManager::broadcast_node_announcement
+	BroadcastChannelAnnouncement {
+		/// The channel_announcement which should be sent.
+		msg: msgs::ChannelAnnouncement,
+		/// The followup channel_update which should be sent.
+		update_msg: msgs::ChannelUpdate,
 	},
 	/// Used to indicate that a channel_update should be broadcast to all peers.
 	BroadcastChannelUpdate {
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index 7a32b13e6..42ac228c1 100644
--- a/lightning/src/util/test_utils.rs
+++ b/lightning/src/util/test_utils.rs
@@ -17,7 +17,7 @@ use chain::channelmonitor;
 use chain::channelmonitor::MonitorEvent;
 use chain::transaction::OutPoint;
 use chain::keysinterface;
-use ln::features::{ChannelFeatures, InitFeatures};
+use ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
 use ln::{msgs, wire};
 use ln::script::ShutdownScript;
 use routing::scoring::FixedPenaltyScorer;
@@ -357,6 +357,9 @@ impl msgs::ChannelMessageHandler for TestChannelMessageHandler {
 	fn handle_error(&self, _their_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
 		self.received_msg(wire::Message::Error(msg.clone()));
 	}
+	fn provided_node_features(&self) -> NodeFeatures {
+		NodeFeatures::empty()
+	}
 }
 
 impl events::MessageSendEventsProvider for TestChannelMessageHandler {
diff --git a/pending_changelog/1699.txt b/pending_changelog/1699.txt
new file mode 100644
index 000000000..75ed45d6e
--- /dev/null
+++ b/pending_changelog/1699.txt
@@ -0,0 +1,2 @@
+`broadcast_node_announcement` has been moved from `ChannelManager` to `PeerManager`.
+Any existing node that upgrades must pass a UNIX timestamp (not an arbitrary persistent counter) as `PeerManager::new`'s new `current_time` argument, so that node_announcements broadcast after the upgrade carry a higher timestamp than those broadcast before it.
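
For downstream users, a minimal call-site sketch of the migration described in `pending_changelog/1699.txt`. The `message_handler`, `our_node_secret`, `ephemeral_bytes`, and `logger` bindings are placeholders for whatever the application already constructs; only the new `current_time` argument and the relocated `broadcast_node_announcement` call are taken from this patch:

```rust
use std::time::{SystemTime, UNIX_EPOCH};
use lightning::ln::peer_handler::{IgnoringMessageHandler, PeerManager};

// Seed the node_announcement timestamp counter with a UNIX timestamp. For a
// node upgrading from an earlier release this keeps announcement timestamps
// monotonically increasing across the upgrade.
let current_time = SystemTime::now()
	.duration_since(UNIX_EPOCH)
	.expect("system clock is set before the UNIX epoch")
	.as_secs();

// `current_time` is the new third argument introduced by this patch; the other
// arguments are unchanged from the previous PeerManager::new signature.
let peer_manager = PeerManager::new(
	message_handler,
	our_node_secret,
	current_time,
	&ephemeral_bytes,
	logger,
	IgnoringMessageHandler {},
);

// node_announcement broadcast now lives on PeerManager instead of ChannelManager.
peer_manager.broadcast_node_announcement([0u8; 3], [0u8; 32], Vec::new());
```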
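
Separately, `ChannelMessageHandler` gains a required `provided_node_features` method, so any out-of-tree implementation of the trait needs a small addition on upgrade. An illustrative fragment (the `MyHandler` type and the choice of empty features are assumptions, mirroring what this patch does for `TestChannelMessageHandler`; `ChannelManager` itself returns `NodeFeatures::known()`):

```rust
use lightning::ln::features::NodeFeatures;
use lightning::ln::msgs::ChannelMessageHandler;

impl ChannelMessageHandler for MyHandler {
	// ... existing handler methods stay unchanged ...

	// New in this patch: each handler reports the feature flags it supports,
	// which feed into the features advertised in our node_announcement.
	fn provided_node_features(&self) -> NodeFeatures {
		// This handler advertises no features of its own.
		NodeFeatures::empty()
	}
}
```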