From 7608483b0f5894cf2001af02248adac28bd2b269 Mon Sep 17 00:00:00 2001 From: Antoine Riard Date: Mon, 4 Nov 2019 19:09:51 -0500 Subject: [PATCH 1/2] Rename HandleError to LightningError to stress already-processed error --- fuzz/fuzz_targets/chanmon_fail_consistency.rs | 4 +- src/ln/chanmon_update_fail_tests.rs | 34 ++++---- src/ln/channelmanager.rs | 70 ++++++++--------- src/ln/functional_tests.rs | 38 ++++----- src/ln/msgs.rs | 44 +++++------ src/ln/peer_channel_encryptor.rs | 26 +++---- src/ln/router.rs | 40 +++++----- src/util/test_utils.rs | 78 +++++++++---------- 8 files changed, 167 insertions(+), 167 deletions(-) diff --git a/fuzz/fuzz_targets/chanmon_fail_consistency.rs b/fuzz/fuzz_targets/chanmon_fail_consistency.rs index 3610b1ca6..28f03b639 100644 --- a/fuzz/fuzz_targets/chanmon_fail_consistency.rs +++ b/fuzz/fuzz_targets/chanmon_fail_consistency.rs @@ -37,7 +37,7 @@ use lightning::ln::channelmonitor; use lightning::ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, HTLCUpdate}; use lightning::ln::channelmanager::{ChannelManager, PaymentHash, PaymentPreimage, ChannelManagerReadArgs}; use lightning::ln::router::{Route, RouteHop}; -use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, ErrorAction, HandleError, UpdateAddHTLC, LocalFeatures}; +use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, ErrorAction, LightningError, UpdateAddHTLC, LocalFeatures}; use lightning::util::events; use lightning::util::logger::Logger; use lightning::util::config::UserConfig; @@ -392,7 +392,7 @@ pub fn do_test(data: &[u8]) { ($res: expr) => { match $res { Ok(()) => {}, - Err(HandleError { action: Some(ErrorAction::IgnoreError), .. }) => { }, + Err(LightningError { action: Some(ErrorAction::IgnoreError), .. }) => { }, _ => { $res.unwrap() }, } } diff --git a/src/ln/chanmon_update_fail_tests.rs b/src/ln/chanmon_update_fail_tests.rs index 137d98346..af676edb2 100644 --- a/src/ln/chanmon_update_fail_tests.rs +++ b/src/ln/chanmon_update_fail_tests.rs @@ -190,7 +190,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { _ => panic!("Unexpected event"), } - if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed) { + if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed) { assert_eq!(err, "Previous monitor update failure prevented generation of RAA"); } else { panic!(); } } @@ -485,7 +485,7 @@ fn test_monitor_update_fail_cs() { nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap(); *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); - if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg).unwrap_err() { + if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg).unwrap_err() { assert_eq!(err, "Failed to update ChannelMonitor"); } else { panic!(); } check_added_monitors!(nodes[1], 1); @@ -515,7 +515,7 @@ fn test_monitor_update_fail_cs() { assert_eq!(*node_id, nodes[0].node.get_our_node_id()); *nodes[0].chan_monitor.update_ret.lock().unwrap() = 
Err(ChannelMonitorUpdateErr::TemporaryFailure); - if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed).unwrap_err() { + if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed).unwrap_err() { assert_eq!(err, "Failed to update ChannelMonitor"); } else { panic!(); } check_added_monitors!(nodes[0], 1); @@ -565,7 +565,7 @@ fn test_monitor_update_fail_no_rebroadcast() { let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true); *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); - if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa).unwrap_err() { + if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa).unwrap_err() { assert_eq!(err, "Failed to update ChannelMonitor"); } else { panic!(); } assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -618,12 +618,12 @@ fn test_monitor_update_raa_while_paused() { *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]).unwrap(); - if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg).unwrap_err() { + if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg).unwrap_err() { assert_eq!(err, "Failed to update ChannelMonitor"); } else { panic!(); } check_added_monitors!(nodes[0], 1); - if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap_err() { + if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap_err() { assert_eq!(err, "Previous monitor update failure prevented responses to RAA"); } else { panic!(); } check_added_monitors!(nodes[0], 1); @@ -704,7 +704,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Now fail monitor updating. 
*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); - if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap_err() { + if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap_err() { assert_eq!(err, "Failed to update ChannelMonitor"); } else { panic!(); } assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); @@ -768,7 +768,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]).unwrap(); - if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg) { + if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg) { assert_eq!(err, "Previous monitor update failure prevented generation of RAA"); } else { panic!(); } assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -945,7 +945,7 @@ fn test_monitor_update_fail_reestablish() { nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish).unwrap(); - if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap_err() { + if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap_err() { assert_eq!(err, "Failed to update ChannelMonitor"); } else { panic!(); } check_added_monitors!(nodes[1], 1); @@ -1033,12 +1033,12 @@ fn raa_no_response_awaiting_raa_state() { // then restore channel monitor updates. *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); - if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() { + if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() { assert_eq!(err, "Failed to update ChannelMonitor"); } else { panic!(); } check_added_monitors!(nodes[1], 1); - if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() { + if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() { assert_eq!(err, "Previous monitor update failure prevented responses to RAA"); } else { panic!(); } check_added_monitors!(nodes[1], 1); @@ -1130,7 +1130,7 @@ fn claim_while_disconnected_monitor_update_fail() { // update. 
*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); - if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect).unwrap_err() { + if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect).unwrap_err() { assert_eq!(err, "Failed to update ChannelMonitor"); } else { panic!(); } check_added_monitors!(nodes[1], 1); @@ -1145,7 +1145,7 @@ fn claim_while_disconnected_monitor_update_fail() { let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]).unwrap(); - if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed).unwrap_err() { + if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed).unwrap_err() { assert_eq!(err, "Previous monitor update failure prevented generation of RAA"); } else { panic!(); } // Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC @@ -1235,7 +1235,7 @@ fn monitor_failed_no_reestablish_response() { assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); - if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() { + if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() { assert_eq!(err, "Failed to update ChannelMonitor"); } else { panic!(); } check_added_monitors!(nodes[1], 1); @@ -1327,7 +1327,7 @@ fn first_message_on_recv_ordering() { // Deliver the final RAA for the first payment, which does not require a response. RAAs // generally require a commitment_signed, so the fact that we're expecting an opposite response // to the next message also tests resetting the delivery order. - if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() { + if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() { assert_eq!(err, "Failed to update ChannelMonitor"); } else { panic!(); } check_added_monitors!(nodes[1], 1); @@ -1336,7 +1336,7 @@ fn first_message_on_recv_ordering() { // RAA/CS response, which should be generated when we call test_restore_channel_monitor (with // the appropriate HTLC acceptance). 
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); - if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() { + if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() { assert_eq!(err, "Previous monitor update failure prevented generation of RAA"); } else { panic!(); } @@ -1597,7 +1597,7 @@ fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails: } let funding_signed_res = nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id())); if fail_on_signed || !restore_between_fails { - if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = funding_signed_res.unwrap_err() { + if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = funding_signed_res.unwrap_err() { if fail_on_generate && !restore_between_fails { assert_eq!(err, "Previous monitor update failure prevented funding_signed from allowing funding broadcast"); check_added_monitors!(nodes[0], 0); diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs index a957a96df..156b31964 100644 --- a/src/ln/channelmanager.rs +++ b/src/ln/channelmanager.rs @@ -33,7 +33,7 @@ use ln::router::Route; use ln::msgs; use ln::msgs::LocalFeatures; use ln::onion_utils; -use ln::msgs::{ChannelMessageHandler, DecodeError, HandleError}; +use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError}; use chain::keysinterface::KeysInterface; use util::config::UserConfig; use util::{byte_utils, events}; @@ -118,7 +118,7 @@ impl HTLCSource { #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug pub(super) enum HTLCFailReason { - ErrorPacket { + LightningError { err: msgs::OnionErrorPacket, }, Reason { @@ -143,14 +143,14 @@ type ShutdownResult = (Vec, Vec<(HTLCSource, PaymentHash)>); /// this struct and call handle_error!() on it. 
struct MsgHandleErrInternal { - err: msgs::HandleError, + err: msgs::LightningError, shutdown_finish: Option<(ShutdownResult, Option)>, } impl MsgHandleErrInternal { #[inline] fn send_err_msg_no_close(err: &'static str, channel_id: [u8; 32]) -> Self { Self { - err: HandleError { + err: LightningError { err, action: Some(msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { @@ -165,7 +165,7 @@ impl MsgHandleErrInternal { #[inline] fn ignore_no_close(err: &'static str) -> Self { Self { - err: HandleError { + err: LightningError { err, action: Some(msgs::ErrorAction::IgnoreError), }, @@ -173,13 +173,13 @@ impl MsgHandleErrInternal { } } #[inline] - fn from_no_close(err: msgs::HandleError) -> Self { + fn from_no_close(err: msgs::LightningError) -> Self { Self { err, shutdown_finish: None } } #[inline] fn from_finish_shutdown(err: &'static str, channel_id: [u8; 32], shutdown_res: ShutdownResult, channel_update: Option) -> Self { Self { - err: HandleError { + err: LightningError { err, action: Some(msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { @@ -195,11 +195,11 @@ impl MsgHandleErrInternal { fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self { Self { err: match err { - ChannelError::Ignore(msg) => HandleError { + ChannelError::Ignore(msg) => LightningError { err: msg, action: Some(msgs::ErrorAction::IgnoreError), }, - ChannelError::Close(msg) => HandleError { + ChannelError::Close(msg) => LightningError { err: msg, action: Some(msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { @@ -208,7 +208,7 @@ impl MsgHandleErrInternal { }, }), }, - ChannelError::CloseDelayBroadcast { msg, .. } => HandleError { + ChannelError::CloseDelayBroadcast { msg, .. } => LightningError { err: msg, action: Some(msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { @@ -1009,9 +1009,9 @@ impl ChannelManager { /// only fails if the channel does not yet have an assigned short_id /// May be called with channel_state already locked! 
- fn get_channel_update(&self, chan: &Channel) -> Result { + fn get_channel_update(&self, chan: &Channel) -> Result { let short_channel_id = match chan.get_short_channel_id() { - None => return Err(HandleError{err: "Channel not yet established", action: None}), + None => return Err(LightningError{err: "Channel not yet established", action: None}), Some(id) => id, }; @@ -1488,7 +1488,7 @@ impl ChannelManager { log_trace!(self, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0)); mem::drop(channel_state_lock); match &onion_error { - &HTLCFailReason::ErrorPacket { ref err } => { + &HTLCFailReason::LightningError { ref err } => { #[cfg(test)] let (channel_update, payment_retryable, onion_error_code) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone()); #[cfg(not(test))] @@ -1541,8 +1541,8 @@ impl ChannelManager { let packet = onion_utils::build_failure_packet(&incoming_packet_shared_secret, failure_code, &data[..]).encode(); onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &packet) }, - HTLCFailReason::ErrorPacket { err } => { - log_trace!(self, "Failing HTLC with payment_hash {} backwards with pre-built ErrorPacket", log_bytes!(payment_hash.0)); + HTLCFailReason::LightningError { err } => { + log_trace!(self, "Failing HTLC with payment_hash {} backwards with pre-built LightningError", log_bytes!(payment_hash.0)); onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &err.data) } }; @@ -2118,7 +2118,7 @@ impl ChannelManager { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::ErrorPacket { err: msg.reason.clone() }), channel_state, chan); + try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::LightningError { err: msg.reason.clone() }), channel_state, chan); }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } @@ -2292,7 +2292,7 @@ impl ChannelManager { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } if !chan.get().is_usable() { - return Err(MsgHandleErrInternal::from_no_close(HandleError{err: "Got an announcement_signatures before we were ready for it", action: Some(msgs::ErrorAction::IgnoreError)})); + return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it", action: Some(msgs::ErrorAction::IgnoreError)})); } let our_node_id = self.get_our_node_id(); @@ -2626,82 +2626,82 @@ impl ChainListener for ChannelManager { impl ChannelMessageHandler for ChannelManager { //TODO: Handle errors and close channel (or so) - fn handle_open_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::OpenChannel) -> Result<(), HandleError> { + fn handle_open_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::OpenChannel) -> Result<(), LightningError> { let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_open_channel(their_node_id, their_local_features, msg)) } - fn handle_accept_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::AcceptChannel) -> Result<(), HandleError> { + fn 
handle_accept_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::AcceptChannel) -> Result<(), LightningError> { let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_accept_channel(their_node_id, their_local_features, msg)) } - fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), HandleError> { + fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), LightningError> { let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_funding_created(their_node_id, msg)) } - fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), HandleError> { + fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), LightningError> { let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_funding_signed(their_node_id, msg)) } - fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), HandleError> { + fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), LightningError> { let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_funding_locked(their_node_id, msg)) } - fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), HandleError> { + fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), LightningError> { let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_shutdown(their_node_id, msg)) } - fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), HandleError> { + fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), LightningError> { let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_closing_signed(their_node_id, msg)) } - fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), msgs::HandleError> { + fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), LightningError> { let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_update_add_htlc(their_node_id, msg)) } - fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), HandleError> { + fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), LightningError> { let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_update_fulfill_htlc(their_node_id, msg)) } - fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), HandleError> { + fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), LightningError> { let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_update_fail_htlc(their_node_id, msg)) } - fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), HandleError> { + fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), LightningError> { let _ = self.total_consistency_lock.read().unwrap(); 
handle_error!(self, self.internal_update_fail_malformed_htlc(their_node_id, msg)) } - fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), HandleError> { + fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), LightningError> { let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_commitment_signed(their_node_id, msg)) } - fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), HandleError> { + fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), LightningError> { let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_revoke_and_ack(their_node_id, msg)) } - fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), HandleError> { + fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), LightningError> { let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_update_fee(their_node_id, msg)) } - fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), HandleError> { + fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), LightningError> { let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_announcement_signatures(their_node_id, msg)) } - fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), HandleError> { + fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), LightningError> { let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_channel_reestablish(their_node_id, msg)) } @@ -2946,7 +2946,7 @@ impl Readable for HTLCSource { impl Writeable for HTLCFailReason { fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { match self { - &HTLCFailReason::ErrorPacket { ref err } => { + &HTLCFailReason::LightningError { ref err } => { 0u8.write(writer)?; err.write(writer)?; }, @@ -2963,7 +2963,7 @@ impl Writeable for HTLCFailReason { impl Readable for HTLCFailReason { fn read(reader: &mut R) -> Result { match >::read(reader)? { - 0 => Ok(HTLCFailReason::ErrorPacket { err: Readable::read(reader)? }), + 0 => Ok(HTLCFailReason::LightningError { err: Readable::read(reader)? 
}), 1 => Ok(HTLCFailReason::Reason { failure_code: Readable::read(reader)?, data: Readable::read(reader)?, diff --git a/src/ln/functional_tests.rs b/src/ln/functional_tests.rs index 044c16730..611b94f1a 100644 --- a/src/ln/functional_tests.rs +++ b/src/ln/functional_tests.rs @@ -70,10 +70,10 @@ fn test_insane_channel_opens() { // that supposedly makes the channel open message insane let insane_open_helper = |expected_error_str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| { match nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), LocalFeatures::new(), &message_mutator(open_channel_message.clone())) { - Err(msgs::HandleError{ err: error_str, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) => { - assert_eq!(error_str, expected_error_str, "unexpected HandleError string (expected `{}`, actual `{}`)", expected_error_str, error_str) + Err(msgs::LightningError{ err: error_str, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) => { + assert_eq!(error_str, expected_error_str, "unexpected LightningError string (expected `{}`, actual `{}`)", expected_error_str, error_str) }, - Err(msgs::HandleError{..}) => {panic!("unexpected HandleError action")}, + Err(msgs::LightningError{..}) => {panic!("unexpected LightningError action")}, _ => panic!("insane OpenChannel message was somehow Ok"), } }; @@ -952,7 +952,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { // transaction. assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - if let Err(msgs::HandleError{action: Some(msgs::ErrorAction::SendErrorMessage{msg}), ..}) = + if let Err(msgs::LightningError{action: Some(msgs::ErrorAction::SendErrorMessage{msg}), ..}) = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish) { nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msg); let msgs::ErrorMessage {ref channel_id, ..} = msg; @@ -1246,7 +1246,7 @@ fn duplicate_htlc_test() { } fn do_channel_reserve_test(test_recv: bool) { - use ln::msgs::HandleError; + use ln::msgs::LightningError; let mut nodes = create_network(3, &[None, None, None]); let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1900, 1001, LocalFeatures::new(), LocalFeatures::new()); @@ -1382,7 +1382,7 @@ fn do_channel_reserve_test(test_recv: bool) { if test_recv { let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg).err().unwrap(); match err { - HandleError{err, .. } => assert_eq!(err, "Remote HTLC add would put them over their reserve value"), + LightningError{err, .. } => assert_eq!(err, "Remote HTLC add would put them over their reserve value"), } // If we send a garbage message, the channel should get closed, making the rest of this test case fail. assert_eq!(nodes[1].node.list_channels().len(), 1); @@ -3455,7 +3455,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id()); let reestablish = get_event_msg!(nodes[3], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id()); - if let Err(msgs::HandleError { action: Some(msgs::ErrorAction::SendErrorMessage { msg }), .. }) = nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish) { + if let Err(msgs::LightningError { action: Some(msgs::ErrorAction::SendErrorMessage { msg }), .. 
}) = nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish) { assert_eq!(msg.channel_id, channel_id); } else { panic!("Unexpected result"); } } @@ -5101,7 +5101,7 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() { let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat-1; let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { + if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { assert_eq!(err, "Remote side tried to send less than our minimum HTLC value"); } else { assert!(false); @@ -5127,7 +5127,7 @@ fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() { updates.update_add_htlcs[0].amount_msat = 5000000-their_channel_reserve+1; let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { + if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { assert_eq!(err, "Remote HTLC add would put them over their reserve value"); } else { assert!(false); @@ -5174,7 +5174,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { msg.htlc_id = (super::channel::OUR_MAX_HTLCS) as u64; let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg); - if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { + if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { assert_eq!(err, "Remote tried to push more than our max accepted HTLCs"); } else { assert!(false); @@ -5197,7 +5197,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() { updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], chan.2).their_max_htlc_value_in_flight_msat + 1; let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { + if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { assert_eq!(err,"Remote HTLC add would put them over our max HTLC value"); } else { assert!(false); @@ -5220,7 +5220,7 @@ fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() { updates.update_add_htlcs[0].cltv_expiry = 500000000; let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { + if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { assert_eq!(err,"Remote provided CLTV expiry in seconds instead of block height"); } else { assert!(false); @@ -5266,7 +5266,7 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() { let _bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { + if 
let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { assert_eq!(err, "Remote skipped HTLC ID"); } else { assert!(false); @@ -5298,7 +5298,7 @@ fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() { let err = nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msg); - if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { + if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { assert_eq!(err, "Remote tried to fulfill/fail HTLC before it had been committed"); } else { assert!(false); @@ -5330,7 +5330,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() { let err = nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_msg); - if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { + if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { assert_eq!(err, "Remote tried to fulfill/fail HTLC before it had been committed"); } else { assert!(false); @@ -5363,7 +5363,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() let err = nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg); - if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { + if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { assert_eq!(err, "Remote tried to fulfill/fail HTLC before it had been committed"); } else { assert!(false); @@ -5404,7 +5404,7 @@ fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() { update_fulfill_msg.htlc_id = 1; let err = nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg); - if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { + if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { assert_eq!(err, "Remote tried to fulfill/fail an HTLC we couldn't find"); } else { assert!(false); @@ -5445,7 +5445,7 @@ fn test_update_fulfill_htlc_bolt2_wrong_preimage() { update_fulfill_msg.payment_preimage = PaymentPreimage([1; 32]); let err = nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg); - if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { + if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { assert_eq!(err, "Remote tried to fulfill HTLC with an incorrect preimage"); } else { assert!(false); @@ -5491,7 +5491,7 @@ fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_messag }; update_msg.failure_code &= !0x8000; let err = nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg); - if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { + if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { assert_eq!(err, "Got update_fail_malformed_htlc with BADONION not set"); } else { assert!(false); diff --git a/src/ln/msgs.rs b/src/ln/msgs.rs index 0e6c4b95e..6939af13e 100644 --- a/src/ln/msgs.rs +++ b/src/ln/msgs.rs @@ -525,7 +525,7 @@ pub struct ChannelUpdate { pub(crate) contents: UnsignedChannelUpdate, } -/// Used to 
put an error message in a HandleError +/// Used to put an error message in a LightningError #[derive(Clone)] pub enum ErrorAction { /// The peer took some action which made us think they were useless. Disconnect them. @@ -543,7 +543,7 @@ pub enum ErrorAction { } /// An Err type for failure to process messages. -pub struct HandleError { //TODO: rename me +pub struct LightningError { /// A human-readable message describing the error pub err: &'static str, /// The action which should be taken against the offending peer. @@ -616,42 +616,42 @@ pub enum OptionalField { pub trait ChannelMessageHandler : events::MessageSendEventsProvider + Send + Sync { //Channel init: /// Handle an incoming open_channel message from the given peer. - fn handle_open_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &OpenChannel) -> Result<(), HandleError>; + fn handle_open_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &OpenChannel) -> Result<(), LightningError>; /// Handle an incoming accept_channel message from the given peer. - fn handle_accept_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &AcceptChannel) -> Result<(), HandleError>; + fn handle_accept_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &AcceptChannel) -> Result<(), LightningError>; /// Handle an incoming funding_created message from the given peer. - fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &FundingCreated) -> Result<(), HandleError>; + fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &FundingCreated) -> Result<(), LightningError>; /// Handle an incoming funding_signed message from the given peer. - fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &FundingSigned) -> Result<(), HandleError>; + fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &FundingSigned) -> Result<(), LightningError>; /// Handle an incoming funding_locked message from the given peer. - fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &FundingLocked) -> Result<(), HandleError>; + fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &FundingLocked) -> Result<(), LightningError>; // Channl close: /// Handle an incoming shutdown message from the given peer. - fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &Shutdown) -> Result<(), HandleError>; + fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &Shutdown) -> Result<(), LightningError>; /// Handle an incoming closing_signed message from the given peer. - fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &ClosingSigned) -> Result<(), HandleError>; + fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &ClosingSigned) -> Result<(), LightningError>; // HTLC handling: /// Handle an incoming update_add_htlc message from the given peer. - fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &UpdateAddHTLC) -> Result<(), HandleError>; + fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &UpdateAddHTLC) -> Result<(), LightningError>; /// Handle an incoming update_fulfill_htlc message from the given peer. - fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &UpdateFulfillHTLC) -> Result<(), HandleError>; + fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &UpdateFulfillHTLC) -> Result<(), LightningError>; /// Handle an incoming update_fail_htlc message from the given peer. 
- fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &UpdateFailHTLC) -> Result<(), HandleError>; + fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &UpdateFailHTLC) -> Result<(), LightningError>; /// Handle an incoming update_fail_malformed_htlc message from the given peer. - fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &UpdateFailMalformedHTLC) -> Result<(), HandleError>; + fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &UpdateFailMalformedHTLC) -> Result<(), LightningError>; /// Handle an incoming commitment_signed message from the given peer. - fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &CommitmentSigned) -> Result<(), HandleError>; + fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &CommitmentSigned) -> Result<(), LightningError>; /// Handle an incoming revoke_and_ack message from the given peer. - fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &RevokeAndACK) -> Result<(), HandleError>; + fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &RevokeAndACK) -> Result<(), LightningError>; /// Handle an incoming update_fee message from the given peer. - fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &UpdateFee) -> Result<(), HandleError>; + fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &UpdateFee) -> Result<(), LightningError>; // Channel-to-announce: /// Handle an incoming announcement_signatures message from the given peer. - fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &AnnouncementSignatures) -> Result<(), HandleError>; + fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &AnnouncementSignatures) -> Result<(), LightningError>; // Connection loss/reestablish: /// Indicates a connection to the peer failed/an existing connection was lost. If no connection @@ -663,7 +663,7 @@ pub trait ChannelMessageHandler : events::MessageSendEventsProvider + Send + Syn /// Handle a peer reconnecting, possibly generating channel_reestablish message(s). fn peer_connected(&self, their_node_id: &PublicKey); /// Handle an incoming channel_reestablish message from the given peer. - fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &ChannelReestablish) -> Result<(), HandleError>; + fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &ChannelReestablish) -> Result<(), LightningError>; // Error: /// Handle an incoming error message from the given peer. @@ -674,13 +674,13 @@ pub trait ChannelMessageHandler : events::MessageSendEventsProvider + Send + Syn pub trait RoutingMessageHandler : Send + Sync { /// Handle an incoming node_announcement message, returning true if it should be forwarded on, /// false or returning an Err otherwise. - fn handle_node_announcement(&self, msg: &NodeAnnouncement) -> Result; + fn handle_node_announcement(&self, msg: &NodeAnnouncement) -> Result; /// Handle a channel_announcement message, returning true if it should be forwarded on, false /// or returning an Err otherwise. - fn handle_channel_announcement(&self, msg: &ChannelAnnouncement) -> Result; + fn handle_channel_announcement(&self, msg: &ChannelAnnouncement) -> Result; /// Handle an incoming channel_update message, returning true if it should be forwarded on, /// false or returning an Err otherwise. 
- fn handle_channel_update(&self, msg: &ChannelUpdate) -> Result; + fn handle_channel_update(&self, msg: &ChannelUpdate) -> Result; /// Handle some updates to the route graph that we learned due to an outbound failed payment. fn handle_htlc_fail_channel_update(&self, update: &HTLCFailChannelUpdate); /// Gets a subset of the channel announcements and updates required to dump our routing table @@ -770,7 +770,7 @@ impl fmt::Display for DecodeError { } } -impl fmt::Debug for HandleError { +impl fmt::Debug for LightningError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str(self.err) } diff --git a/src/ln/peer_channel_encryptor.rs b/src/ln/peer_channel_encryptor.rs index 9d716a2dc..ac23041a0 100644 --- a/src/ln/peer_channel_encryptor.rs +++ b/src/ln/peer_channel_encryptor.rs @@ -1,4 +1,4 @@ -use ln::msgs::HandleError; +use ln::msgs::LightningError; use ln::msgs; use bitcoin_hashes::{Hash, HashEngine, Hmac, HmacEngine}; @@ -133,13 +133,13 @@ impl PeerChannelEncryptor { } #[inline] - fn decrypt_with_ad(res: &mut[u8], n: u64, key: &[u8; 32], h: &[u8], cyphertext: &[u8]) -> Result<(), HandleError> { + fn decrypt_with_ad(res: &mut[u8], n: u64, key: &[u8; 32], h: &[u8], cyphertext: &[u8]) -> Result<(), LightningError> { let mut nonce = [0; 12]; nonce[4..].copy_from_slice(&byte_utils::le64_to_array(n)); let mut chacha = ChaCha20Poly1305RFC::new(key, &nonce, h); if !chacha.decrypt(&cyphertext[0..cyphertext.len() - 16], res, &cyphertext[cyphertext.len() - 16..]) { - return Err(HandleError{err: "Bad MAC", action: Some(msgs::ErrorAction::DisconnectPeer{ msg: None })}); + return Err(LightningError{err: "Bad MAC", action: Some(msgs::ErrorAction::DisconnectPeer{ msg: None })}); } Ok(()) } @@ -189,15 +189,15 @@ impl PeerChannelEncryptor { } #[inline] - fn inbound_noise_act(state: &mut BidirectionalNoiseState, act: &[u8], our_key: &SecretKey) -> Result<(PublicKey, [u8; 32]), HandleError> { + fn inbound_noise_act(state: &mut BidirectionalNoiseState, act: &[u8], our_key: &SecretKey) -> Result<(PublicKey, [u8; 32]), LightningError> { assert_eq!(act.len(), 50); if act[0] != 0 { - return Err(HandleError{err: "Unknown handshake version number", action: Some(msgs::ErrorAction::DisconnectPeer{ msg: None })}); + return Err(LightningError{err: "Unknown handshake version number", action: Some(msgs::ErrorAction::DisconnectPeer{ msg: None })}); } let their_pub = match PublicKey::from_slice(&act[1..34]) { - Err(_) => return Err(HandleError{err: "Invalid public key", action: Some(msgs::ErrorAction::DisconnectPeer{ msg: None })}), + Err(_) => return Err(LightningError{err: "Invalid public key", action: Some(msgs::ErrorAction::DisconnectPeer{ msg: None })}), Ok(key) => key, }; @@ -239,7 +239,7 @@ impl PeerChannelEncryptor { } } - pub fn process_act_one_with_keys(&mut self, act_one: &[u8], our_node_secret: &SecretKey, our_ephemeral: SecretKey) -> Result<[u8; 50], HandleError> { + pub fn process_act_one_with_keys(&mut self, act_one: &[u8], our_node_secret: &SecretKey, our_ephemeral: SecretKey) -> Result<[u8; 50], LightningError> { assert_eq!(act_one.len(), 50); match self.noise_state { @@ -266,7 +266,7 @@ impl PeerChannelEncryptor { } } - pub fn process_act_two(&mut self, act_two: &[u8], our_node_secret: &SecretKey) -> Result<([u8; 66], PublicKey), HandleError> { + pub fn process_act_two(&mut self, act_two: &[u8], our_node_secret: &SecretKey) -> Result<([u8; 66], PublicKey), LightningError> { assert_eq!(act_two.len(), 50); let final_hkdf; @@ -317,7 +317,7 @@ impl PeerChannelEncryptor { Ok((res, 
self.their_node_id.unwrap().clone())) } - pub fn process_act_three(&mut self, act_three: &[u8]) -> Result { + pub fn process_act_three(&mut self, act_three: &[u8]) -> Result { assert_eq!(act_three.len(), 66); let final_hkdf; @@ -330,14 +330,14 @@ impl PeerChannelEncryptor { panic!("Requested act at wrong step"); } if act_three[0] != 0 { - return Err(HandleError{err: "Unknown handshake version number", action: Some(msgs::ErrorAction::DisconnectPeer{ msg: None })}); + return Err(LightningError{err: "Unknown handshake version number", action: Some(msgs::ErrorAction::DisconnectPeer{ msg: None })}); } let mut their_node_id = [0; 33]; PeerChannelEncryptor::decrypt_with_ad(&mut their_node_id, 1, &temp_k2.unwrap(), &bidirectional_state.h, &act_three[1..50])?; self.their_node_id = Some(match PublicKey::from_slice(&their_node_id) { Ok(key) => key, - Err(_) => return Err(HandleError{err: "Bad node_id from peer", action: Some(msgs::ErrorAction::DisconnectPeer{ msg: None })}), + Err(_) => return Err(LightningError{err: "Bad node_id from peer", action: Some(msgs::ErrorAction::DisconnectPeer{ msg: None })}), }); let mut sha = Sha256::engine(); @@ -403,7 +403,7 @@ impl PeerChannelEncryptor { /// Decrypts a message length header from the remote peer. /// panics if noise handshake has not yet finished or msg.len() != 18 - pub fn decrypt_length_header(&mut self, msg: &[u8]) -> Result { + pub fn decrypt_length_header(&mut self, msg: &[u8]) -> Result { assert_eq!(msg.len(), 16+2); match self.noise_state { @@ -426,7 +426,7 @@ impl PeerChannelEncryptor { /// Decrypts the given message. /// panics if msg.len() > 65535 + 16 - pub fn decrypt_message(&mut self, msg: &[u8]) -> Result, HandleError> { + pub fn decrypt_message(&mut self, msg: &[u8]) -> Result, LightningError> { if msg.len() > 65535 + 16 { panic!("Attempted to encrypt message longer than 65535 bytes!"); } diff --git a/src/ln/router.rs b/src/ln/router.rs index bb20c31c3..5071919c5 100644 --- a/src/ln/router.rs +++ b/src/ln/router.rs @@ -14,7 +14,7 @@ use bitcoin::blockdata::opcodes; use chain::chaininterface::{ChainError, ChainWatchInterface}; use ln::channelmanager; -use ln::msgs::{DecodeError,ErrorAction,HandleError,RoutingMessageHandler,NetAddress,GlobalFeatures}; +use ln::msgs::{DecodeError,ErrorAction,LightningError,RoutingMessageHandler,NetAddress,GlobalFeatures}; use ln::msgs; use util::ser::{Writeable, Readable, Writer, ReadableArgs}; use util::logger::Logger; @@ -404,13 +404,13 @@ macro_rules! 
secp_verify_sig { ( $secp_ctx: expr, $msg: expr, $sig: expr, $pubkey: expr ) => { match $secp_ctx.verify($msg, $sig, $pubkey) { Ok(_) => {}, - Err(_) => return Err(HandleError{err: "Invalid signature from remote node", action: None}), + Err(_) => return Err(LightningError{err: "Invalid signature from remote node", action: None}), } }; } impl RoutingMessageHandler for Router { - fn handle_node_announcement(&self, msg: &msgs::NodeAnnouncement) -> Result { + fn handle_node_announcement(&self, msg: &msgs::NodeAnnouncement) -> Result { let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.signature, &msg.contents.node_id); @@ -420,10 +420,10 @@ impl RoutingMessageHandler for Router { let mut network = self.network_map.write().unwrap(); match network.nodes.get_mut(&msg.contents.node_id) { - None => Err(HandleError{err: "No existing channels for node_announcement", action: Some(ErrorAction::IgnoreError)}), + None => Err(LightningError{err: "No existing channels for node_announcement", action: Some(ErrorAction::IgnoreError)}), Some(node) => { if node.last_update >= msg.contents.timestamp { - return Err(HandleError{err: "Update older than last processed update", action: Some(ErrorAction::IgnoreError)}); + return Err(LightningError{err: "Update older than last processed update", action: Some(ErrorAction::IgnoreError)}); } node.features = msg.contents.features.clone(); @@ -439,9 +439,9 @@ impl RoutingMessageHandler for Router { } } - fn handle_channel_announcement(&self, msg: &msgs::ChannelAnnouncement) -> Result { + fn handle_channel_announcement(&self, msg: &msgs::ChannelAnnouncement) -> Result { if msg.contents.node_id_1 == msg.contents.node_id_2 || msg.contents.bitcoin_key_1 == msg.contents.bitcoin_key_2 { - return Err(HandleError{err: "Channel announcement node had a channel with itself", action: Some(ErrorAction::IgnoreError)}); + return Err(LightningError{err: "Channel announcement node had a channel with itself", action: Some(ErrorAction::IgnoreError)}); } let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); @@ -462,7 +462,7 @@ impl RoutingMessageHandler for Router { .push_opcode(opcodes::all::OP_PUSHNUM_2) .push_opcode(opcodes::all::OP_CHECKMULTISIG).into_script().to_v0_p2wsh(); if script_pubkey != expected_script { - return Err(HandleError{err: "Channel announcement keys didn't match on-chain script", action: Some(ErrorAction::IgnoreError)}); + return Err(LightningError{err: "Channel announcement keys didn't match on-chain script", action: Some(ErrorAction::IgnoreError)}); } //TODO: Check if value is worth storing, use it to inform routing, and compare it //to the new HTLC max field in channel_update @@ -473,10 +473,10 @@ impl RoutingMessageHandler for Router { false }, Err(ChainError::NotWatched) => { - return Err(HandleError{err: "Channel announced on an unknown chain", action: Some(ErrorAction::IgnoreError)}); + return Err(LightningError{err: "Channel announced on an unknown chain", action: Some(ErrorAction::IgnoreError)}); }, Err(ChainError::UnknownTx) => { - return Err(HandleError{err: "Channel announced without corresponding UTXO entry", action: Some(ErrorAction::IgnoreError)}); + return Err(LightningError{err: "Channel announced without corresponding UTXO entry", action: Some(ErrorAction::IgnoreError)}); }, }; @@ -527,7 +527,7 @@ impl RoutingMessageHandler for Router { Self::remove_channel_in_nodes(network.nodes, &entry.get(), msg.contents.short_channel_id); *entry.get_mut() 
= chan_info; } else { - return Err(HandleError{err: "Already have knowledge of channel", action: Some(ErrorAction::IgnoreError)}) + return Err(LightningError{err: "Already have knowledge of channel", action: Some(ErrorAction::IgnoreError)}) } }, BtreeEntry::Vacant(entry) => { @@ -592,19 +592,19 @@ impl RoutingMessageHandler for Router { } } - fn handle_channel_update(&self, msg: &msgs::ChannelUpdate) -> Result { + fn handle_channel_update(&self, msg: &msgs::ChannelUpdate) -> Result { let mut network = self.network_map.write().unwrap(); let dest_node_id; let chan_enabled = msg.contents.flags & (1 << 1) != (1 << 1); let chan_was_enabled; match network.channels.get_mut(&NetworkMap::get_key(msg.contents.short_channel_id, msg.contents.chain_hash)) { - None => return Err(HandleError{err: "Couldn't find channel for update", action: Some(ErrorAction::IgnoreError)}), + None => return Err(LightningError{err: "Couldn't find channel for update", action: Some(ErrorAction::IgnoreError)}), Some(channel) => { macro_rules! maybe_update_channel_info { ( $target: expr) => { if $target.last_update >= msg.contents.timestamp { - return Err(HandleError{err: "Update older than last processed update", action: Some(ErrorAction::IgnoreError)}); + return Err(LightningError{err: "Update older than last processed update", action: Some(ErrorAction::IgnoreError)}); } chan_was_enabled = $target.enabled; $target.last_update = msg.contents.timestamp; @@ -824,17 +824,17 @@ impl Router { /// The fees on channels from us to next-hops are ignored (as they are assumed to all be /// equal), however the enabled/disabled bit on such channels as well as the htlc_minimum_msat /// *is* checked as they may change based on the receiving node. - pub fn get_route(&self, target: &PublicKey, first_hops: Option<&[channelmanager::ChannelDetails]>, last_hops: &[RouteHint], final_value_msat: u64, final_cltv: u32) -> Result { + pub fn get_route(&self, target: &PublicKey, first_hops: Option<&[channelmanager::ChannelDetails]>, last_hops: &[RouteHint], final_value_msat: u64, final_cltv: u32) -> Result { // TODO: Obviously *only* using total fee cost sucks. We should consider weighting by // uptime/success in using a node in the past. 
let network = self.network_map.read().unwrap(); if *target == network.our_node_id { - return Err(HandleError{err: "Cannot generate a route to ourselves", action: None}); + return Err(LightningError{err: "Cannot generate a route to ourselves", action: None}); } if final_value_msat > 21_000_000 * 1_0000_0000 * 1000 { - return Err(HandleError{err: "Cannot generate a route of more value than all existing satoshis", action: None}); + return Err(LightningError{err: "Cannot generate a route of more value than all existing satoshis", action: None}); } // We do a dest-to-source Dijkstra's sorting by each node's distance from the destination @@ -871,7 +871,7 @@ impl Router { first_hop_targets.insert(chan.remote_network_id, short_channel_id); } if first_hop_targets.is_empty() { - return Err(HandleError{err: "Cannot route when there are no outbound routes away from us", action: None}); + return Err(LightningError{err: "Cannot route when there are no outbound routes away from us", action: None}); } } @@ -985,7 +985,7 @@ impl Router { while res.last().unwrap().pubkey != *target { let new_entry = match dist.remove(&res.last().unwrap().pubkey) { Some(hop) => hop.3, - None => return Err(HandleError{err: "Failed to find a non-fee-overflowing path to the given destination", action: None}), + None => return Err(LightningError{err: "Failed to find a non-fee-overflowing path to the given destination", action: None}), }; res.last_mut().unwrap().fee_msat = new_entry.fee_msat; res.last_mut().unwrap().cltv_expiry_delta = new_entry.cltv_expiry_delta; @@ -1006,7 +1006,7 @@ impl Router { } } - Err(HandleError{err: "Failed to find a path to the given destination", action: None}) + Err(LightningError{err: "Failed to find a path to the given destination", action: None}) } } diff --git a/src/util/test_utils.rs b/src/util/test_utils.rs index cc6fe969d..444225d47 100644 --- a/src/util/test_utils.rs +++ b/src/util/test_utils.rs @@ -5,7 +5,7 @@ use chain::keysinterface; use ln::channelmonitor; use ln::msgs; use ln::msgs::LocalFeatures; -use ln::msgs::{HandleError}; +use ln::msgs::{LightningError}; use ln::channelmonitor::HTLCUpdate; use util::events; use util::logger::{Logger, Level, Record}; @@ -98,53 +98,53 @@ impl TestChannelMessageHandler { } impl msgs::ChannelMessageHandler for TestChannelMessageHandler { - fn handle_open_channel(&self, _their_node_id: &PublicKey, _their_local_features: LocalFeatures, _msg: &msgs::OpenChannel) -> Result<(), HandleError> { - Err(HandleError { err: "", action: None }) + fn handle_open_channel(&self, _their_node_id: &PublicKey, _their_local_features: LocalFeatures, _msg: &msgs::OpenChannel) -> Result<(), LightningError> { + Err(LightningError { err: "", action: None }) } - fn handle_accept_channel(&self, _their_node_id: &PublicKey, _their_local_features: LocalFeatures, _msg: &msgs::AcceptChannel) -> Result<(), HandleError> { - Err(HandleError { err: "", action: None }) + fn handle_accept_channel(&self, _their_node_id: &PublicKey, _their_local_features: LocalFeatures, _msg: &msgs::AcceptChannel) -> Result<(), LightningError> { + Err(LightningError { err: "", action: None }) } - fn handle_funding_created(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingCreated) -> Result<(), HandleError> { - Err(HandleError { err: "", action: None }) + fn handle_funding_created(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingCreated) -> Result<(), LightningError> { + Err(LightningError { err: "", action: None }) } - fn handle_funding_signed(&self, _their_node_id: &PublicKey, _msg: 
&msgs::FundingSigned) -> Result<(), HandleError> { - Err(HandleError { err: "", action: None }) + fn handle_funding_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingSigned) -> Result<(), LightningError> { + Err(LightningError { err: "", action: None }) } - fn handle_funding_locked(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingLocked) -> Result<(), HandleError> { - Err(HandleError { err: "", action: None }) + fn handle_funding_locked(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingLocked) -> Result<(), LightningError> { + Err(LightningError { err: "", action: None }) } - fn handle_shutdown(&self, _their_node_id: &PublicKey, _msg: &msgs::Shutdown) -> Result<(), HandleError> { - Err(HandleError { err: "", action: None }) + fn handle_shutdown(&self, _their_node_id: &PublicKey, _msg: &msgs::Shutdown) -> Result<(), LightningError> { + Err(LightningError { err: "", action: None }) } - fn handle_closing_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::ClosingSigned) -> Result<(), HandleError> { - Err(HandleError { err: "", action: None }) + fn handle_closing_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::ClosingSigned) -> Result<(), LightningError> { + Err(LightningError { err: "", action: None }) } - fn handle_update_add_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateAddHTLC) -> Result<(), HandleError> { - Err(HandleError { err: "", action: None }) + fn handle_update_add_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateAddHTLC) -> Result<(), LightningError> { + Err(LightningError { err: "", action: None }) } - fn handle_update_fulfill_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFulfillHTLC) -> Result<(), HandleError> { - Err(HandleError { err: "", action: None }) + fn handle_update_fulfill_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFulfillHTLC) -> Result<(), LightningError> { + Err(LightningError { err: "", action: None }) } - fn handle_update_fail_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailHTLC) -> Result<(), HandleError> { - Err(HandleError { err: "", action: None }) + fn handle_update_fail_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailHTLC) -> Result<(), LightningError> { + Err(LightningError { err: "", action: None }) } - fn handle_update_fail_malformed_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), HandleError> { - Err(HandleError { err: "", action: None }) + fn handle_update_fail_malformed_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), LightningError> { + Err(LightningError { err: "", action: None }) } - fn handle_commitment_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::CommitmentSigned) -> Result<(), HandleError> { - Err(HandleError { err: "", action: None }) + fn handle_commitment_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::CommitmentSigned) -> Result<(), LightningError> { + Err(LightningError { err: "", action: None }) } - fn handle_revoke_and_ack(&self, _their_node_id: &PublicKey, _msg: &msgs::RevokeAndACK) -> Result<(), HandleError> { - Err(HandleError { err: "", action: None }) + fn handle_revoke_and_ack(&self, _their_node_id: &PublicKey, _msg: &msgs::RevokeAndACK) -> Result<(), LightningError> { + Err(LightningError { err: "", action: None }) } - fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFee) -> Result<(), HandleError> { - Err(HandleError { err: "", action: None }) + fn handle_update_fee(&self, _their_node_id: 
&PublicKey, _msg: &msgs::UpdateFee) -> Result<(), HandleError> {
- Err(HandleError { err: "", action: None })
+ fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFee) -> Result<(), LightningError> {
+ Err(LightningError { err: "", action: None })
 }
- fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &msgs::AnnouncementSignatures) -> Result<(), HandleError> {
- Err(HandleError { err: "", action: None })
+ fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &msgs::AnnouncementSignatures) -> Result<(), LightningError> {
+ Err(LightningError { err: "", action: None })
 }
- fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelReestablish) -> Result<(), HandleError> {
- Err(HandleError { err: "", action: None })
+ fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelReestablish) -> Result<(), LightningError> {
+ Err(LightningError { err: "", action: None })
 }
 fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
 fn peer_connected(&self, _their_node_id: &PublicKey) {}
@@ -168,14 +168,14 @@ impl TestRoutingMessageHandler {
 }
 }
 impl msgs::RoutingMessageHandler for TestRoutingMessageHandler {
- fn handle_node_announcement(&self, _msg: &msgs::NodeAnnouncement) -> Result {
- Err(HandleError { err: "", action: None })
+ fn handle_node_announcement(&self, _msg: &msgs::NodeAnnouncement) -> Result {
+ Err(LightningError { err: "", action: None })
 }
- fn handle_channel_announcement(&self, _msg: &msgs::ChannelAnnouncement) -> Result {
- Err(HandleError { err: "", action: None })
+ fn handle_channel_announcement(&self, _msg: &msgs::ChannelAnnouncement) -> Result {
+ Err(LightningError { err: "", action: None })
 }
- fn handle_channel_update(&self, _msg: &msgs::ChannelUpdate) -> Result {
- Err(HandleError { err: "", action: None })
+ fn handle_channel_update(&self, _msg: &msgs::ChannelUpdate) -> Result {
+ Err(LightningError { err: "", action: None })
 }
 fn handle_htlc_fail_channel_update(&self, _update: &msgs::HTLCFailChannelUpdate) {}
 fn get_next_channel_announcements(&self, _starting_point: u64, _batch_amount: u8) -> Vec<(msgs::ChannelAnnouncement, msgs::ChannelUpdate,msgs::ChannelUpdate)> {

From ddbe53f836865b65591f40356ea604c6cb8be432 Mon Sep 17 00:00:00 2001
From: Antoine Riard
Date: Mon, 4 Nov 2019 19:54:43 -0500
Subject: [PATCH 2/2] Make the action field of LightningError mandatory

We also fill in the last remaining empty ErrorActions:
- Router secp fail : IgnoreError
- processing error in Router : IgnoreError
- get_channel_update too early : IgnoreError
---
(An illustrative sketch of matching on the now-mandatory ErrorAction follows the src/util/events.rs hunk below.)

 fuzz/fuzz_targets/chanmon_fail_consistency.rs | 2 +-
 src/ln/chanmon_update_fail_tests.rs | 34 +++---
 src/ln/channelmanager.rs | 34 +++---
 src/ln/functional_tests.rs | 84 +++++++--------
 src/ln/msgs.rs | 2 +-
 src/ln/peer_channel_encryptor.rs | 10 +-
 src/ln/peer_handler.rs | 101 ++++++++----------
 src/ln/router.rs | 30 +++---
 src/util/events.rs | 2 +-
 src/util/test_utils.rs | 38 +++----
 10 files changed, 160 insertions(+), 177 deletions(-)

diff --git a/fuzz/fuzz_targets/chanmon_fail_consistency.rs b/fuzz/fuzz_targets/chanmon_fail_consistency.rs
index 28f03b639..655b3b8d1 100644
--- a/fuzz/fuzz_targets/chanmon_fail_consistency.rs
+++ b/fuzz/fuzz_targets/chanmon_fail_consistency.rs
@@ -392,7 +392,7 @@ pub fn do_test(data: &[u8]) {
 ($res: expr) => {
 match $res {
 Ok(()) => {},
- Err(LightningError { action: Some(ErrorAction::IgnoreError), .. }) => { },
+ Err(LightningError { action: ErrorAction::IgnoreError, ..
}) => { }, _ => { $res.unwrap() }, } } diff --git a/src/ln/chanmon_update_fail_tests.rs b/src/ln/chanmon_update_fail_tests.rs index af676edb2..4b8490c5b 100644 --- a/src/ln/chanmon_update_fail_tests.rs +++ b/src/ln/chanmon_update_fail_tests.rs @@ -190,7 +190,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { _ => panic!("Unexpected event"), } - if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed) { + if let Err(msgs::LightningError{err, action: msgs::ErrorAction::IgnoreError }) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed) { assert_eq!(err, "Previous monitor update failure prevented generation of RAA"); } else { panic!(); } } @@ -485,7 +485,7 @@ fn test_monitor_update_fail_cs() { nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap(); *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); - if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg).unwrap_err() { + if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg).unwrap_err() { assert_eq!(err, "Failed to update ChannelMonitor"); } else { panic!(); } check_added_monitors!(nodes[1], 1); @@ -515,7 +515,7 @@ fn test_monitor_update_fail_cs() { assert_eq!(*node_id, nodes[0].node.get_our_node_id()); *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); - if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed).unwrap_err() { + if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed).unwrap_err() { assert_eq!(err, "Failed to update ChannelMonitor"); } else { panic!(); } check_added_monitors!(nodes[0], 1); @@ -565,7 +565,7 @@ fn test_monitor_update_fail_no_rebroadcast() { let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true); *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); - if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa).unwrap_err() { + if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa).unwrap_err() { assert_eq!(err, "Failed to update ChannelMonitor"); } else { panic!(); } assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -618,12 +618,12 @@ fn test_monitor_update_raa_while_paused() { *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]).unwrap(); - if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), 
&send_event_2.commitment_msg).unwrap_err() { + if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg).unwrap_err() { assert_eq!(err, "Failed to update ChannelMonitor"); } else { panic!(); } check_added_monitors!(nodes[0], 1); - if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap_err() { + if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap_err() { assert_eq!(err, "Previous monitor update failure prevented responses to RAA"); } else { panic!(); } check_added_monitors!(nodes[0], 1); @@ -704,7 +704,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Now fail monitor updating. *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); - if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap_err() { + if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap_err() { assert_eq!(err, "Failed to update ChannelMonitor"); } else { panic!(); } assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); @@ -768,7 +768,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]).unwrap(); - if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg) { + if let Err(msgs::LightningError{err, action: msgs::ErrorAction::IgnoreError }) = nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg) { assert_eq!(err, "Previous monitor update failure prevented generation of RAA"); } else { panic!(); } assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -945,7 +945,7 @@ fn test_monitor_update_fail_reestablish() { nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish).unwrap(); - if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap_err() { + if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap_err() { assert_eq!(err, "Failed to update ChannelMonitor"); } else { panic!(); } check_added_monitors!(nodes[1], 1); @@ -1033,12 +1033,12 @@ fn raa_no_response_awaiting_raa_state() { // then restore channel monitor updates. 
*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); - if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() { + if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() { assert_eq!(err, "Failed to update ChannelMonitor"); } else { panic!(); } check_added_monitors!(nodes[1], 1); - if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() { + if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() { assert_eq!(err, "Previous monitor update failure prevented responses to RAA"); } else { panic!(); } check_added_monitors!(nodes[1], 1); @@ -1130,7 +1130,7 @@ fn claim_while_disconnected_monitor_update_fail() { // update. *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); - if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect).unwrap_err() { + if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect).unwrap_err() { assert_eq!(err, "Failed to update ChannelMonitor"); } else { panic!(); } check_added_monitors!(nodes[1], 1); @@ -1145,7 +1145,7 @@ fn claim_while_disconnected_monitor_update_fail() { let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]).unwrap(); - if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed).unwrap_err() { + if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed).unwrap_err() { assert_eq!(err, "Previous monitor update failure prevented generation of RAA"); } else { panic!(); } // Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC @@ -1235,7 +1235,7 @@ fn monitor_failed_no_reestablish_response() { assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); - if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() { + if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() { assert_eq!(err, "Failed to update ChannelMonitor"); } else { panic!(); } check_added_monitors!(nodes[1], 1); @@ -1327,7 +1327,7 @@ fn 
first_message_on_recv_ordering() { // Deliver the final RAA for the first payment, which does not require a response. RAAs // generally require a commitment_signed, so the fact that we're expecting an opposite response // to the next message also tests resetting the delivery order. - if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() { + if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() { assert_eq!(err, "Failed to update ChannelMonitor"); } else { panic!(); } check_added_monitors!(nodes[1], 1); @@ -1336,7 +1336,7 @@ fn first_message_on_recv_ordering() { // RAA/CS response, which should be generated when we call test_restore_channel_monitor (with // the appropriate HTLC acceptance). nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); - if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() { + if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() { assert_eq!(err, "Previous monitor update failure prevented generation of RAA"); } else { panic!(); } @@ -1597,7 +1597,7 @@ fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails: } let funding_signed_res = nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id())); if fail_on_signed || !restore_between_fails { - if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = funding_signed_res.unwrap_err() { + if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = funding_signed_res.unwrap_err() { if fail_on_generate && !restore_between_fails { assert_eq!(err, "Previous monitor update failure prevented funding_signed from allowing funding broadcast"); check_added_monitors!(nodes[0], 0); diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs index 156b31964..a18628f3e 100644 --- a/src/ln/channelmanager.rs +++ b/src/ln/channelmanager.rs @@ -152,12 +152,12 @@ impl MsgHandleErrInternal { Self { err: LightningError { err, - action: Some(msgs::ErrorAction::SendErrorMessage { + action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { channel_id, data: err.to_string() }, - }), + }, }, shutdown_finish: None, } @@ -167,7 +167,7 @@ impl MsgHandleErrInternal { Self { err: LightningError { err, - action: Some(msgs::ErrorAction::IgnoreError), + action: msgs::ErrorAction::IgnoreError, }, shutdown_finish: None, } @@ -181,12 +181,12 @@ impl MsgHandleErrInternal { Self { err: LightningError { err, - action: Some(msgs::ErrorAction::SendErrorMessage { + action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { channel_id, data: err.to_string() }, - }), + }, }, shutdown_finish: Some((shutdown_res, channel_update)), } @@ -197,25 +197,25 @@ impl MsgHandleErrInternal { err: match err { ChannelError::Ignore(msg) => LightningError { err: msg, - action: Some(msgs::ErrorAction::IgnoreError), + action: msgs::ErrorAction::IgnoreError, }, ChannelError::Close(msg) => LightningError { err: 
msg, - action: Some(msgs::ErrorAction::SendErrorMessage { + action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { channel_id, data: msg.to_string() }, - }), + }, }, ChannelError::CloseDelayBroadcast { msg, .. } => LightningError { err: msg, - action: Some(msgs::ErrorAction::SendErrorMessage { + action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { channel_id, data: msg.to_string() }, - }), + }, }, }, shutdown_finish: None, @@ -1011,7 +1011,7 @@ impl ChannelManager { /// May be called with channel_state already locked! fn get_channel_update(&self, chan: &Channel) -> Result { let short_channel_id = match chan.get_short_channel_id() { - None => return Err(LightningError{err: "Channel not yet established", action: None}), + None => return Err(LightningError{err: "Channel not yet established", action: msgs::ErrorAction::IgnoreError}), Some(id) => id, }; @@ -1140,7 +1140,7 @@ impl ChannelManager { match handle_error!(self, err) { Ok(_) => unreachable!(), Err(e) => { - if let Some(msgs::ErrorAction::IgnoreError) = e.action { + if let msgs::ErrorAction::IgnoreError = e.action { } else { log_error!(self, "Got bad keys: {}!", e.err); let mut channel_state = self.channel_state.lock().unwrap(); @@ -1434,7 +1434,7 @@ impl ChannelManager { match handle_error!(self, err) { Ok(_) => {}, Err(e) => { - if let Some(msgs::ErrorAction::IgnoreError) = e.action { + if let msgs::ErrorAction::IgnoreError = e.action { } else { let mut channel_state = self.channel_state.lock().unwrap(); channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError { @@ -1660,7 +1660,7 @@ impl ChannelManager { match handle_error!(self, err) { Ok(_) => {}, Err(e) => { - if let Some(msgs::ErrorAction::IgnoreError) = e.action { + if let msgs::ErrorAction::IgnoreError = e.action { } else { let mut channel_state = self.channel_state.lock().unwrap(); channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError { @@ -2292,7 +2292,7 @@ impl ChannelManager { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } if !chan.get().is_usable() { - return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it", action: Some(msgs::ErrorAction::IgnoreError)})); + return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it", action: msgs::ErrorAction::IgnoreError})); } let our_node_id = self.get_our_node_id(); @@ -2445,7 +2445,7 @@ impl ChannelManager { match handle_error!(self, err) { Ok(_) => unreachable!(), Err(e) => { - if let Some(msgs::ErrorAction::IgnoreError) = e.action { + if let msgs::ErrorAction::IgnoreError = e.action { } else { log_error!(self, "Got bad keys: {}!", e.err); let mut channel_state = self.channel_state.lock().unwrap(); @@ -2538,7 +2538,7 @@ impl ChainListener for ChannelManager { } else if let Err(e) = chan_res { pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: channel.get_their_node_id(), - action: Some(msgs::ErrorAction::SendErrorMessage { msg: e }), + action: msgs::ErrorAction::SendErrorMessage { msg: e }, }); return false; } diff --git a/src/ln/functional_tests.rs b/src/ln/functional_tests.rs index 611b94f1a..1c1f5d249 100644 --- a/src/ln/functional_tests.rs +++ b/src/ln/functional_tests.rs @@ -70,7 +70,7 @@ fn test_insane_channel_opens() { // that supposedly makes the channel open message insane let insane_open_helper = 
|expected_error_str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| { match nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), LocalFeatures::new(), &message_mutator(open_channel_message.clone())) { - Err(msgs::LightningError{ err: error_str, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) => { + Err(msgs::LightningError{ err: error_str, action: msgs::ErrorAction::SendErrorMessage {..}}) => { assert_eq!(error_str, expected_error_str, "unexpected LightningError string (expected `{}`, actual `{}`)", expected_error_str, error_str) }, Err(msgs::LightningError{..}) => {panic!("unexpected LightningError action")}, @@ -952,7 +952,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { // transaction. assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - if let Err(msgs::LightningError{action: Some(msgs::ErrorAction::SendErrorMessage{msg}), ..}) = + if let Err(msgs::LightningError{action: msgs::ErrorAction::SendErrorMessage{msg}, ..}) = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish) { nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msg); let msgs::ErrorMessage {ref channel_id, ..} = msg; @@ -3455,7 +3455,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id()); let reestablish = get_event_msg!(nodes[3], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id()); - if let Err(msgs::LightningError { action: Some(msgs::ErrorAction::SendErrorMessage { msg }), .. }) = nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish) { + if let Err(msgs::LightningError { action: msgs::ErrorAction::SendErrorMessage { msg }, .. 
}) = nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish) { assert_eq!(msg.channel_id, channel_id); } else { panic!("Unexpected result"); } } @@ -5101,7 +5101,7 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() { let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat-1; let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { + if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err { assert_eq!(err, "Remote side tried to send less than our minimum HTLC value"); } else { assert!(false); @@ -5127,7 +5127,7 @@ fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() { updates.update_add_htlcs[0].amount_msat = 5000000-their_channel_reserve+1; let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { + if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err { assert_eq!(err, "Remote HTLC add would put them over their reserve value"); } else { assert!(false); @@ -5174,7 +5174,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { msg.htlc_id = (super::channel::OUR_MAX_HTLCS) as u64; let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg); - if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { + if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err { assert_eq!(err, "Remote tried to push more than our max accepted HTLCs"); } else { assert!(false); @@ -5197,7 +5197,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() { updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], chan.2).their_max_htlc_value_in_flight_msat + 1; let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { + if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err { assert_eq!(err,"Remote HTLC add would put them over our max HTLC value"); } else { assert!(false); @@ -5220,7 +5220,7 @@ fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() { updates.update_add_htlcs[0].cltv_expiry = 500000000; let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { + if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err { assert_eq!(err,"Remote provided CLTV expiry in seconds instead of block height"); } else { assert!(false); @@ -5266,7 +5266,7 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() { let _bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { + if let 
Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err { assert_eq!(err, "Remote skipped HTLC ID"); } else { assert!(false); @@ -5298,7 +5298,7 @@ fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() { let err = nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msg); - if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { + if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err { assert_eq!(err, "Remote tried to fulfill/fail HTLC before it had been committed"); } else { assert!(false); @@ -5330,7 +5330,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() { let err = nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_msg); - if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { + if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err { assert_eq!(err, "Remote tried to fulfill/fail HTLC before it had been committed"); } else { assert!(false); @@ -5363,7 +5363,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() let err = nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg); - if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { + if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err { assert_eq!(err, "Remote tried to fulfill/fail HTLC before it had been committed"); } else { assert!(false); @@ -5404,7 +5404,7 @@ fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() { update_fulfill_msg.htlc_id = 1; let err = nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg); - if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { + if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err { assert_eq!(err, "Remote tried to fulfill/fail an HTLC we couldn't find"); } else { assert!(false); @@ -5445,7 +5445,7 @@ fn test_update_fulfill_htlc_bolt2_wrong_preimage() { update_fulfill_msg.payment_preimage = PaymentPreimage([1; 32]); let err = nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg); - if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { + if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err { assert_eq!(err, "Remote tried to fulfill HTLC with an incorrect preimage"); } else { assert!(false); @@ -5491,7 +5491,7 @@ fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_messag }; update_msg.failure_code &= !0x8000; let err = nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg); - if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err { + if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err { assert_eq!(err, "Got update_fail_malformed_htlc with BADONION not set"); } else { assert!(false); @@ -5864,14 +5864,12 @@ fn test_upfront_shutdown_script() { node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh(); // Test we enforce upfront_scriptpbukey if by providing a diffrent one at closing that 
we disconnect peer if let Err(error) = nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown) { - if let Some(error) = error.action { - match error { - ErrorAction::SendErrorMessage { msg } => { - assert_eq!(msg.data,"Got shutdown request with a scriptpubkey which did not match their previous scriptpubkey"); - }, - _ => { assert!(false); } - } - } else { assert!(false); } + match error.action { + ErrorAction::SendErrorMessage { msg } => { + assert_eq!(msg.data,"Got shutdown request with a scriptpubkey which did not match their previous scriptpubkey"); + }, + _ => { assert!(false); } + } } else { assert!(false); } let events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -5981,14 +5979,12 @@ fn test_user_configurable_csv_delay() { let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); accept_channel.to_self_delay = 200; if let Err(error) = nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), LocalFeatures::new(), &accept_channel) { - if let Some(error) = error.action { - match error { - ErrorAction::SendErrorMessage { msg } => { - assert_eq!(msg.data,"They wanted our payments to be delayed by a needlessly long period"); - }, - _ => { assert!(false); } - } - } else { assert!(false); } + match error.action { + ErrorAction::SendErrorMessage { msg } => { + assert_eq!(msg.data,"They wanted our payments to be delayed by a needlessly long period"); + }, + _ => { assert!(false); } + } } else { assert!(false); } // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::new_from_req() @@ -6056,14 +6052,12 @@ fn test_data_loss_protect() { // Check we update monitor following learning of per_commitment_point from B if let Err(err) = nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]) { - if let Some(error) = err.action { - match error { - ErrorAction::SendErrorMessage { msg } => { - assert_eq!(msg.data, "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting"); - }, - _ => panic!("Unexpected event!"), - } - } else { assert!(false); } + match err.action { + ErrorAction::SendErrorMessage { msg } => { + assert_eq!(msg.data, "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting"); + }, + _ => panic!("Unexpected event!"), + } } else { assert!(false); } check_added_monitors!(nodes[0], 1); @@ -6085,13 +6079,11 @@ fn test_data_loss_protect() { // Check we close channel detecting A is fallen-behind if let Err(err) = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]) { - if let Some(error) = err.action { - match error { - ErrorAction::SendErrorMessage { msg } => { - assert_eq!(msg.data, "Peer attempted to reestablish channel with a very old local commitment transaction"); }, - _ => panic!("Unexpected event!"), - } - } else { assert!(false); } + match err.action { + ErrorAction::SendErrorMessage { msg } => { + assert_eq!(msg.data, "Peer attempted to reestablish channel with a very old local commitment transaction"); }, + _ => panic!("Unexpected event!"), + } } else { assert!(false); } let events = nodes[1].node.get_and_clear_pending_msg_events(); diff --git a/src/ln/msgs.rs b/src/ln/msgs.rs index 6939af13e..277d96d16 100644 --- a/src/ln/msgs.rs +++ b/src/ln/msgs.rs @@ -547,7 +547,7 @@ 
pub struct LightningError { /// A human-readable message describing the error pub err: &'static str, /// The action which should be taken against the offending peer. - pub action: Option, //TODO: Make this required + pub action: ErrorAction, } /// Struct used to return values from revoke_and_ack messages, containing a bunch of commitment diff --git a/src/ln/peer_channel_encryptor.rs b/src/ln/peer_channel_encryptor.rs index ac23041a0..7e84e329e 100644 --- a/src/ln/peer_channel_encryptor.rs +++ b/src/ln/peer_channel_encryptor.rs @@ -139,7 +139,7 @@ impl PeerChannelEncryptor { let mut chacha = ChaCha20Poly1305RFC::new(key, &nonce, h); if !chacha.decrypt(&cyphertext[0..cyphertext.len() - 16], res, &cyphertext[cyphertext.len() - 16..]) { - return Err(LightningError{err: "Bad MAC", action: Some(msgs::ErrorAction::DisconnectPeer{ msg: None })}); + return Err(LightningError{err: "Bad MAC", action: msgs::ErrorAction::DisconnectPeer{ msg: None }}); } Ok(()) } @@ -193,11 +193,11 @@ impl PeerChannelEncryptor { assert_eq!(act.len(), 50); if act[0] != 0 { - return Err(LightningError{err: "Unknown handshake version number", action: Some(msgs::ErrorAction::DisconnectPeer{ msg: None })}); + return Err(LightningError{err: "Unknown handshake version number", action: msgs::ErrorAction::DisconnectPeer{ msg: None }}); } let their_pub = match PublicKey::from_slice(&act[1..34]) { - Err(_) => return Err(LightningError{err: "Invalid public key", action: Some(msgs::ErrorAction::DisconnectPeer{ msg: None })}), + Err(_) => return Err(LightningError{err: "Invalid public key", action: msgs::ErrorAction::DisconnectPeer{ msg: None }}), Ok(key) => key, }; @@ -330,14 +330,14 @@ impl PeerChannelEncryptor { panic!("Requested act at wrong step"); } if act_three[0] != 0 { - return Err(LightningError{err: "Unknown handshake version number", action: Some(msgs::ErrorAction::DisconnectPeer{ msg: None })}); + return Err(LightningError{err: "Unknown handshake version number", action: msgs::ErrorAction::DisconnectPeer{ msg: None }}); } let mut their_node_id = [0; 33]; PeerChannelEncryptor::decrypt_with_ad(&mut their_node_id, 1, &temp_k2.unwrap(), &bidirectional_state.h, &act_three[1..50])?; self.their_node_id = Some(match PublicKey::from_slice(&their_node_id) { Ok(key) => key, - Err(_) => return Err(LightningError{err: "Bad node_id from peer", action: Some(msgs::ErrorAction::DisconnectPeer{ msg: None })}), + Err(_) => return Err(LightningError{err: "Bad node_id from peer", action: msgs::ErrorAction::DisconnectPeer{ msg: None }}), }); let mut sha = Sha256::engine(); diff --git a/src/ln/peer_handler.rs b/src/ln/peer_handler.rs index 8094f2561..c0035f4f4 100644 --- a/src/ln/peer_handler.rs +++ b/src/ln/peer_handler.rs @@ -481,26 +481,21 @@ impl PeerManager { match $thing { Ok(x) => x, Err(e) => { - if let Some(action) = e.action { - match action { - msgs::ErrorAction::DisconnectPeer { msg: _ } => { - //TODO: Try to push msg - log_trace!(self, "Got Err handling message, disconnecting peer because {}", e.err); - return Err(PeerHandleError{ no_connection_possible: false }); - }, - msgs::ErrorAction::IgnoreError => { - log_trace!(self, "Got Err handling message, ignoring because {}", e.err); - continue; - }, - msgs::ErrorAction::SendErrorMessage { msg } => { - log_trace!(self, "Got Err handling message, sending Error message because {}", e.err); - encode_and_send_msg!(msg, 17); - continue; - }, - } - } else { - log_debug!(self, "Got Err handling message, action not yet filled in: {}", e.err); - return Err(PeerHandleError{ 
no_connection_possible: false }); + match e.action { + msgs::ErrorAction::DisconnectPeer { msg: _ } => { + //TODO: Try to push msg + log_trace!(self, "Got Err handling message, disconnecting peer because {}", e.err); + return Err(PeerHandleError{ no_connection_possible: false }); + }, + msgs::ErrorAction::IgnoreError => { + log_trace!(self, "Got Err handling message, ignoring because {}", e.err); + continue; + }, + msgs::ErrorAction::SendErrorMessage { msg } => { + log_trace!(self, "Got Err handling message, sending Error message because {}", e.err); + encode_and_send_msg!(msg, 17); + continue; + }, } } }; @@ -1020,42 +1015,38 @@ impl PeerManager { self.message_handler.route_handler.handle_htlc_fail_channel_update(update); }, MessageSendEvent::HandleError { ref node_id, ref action } => { - if let Some(ref action) = *action { - match *action { - msgs::ErrorAction::DisconnectPeer { ref msg } => { - if let Some(mut descriptor) = peers.node_id_to_descriptor.remove(node_id) { - peers.peers_needing_send.remove(&descriptor); - if let Some(mut peer) = peers.peers.remove(&descriptor) { - if let Some(ref msg) = *msg { - log_trace!(self, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}", - log_pubkey!(node_id), - msg.data); - peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 17))); - // This isn't guaranteed to work, but if there is enough free - // room in the send buffer, put the error message there... - self.do_attempt_write_data(&mut descriptor, &mut peer); - } else { - log_trace!(self, "Handling DisconnectPeer HandleError event in peer_handler for node {} with no message", log_pubkey!(node_id)); - } + match *action { + msgs::ErrorAction::DisconnectPeer { ref msg } => { + if let Some(mut descriptor) = peers.node_id_to_descriptor.remove(node_id) { + peers.peers_needing_send.remove(&descriptor); + if let Some(mut peer) = peers.peers.remove(&descriptor) { + if let Some(ref msg) = *msg { + log_trace!(self, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}", + log_pubkey!(node_id), + msg.data); + peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 17))); + // This isn't guaranteed to work, but if there is enough free + // room in the send buffer, put the error message there... 
+ self.do_attempt_write_data(&mut descriptor, &mut peer); + } else { + log_trace!(self, "Handling DisconnectPeer HandleError event in peer_handler for node {} with no message", log_pubkey!(node_id)); } - descriptor.disconnect_socket(); - self.message_handler.chan_handler.peer_disconnected(&node_id, false); } - }, - msgs::ErrorAction::IgnoreError => {}, - msgs::ErrorAction::SendErrorMessage { ref msg } => { - log_trace!(self, "Handling SendErrorMessage HandleError event in peer_handler for node {} with message {}", - log_pubkey!(node_id), - msg.data); - let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, { - //TODO: Do whatever we're gonna do for handling dropped messages - }); - peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 17))); - self.do_attempt_write_data(&mut descriptor, peer); - }, - } - } else { - log_error!(self, "Got no-action HandleError Event in peer_handler for node {}, no such events should ever be generated!", log_pubkey!(node_id)); + descriptor.disconnect_socket(); + self.message_handler.chan_handler.peer_disconnected(&node_id, false); + } + }, + msgs::ErrorAction::IgnoreError => {}, + msgs::ErrorAction::SendErrorMessage { ref msg } => { + log_trace!(self, "Handling SendErrorMessage HandleError event in peer_handler for node {} with message {}", + log_pubkey!(node_id), + msg.data); + let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, { + //TODO: Do whatever we're gonna do for handling dropped messages + }); + peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 17))); + self.do_attempt_write_data(&mut descriptor, peer); + }, } } } @@ -1172,7 +1163,7 @@ mod tests { let chan_handler = test_utils::TestChannelMessageHandler::new(); chan_handler.pending_events.lock().unwrap().push(events::MessageSendEvent::HandleError { node_id: their_id, - action: Some(msgs::ErrorAction::DisconnectPeer { msg: None }), + action: msgs::ErrorAction::DisconnectPeer { msg: None }, }); assert_eq!(chan_handler.pending_events.lock().unwrap().len(), 1); peers[0].message_handler.chan_handler = Arc::new(chan_handler); diff --git a/src/ln/router.rs b/src/ln/router.rs index 5071919c5..5686e5a3b 100644 --- a/src/ln/router.rs +++ b/src/ln/router.rs @@ -404,7 +404,7 @@ macro_rules! 
secp_verify_sig { ( $secp_ctx: expr, $msg: expr, $sig: expr, $pubkey: expr ) => { match $secp_ctx.verify($msg, $sig, $pubkey) { Ok(_) => {}, - Err(_) => return Err(LightningError{err: "Invalid signature from remote node", action: None}), + Err(_) => return Err(LightningError{err: "Invalid signature from remote node", action: ErrorAction::IgnoreError}), } }; } @@ -420,10 +420,10 @@ impl RoutingMessageHandler for Router { let mut network = self.network_map.write().unwrap(); match network.nodes.get_mut(&msg.contents.node_id) { - None => Err(LightningError{err: "No existing channels for node_announcement", action: Some(ErrorAction::IgnoreError)}), + None => Err(LightningError{err: "No existing channels for node_announcement", action: ErrorAction::IgnoreError}), Some(node) => { if node.last_update >= msg.contents.timestamp { - return Err(LightningError{err: "Update older than last processed update", action: Some(ErrorAction::IgnoreError)}); + return Err(LightningError{err: "Update older than last processed update", action: ErrorAction::IgnoreError}); } node.features = msg.contents.features.clone(); @@ -441,7 +441,7 @@ impl RoutingMessageHandler for Router { fn handle_channel_announcement(&self, msg: &msgs::ChannelAnnouncement) -> Result { if msg.contents.node_id_1 == msg.contents.node_id_2 || msg.contents.bitcoin_key_1 == msg.contents.bitcoin_key_2 { - return Err(LightningError{err: "Channel announcement node had a channel with itself", action: Some(ErrorAction::IgnoreError)}); + return Err(LightningError{err: "Channel announcement node had a channel with itself", action: ErrorAction::IgnoreError}); } let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); @@ -462,7 +462,7 @@ impl RoutingMessageHandler for Router { .push_opcode(opcodes::all::OP_PUSHNUM_2) .push_opcode(opcodes::all::OP_CHECKMULTISIG).into_script().to_v0_p2wsh(); if script_pubkey != expected_script { - return Err(LightningError{err: "Channel announcement keys didn't match on-chain script", action: Some(ErrorAction::IgnoreError)}); + return Err(LightningError{err: "Channel announcement keys didn't match on-chain script", action: ErrorAction::IgnoreError}); } //TODO: Check if value is worth storing, use it to inform routing, and compare it //to the new HTLC max field in channel_update @@ -473,10 +473,10 @@ impl RoutingMessageHandler for Router { false }, Err(ChainError::NotWatched) => { - return Err(LightningError{err: "Channel announced on an unknown chain", action: Some(ErrorAction::IgnoreError)}); + return Err(LightningError{err: "Channel announced on an unknown chain", action: ErrorAction::IgnoreError}); }, Err(ChainError::UnknownTx) => { - return Err(LightningError{err: "Channel announced without corresponding UTXO entry", action: Some(ErrorAction::IgnoreError)}); + return Err(LightningError{err: "Channel announced without corresponding UTXO entry", action: ErrorAction::IgnoreError}); }, }; @@ -527,7 +527,7 @@ impl RoutingMessageHandler for Router { Self::remove_channel_in_nodes(network.nodes, &entry.get(), msg.contents.short_channel_id); *entry.get_mut() = chan_info; } else { - return Err(LightningError{err: "Already have knowledge of channel", action: Some(ErrorAction::IgnoreError)}) + return Err(LightningError{err: "Already have knowledge of channel", action: ErrorAction::IgnoreError}) } }, BtreeEntry::Vacant(entry) => { @@ -599,12 +599,12 @@ impl RoutingMessageHandler for Router { let chan_was_enabled; match network.channels.get_mut(&NetworkMap::get_key(msg.contents.short_channel_id, 
msg.contents.chain_hash)) { - None => return Err(LightningError{err: "Couldn't find channel for update", action: Some(ErrorAction::IgnoreError)}), + None => return Err(LightningError{err: "Couldn't find channel for update", action: ErrorAction::IgnoreError}), Some(channel) => { macro_rules! maybe_update_channel_info { ( $target: expr) => { if $target.last_update >= msg.contents.timestamp { - return Err(LightningError{err: "Update older than last processed update", action: Some(ErrorAction::IgnoreError)}); + return Err(LightningError{err: "Update older than last processed update", action: ErrorAction::IgnoreError}); } chan_was_enabled = $target.enabled; $target.last_update = msg.contents.timestamp; @@ -830,11 +830,11 @@ impl Router { let network = self.network_map.read().unwrap(); if *target == network.our_node_id { - return Err(LightningError{err: "Cannot generate a route to ourselves", action: None}); + return Err(LightningError{err: "Cannot generate a route to ourselves", action: ErrorAction::IgnoreError}); } if final_value_msat > 21_000_000 * 1_0000_0000 * 1000 { - return Err(LightningError{err: "Cannot generate a route of more value than all existing satoshis", action: None}); + return Err(LightningError{err: "Cannot generate a route of more value than all existing satoshis", action: ErrorAction::IgnoreError}); } // We do a dest-to-source Dijkstra's sorting by each node's distance from the destination @@ -871,7 +871,7 @@ impl Router { first_hop_targets.insert(chan.remote_network_id, short_channel_id); } if first_hop_targets.is_empty() { - return Err(LightningError{err: "Cannot route when there are no outbound routes away from us", action: None}); + return Err(LightningError{err: "Cannot route when there are no outbound routes away from us", action: ErrorAction::IgnoreError}); } } @@ -985,7 +985,7 @@ impl Router { while res.last().unwrap().pubkey != *target { let new_entry = match dist.remove(&res.last().unwrap().pubkey) { Some(hop) => hop.3, - None => return Err(LightningError{err: "Failed to find a non-fee-overflowing path to the given destination", action: None}), + None => return Err(LightningError{err: "Failed to find a non-fee-overflowing path to the given destination", action: ErrorAction::IgnoreError}), }; res.last_mut().unwrap().fee_msat = new_entry.fee_msat; res.last_mut().unwrap().cltv_expiry_delta = new_entry.cltv_expiry_delta; @@ -1006,7 +1006,7 @@ impl Router { } } - Err(LightningError{err: "Failed to find a path to the given destination", action: None}) + Err(LightningError{err: "Failed to find a path to the given destination", action: ErrorAction::IgnoreError}) } } diff --git a/src/util/events.rs b/src/util/events.rs index 96a5b48a7..e810368db 100644 --- a/src/util/events.rs +++ b/src/util/events.rs @@ -210,7 +210,7 @@ pub enum MessageSendEvent { /// The node_id of the node which should receive this message node_id: PublicKey, /// The action which should be taken. - action: Option + action: msgs::ErrorAction }, /// When a payment fails we may receive updates back from the hop where it failed. In such /// cases this event is generated so that we can inform the router of this information. 
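
As referenced in the notes for this patch, here is a small, self-contained Rust sketch of what call sites look like once action is a plain ErrorAction rather than an Option. It is illustrative only and not part of the patch: ErrorMessage, ErrorAction, and LightningError below are simplified stand-ins for the definitions in src/ln/msgs.rs, and process/main are hypothetical helpers.

// Illustrative sketch only -- simplified stand-ins for the msgs.rs types, not part of the patch.
#[derive(Debug)]
struct ErrorMessage {
    data: String,
}

#[derive(Debug)]
enum ErrorAction {
    // Disconnect the peer, optionally sending a final error message first.
    DisconnectPeer { msg: Option<ErrorMessage> },
    // The error was already handled internally; nothing needs to be sent.
    IgnoreError,
    // Relay an error message to the offending peer.
    SendErrorMessage { msg: ErrorMessage },
}

#[derive(Debug)]
struct LightningError {
    err: &'static str,
    // Mandatory after this patch; previously Option<ErrorAction>.
    action: ErrorAction,
}

// With the action mandatory, a caller matches on it directly instead of
// unwrapping an Option first.
fn process(res: Result<(), LightningError>) {
    match res {
        Ok(()) => {},
        Err(LightningError { err, action: ErrorAction::IgnoreError }) => {
            println!("ignoring already-handled error: {}", err);
        },
        Err(LightningError { err, action: ErrorAction::SendErrorMessage { msg } }) => {
            println!("sending error message {:?} because: {}", msg.data, err);
        },
        Err(LightningError { err, action: ErrorAction::DisconnectPeer { msg } }) => {
            println!("disconnecting peer ({}), final message: {:?}", err, msg);
        },
    }
}

fn main() {
    process(Err(LightningError {
        err: "Update older than last processed update",
        action: ErrorAction::IgnoreError,
    }));
}

This mirrors the src/ln/peer_handler.rs hunk above, where the old "if let Some(action) = e.action" wrapper is dropped and the handler matches on e.action directly.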
diff --git a/src/util/test_utils.rs b/src/util/test_utils.rs index 444225d47..c3884d345 100644 --- a/src/util/test_utils.rs +++ b/src/util/test_utils.rs @@ -99,52 +99,52 @@ impl TestChannelMessageHandler { impl msgs::ChannelMessageHandler for TestChannelMessageHandler { fn handle_open_channel(&self, _their_node_id: &PublicKey, _their_local_features: LocalFeatures, _msg: &msgs::OpenChannel) -> Result<(), LightningError> { - Err(LightningError { err: "", action: None }) + Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) } fn handle_accept_channel(&self, _their_node_id: &PublicKey, _their_local_features: LocalFeatures, _msg: &msgs::AcceptChannel) -> Result<(), LightningError> { - Err(LightningError { err: "", action: None }) + Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) } fn handle_funding_created(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingCreated) -> Result<(), LightningError> { - Err(LightningError { err: "", action: None }) + Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) } fn handle_funding_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingSigned) -> Result<(), LightningError> { - Err(LightningError { err: "", action: None }) + Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) } fn handle_funding_locked(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingLocked) -> Result<(), LightningError> { - Err(LightningError { err: "", action: None }) + Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) } fn handle_shutdown(&self, _their_node_id: &PublicKey, _msg: &msgs::Shutdown) -> Result<(), LightningError> { - Err(LightningError { err: "", action: None }) + Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) } fn handle_closing_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::ClosingSigned) -> Result<(), LightningError> { - Err(LightningError { err: "", action: None }) + Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) } fn handle_update_add_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateAddHTLC) -> Result<(), LightningError> { - Err(LightningError { err: "", action: None }) + Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) } fn handle_update_fulfill_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFulfillHTLC) -> Result<(), LightningError> { - Err(LightningError { err: "", action: None }) + Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) } fn handle_update_fail_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailHTLC) -> Result<(), LightningError> { - Err(LightningError { err: "", action: None }) + Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) } fn handle_update_fail_malformed_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), LightningError> { - Err(LightningError { err: "", action: None }) + Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) } fn handle_commitment_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::CommitmentSigned) -> Result<(), LightningError> { - Err(LightningError { err: "", action: None }) + Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) } fn handle_revoke_and_ack(&self, _their_node_id: &PublicKey, _msg: &msgs::RevokeAndACK) -> Result<(), LightningError> { - Err(LightningError { err: "", action: None }) + Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) } fn 
handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFee) -> Result<(), LightningError> { - Err(LightningError { err: "", action: None }) + Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) } fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &msgs::AnnouncementSignatures) -> Result<(), LightningError> { - Err(LightningError { err: "", action: None }) + Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) } fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelReestablish) -> Result<(), LightningError> { - Err(LightningError { err: "", action: None }) + Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) } fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {} fn peer_connected(&self, _their_node_id: &PublicKey) {} @@ -169,13 +169,13 @@ impl TestRoutingMessageHandler { } impl msgs::RoutingMessageHandler for TestRoutingMessageHandler { fn handle_node_announcement(&self, _msg: &msgs::NodeAnnouncement) -> Result { - Err(LightningError { err: "", action: None }) + Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) } fn handle_channel_announcement(&self, _msg: &msgs::ChannelAnnouncement) -> Result { - Err(LightningError { err: "", action: None }) + Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) } fn handle_channel_update(&self, _msg: &msgs::ChannelUpdate) -> Result { - Err(LightningError { err: "", action: None }) + Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError }) } fn handle_htlc_fail_channel_update(&self, _update: &msgs::HTLCFailChannelUpdate) {} fn get_next_channel_announcements(&self, _starting_point: u64, _batch_amount: u8) -> Vec<(msgs::ChannelAnnouncement, msgs::ChannelUpdate,msgs::ChannelUpdate)> {