Make the action field of LightningError mandatory

We also fill in the ErrorActions that were previously left empty (None); see the sketch below:
- Router secp signature-verification failure: IgnoreError
- processing errors in Router: IgnoreError
- get_channel_update called too early: IgnoreError
Antoine Riard 2019-11-04 19:54:43 -05:00
parent 7608483b0f
commit ddbe53f836
10 changed files with 160 additions and 177 deletions
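
The following sketch is illustrative only: the types are simplified stand-ins for msgs::LightningError and msgs::ErrorAction (the enum's message payloads are omitted). It shows how call sites change now that action is no longer an Option: handlers match on the ErrorAction directly instead of unwrapping a Some(..) and then deciding separately what a missing action means.

enum ErrorAction {
    IgnoreError,
    DisconnectPeer,
}

struct LightningError {
    err: &'static str,
    action: ErrorAction, // previously Option<ErrorAction>
}

fn handle(res: Result<(), LightningError>) {
    match res {
        Ok(()) => {},
        // Before this commit the arm read
        //   Err(LightningError { action: Some(ErrorAction::IgnoreError), .. })
        // and a None action still had to be handled separately.
        Err(LightningError { action: ErrorAction::IgnoreError, .. }) => {},
        Err(LightningError { action: ErrorAction::DisconnectPeer, err }) => {
            panic!("disconnecting peer: {}", err)
        },
    }
}

fn main() {
    handle(Err(LightningError {
        err: "Update older than last processed update",
        action: ErrorAction::IgnoreError,
    }));
}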


@ -392,7 +392,7 @@ pub fn do_test(data: &[u8]) {
($res: expr) => {
match $res {
Ok(()) => {},
Err(LightningError { action: Some(ErrorAction::IgnoreError), .. }) => { },
Err(LightningError { action: ErrorAction::IgnoreError, .. }) => { },
_ => { $res.unwrap() },
}
}


@ -190,7 +190,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
_ => panic!("Unexpected event"),
}
if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed) {
if let Err(msgs::LightningError{err, action: msgs::ErrorAction::IgnoreError }) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed) {
assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
} else { panic!(); }
}
@ -485,7 +485,7 @@ fn test_monitor_update_fail_cs() {
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg).unwrap_err() {
if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg).unwrap_err() {
assert_eq!(err, "Failed to update ChannelMonitor");
} else { panic!(); }
check_added_monitors!(nodes[1], 1);
@ -515,7 +515,7 @@ fn test_monitor_update_fail_cs() {
assert_eq!(*node_id, nodes[0].node.get_our_node_id());
*nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed).unwrap_err() {
if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed).unwrap_err() {
assert_eq!(err, "Failed to update ChannelMonitor");
} else { panic!(); }
check_added_monitors!(nodes[0], 1);
@ -565,7 +565,7 @@ fn test_monitor_update_fail_no_rebroadcast() {
let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true);
*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa).unwrap_err() {
if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa).unwrap_err() {
assert_eq!(err, "Failed to update ChannelMonitor");
} else { panic!(); }
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@ -618,12 +618,12 @@ fn test_monitor_update_raa_while_paused() {
*nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]).unwrap();
if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg).unwrap_err() {
if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg).unwrap_err() {
assert_eq!(err, "Failed to update ChannelMonitor");
} else { panic!(); }
check_added_monitors!(nodes[0], 1);
if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap_err() {
if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap_err() {
assert_eq!(err, "Previous monitor update failure prevented responses to RAA");
} else { panic!(); }
check_added_monitors!(nodes[0], 1);
@ -704,7 +704,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
// Now fail monitor updating.
*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap_err() {
if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap_err() {
assert_eq!(err, "Failed to update ChannelMonitor");
} else { panic!(); }
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
@ -768,7 +768,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0));
nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg) {
if let Err(msgs::LightningError{err, action: msgs::ErrorAction::IgnoreError }) = nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg) {
assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
} else { panic!(); }
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@ -945,7 +945,7 @@ fn test_monitor_update_fail_reestablish() {
nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish).unwrap();
if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap_err() {
if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap_err() {
assert_eq!(err, "Failed to update ChannelMonitor");
} else { panic!(); }
check_added_monitors!(nodes[1], 1);
@ -1033,12 +1033,12 @@ fn raa_no_response_awaiting_raa_state() {
// then restore channel monitor updates.
*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
assert_eq!(err, "Failed to update ChannelMonitor");
} else { panic!(); }
check_added_monitors!(nodes[1], 1);
if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() {
if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() {
assert_eq!(err, "Previous monitor update failure prevented responses to RAA");
} else { panic!(); }
check_added_monitors!(nodes[1], 1);
@ -1130,7 +1130,7 @@ fn claim_while_disconnected_monitor_update_fail() {
// update.
*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect).unwrap_err() {
if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect).unwrap_err() {
assert_eq!(err, "Failed to update ChannelMonitor");
} else { panic!(); }
check_added_monitors!(nodes[1], 1);
@ -1145,7 +1145,7 @@ fn claim_while_disconnected_monitor_update_fail() {
let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]).unwrap();
if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed).unwrap_err() {
if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed).unwrap_err() {
assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
} else { panic!(); }
// Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC
@ -1235,7 +1235,7 @@ fn monitor_failed_no_reestablish_response() {
assert_eq!(events.len(), 1);
let payment_event = SendEvent::from_event(events.pop().unwrap());
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
assert_eq!(err, "Failed to update ChannelMonitor");
} else { panic!(); }
check_added_monitors!(nodes[1], 1);
@ -1327,7 +1327,7 @@ fn first_message_on_recv_ordering() {
// Deliver the final RAA for the first payment, which does not require a response. RAAs
// generally require a commitment_signed, so the fact that we're expecting an opposite response
// to the next message also tests resetting the delivery order.
if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() {
if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() {
assert_eq!(err, "Failed to update ChannelMonitor");
} else { panic!(); }
check_added_monitors!(nodes[1], 1);
@ -1336,7 +1336,7 @@ fn first_message_on_recv_ordering() {
// RAA/CS response, which should be generated when we call test_restore_channel_monitor (with
// the appropriate HTLC acceptance).
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
} else { panic!(); }
@ -1597,7 +1597,7 @@ fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails:
}
let funding_signed_res = nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
if fail_on_signed || !restore_between_fails {
if let msgs::LightningError { err, action: Some(msgs::ErrorAction::IgnoreError) } = funding_signed_res.unwrap_err() {
if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = funding_signed_res.unwrap_err() {
if fail_on_generate && !restore_between_fails {
assert_eq!(err, "Previous monitor update failure prevented funding_signed from allowing funding broadcast");
check_added_monitors!(nodes[0], 0);


@ -152,12 +152,12 @@ impl MsgHandleErrInternal {
Self {
err: LightningError {
err,
action: Some(msgs::ErrorAction::SendErrorMessage {
action: msgs::ErrorAction::SendErrorMessage {
msg: msgs::ErrorMessage {
channel_id,
data: err.to_string()
},
}),
},
},
shutdown_finish: None,
}
@ -167,7 +167,7 @@ impl MsgHandleErrInternal {
Self {
err: LightningError {
err,
action: Some(msgs::ErrorAction::IgnoreError),
action: msgs::ErrorAction::IgnoreError,
},
shutdown_finish: None,
}
@ -181,12 +181,12 @@ impl MsgHandleErrInternal {
Self {
err: LightningError {
err,
action: Some(msgs::ErrorAction::SendErrorMessage {
action: msgs::ErrorAction::SendErrorMessage {
msg: msgs::ErrorMessage {
channel_id,
data: err.to_string()
},
}),
},
},
shutdown_finish: Some((shutdown_res, channel_update)),
}
@ -197,25 +197,25 @@ impl MsgHandleErrInternal {
err: match err {
ChannelError::Ignore(msg) => LightningError {
err: msg,
action: Some(msgs::ErrorAction::IgnoreError),
action: msgs::ErrorAction::IgnoreError,
},
ChannelError::Close(msg) => LightningError {
err: msg,
action: Some(msgs::ErrorAction::SendErrorMessage {
action: msgs::ErrorAction::SendErrorMessage {
msg: msgs::ErrorMessage {
channel_id,
data: msg.to_string()
},
}),
},
},
ChannelError::CloseDelayBroadcast { msg, .. } => LightningError {
err: msg,
action: Some(msgs::ErrorAction::SendErrorMessage {
action: msgs::ErrorAction::SendErrorMessage {
msg: msgs::ErrorMessage {
channel_id,
data: msg.to_string()
},
}),
},
},
},
shutdown_finish: None,
@ -1011,7 +1011,7 @@ impl ChannelManager {
/// May be called with channel_state already locked!
fn get_channel_update(&self, chan: &Channel) -> Result<msgs::ChannelUpdate, LightningError> {
let short_channel_id = match chan.get_short_channel_id() {
None => return Err(LightningError{err: "Channel not yet established", action: None}),
None => return Err(LightningError{err: "Channel not yet established", action: msgs::ErrorAction::IgnoreError}),
Some(id) => id,
};
@ -1140,7 +1140,7 @@ impl ChannelManager {
match handle_error!(self, err) {
Ok(_) => unreachable!(),
Err(e) => {
if let Some(msgs::ErrorAction::IgnoreError) = e.action {
if let msgs::ErrorAction::IgnoreError = e.action {
} else {
log_error!(self, "Got bad keys: {}!", e.err);
let mut channel_state = self.channel_state.lock().unwrap();
@ -1434,7 +1434,7 @@ impl ChannelManager {
match handle_error!(self, err) {
Ok(_) => {},
Err(e) => {
if let Some(msgs::ErrorAction::IgnoreError) = e.action {
if let msgs::ErrorAction::IgnoreError = e.action {
} else {
let mut channel_state = self.channel_state.lock().unwrap();
channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
@ -1660,7 +1660,7 @@ impl ChannelManager {
match handle_error!(self, err) {
Ok(_) => {},
Err(e) => {
if let Some(msgs::ErrorAction::IgnoreError) = e.action {
if let msgs::ErrorAction::IgnoreError = e.action {
} else {
let mut channel_state = self.channel_state.lock().unwrap();
channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
@ -2292,7 +2292,7 @@ impl ChannelManager {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
}
if !chan.get().is_usable() {
return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it", action: Some(msgs::ErrorAction::IgnoreError)}));
return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it", action: msgs::ErrorAction::IgnoreError}));
}
let our_node_id = self.get_our_node_id();
@ -2445,7 +2445,7 @@ impl ChannelManager {
match handle_error!(self, err) {
Ok(_) => unreachable!(),
Err(e) => {
if let Some(msgs::ErrorAction::IgnoreError) = e.action {
if let msgs::ErrorAction::IgnoreError = e.action {
} else {
log_error!(self, "Got bad keys: {}!", e.err);
let mut channel_state = self.channel_state.lock().unwrap();
@ -2538,7 +2538,7 @@ impl ChainListener for ChannelManager {
} else if let Err(e) = chan_res {
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: channel.get_their_node_id(),
action: Some(msgs::ErrorAction::SendErrorMessage { msg: e }),
action: msgs::ErrorAction::SendErrorMessage { msg: e },
});
return false;
}


@ -70,7 +70,7 @@ fn test_insane_channel_opens() {
// that supposedly makes the channel open message insane
let insane_open_helper = |expected_error_str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| {
match nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), LocalFeatures::new(), &message_mutator(open_channel_message.clone())) {
Err(msgs::LightningError{ err: error_str, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) => {
Err(msgs::LightningError{ err: error_str, action: msgs::ErrorAction::SendErrorMessage {..}}) => {
assert_eq!(error_str, expected_error_str, "unexpected LightningError string (expected `{}`, actual `{}`)", expected_error_str, error_str)
},
Err(msgs::LightningError{..}) => {panic!("unexpected LightningError action")},
@ -952,7 +952,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
// transaction.
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
if let Err(msgs::LightningError{action: Some(msgs::ErrorAction::SendErrorMessage{msg}), ..}) =
if let Err(msgs::LightningError{action: msgs::ErrorAction::SendErrorMessage{msg}, ..}) =
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish) {
nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msg);
let msgs::ErrorMessage {ref channel_id, ..} = msg;
@ -3455,7 +3455,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id());
let reestablish = get_event_msg!(nodes[3], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id());
if let Err(msgs::LightningError { action: Some(msgs::ErrorAction::SendErrorMessage { msg }), .. }) = nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish) {
if let Err(msgs::LightningError { action: msgs::ErrorAction::SendErrorMessage { msg }, .. }) = nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish) {
assert_eq!(msg.channel_id, channel_id);
} else { panic!("Unexpected result"); }
}
@ -5101,7 +5101,7 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat-1;
let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
assert_eq!(err, "Remote side tried to send less than our minimum HTLC value");
} else {
assert!(false);
@ -5127,7 +5127,7 @@ fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
updates.update_add_htlcs[0].amount_msat = 5000000-their_channel_reserve+1;
let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
assert_eq!(err, "Remote HTLC add would put them over their reserve value");
} else {
assert!(false);
@ -5174,7 +5174,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
msg.htlc_id = (super::channel::OUR_MAX_HTLCS) as u64;
let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
assert_eq!(err, "Remote tried to push more than our max accepted HTLCs");
} else {
assert!(false);
@ -5197,7 +5197,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], chan.2).their_max_htlc_value_in_flight_msat + 1;
let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
assert_eq!(err,"Remote HTLC add would put them over our max HTLC value");
} else {
assert!(false);
@ -5220,7 +5220,7 @@ fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
updates.update_add_htlcs[0].cltv_expiry = 500000000;
let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
assert_eq!(err,"Remote provided CLTV expiry in seconds instead of block height");
} else {
assert!(false);
@ -5266,7 +5266,7 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
let _bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
assert_eq!(err, "Remote skipped HTLC ID");
} else {
assert!(false);
@ -5298,7 +5298,7 @@ fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
let err = nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
assert_eq!(err, "Remote tried to fulfill/fail HTLC before it had been committed");
} else {
assert!(false);
@ -5330,7 +5330,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
let err = nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
assert_eq!(err, "Remote tried to fulfill/fail HTLC before it had been committed");
} else {
assert!(false);
@ -5363,7 +5363,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment()
let err = nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
assert_eq!(err, "Remote tried to fulfill/fail HTLC before it had been committed");
} else {
assert!(false);
@ -5404,7 +5404,7 @@ fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
update_fulfill_msg.htlc_id = 1;
let err = nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
assert_eq!(err, "Remote tried to fulfill/fail an HTLC we couldn't find");
} else {
assert!(false);
@ -5445,7 +5445,7 @@ fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
update_fulfill_msg.payment_preimage = PaymentPreimage([1; 32]);
let err = nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
assert_eq!(err, "Remote tried to fulfill HTLC with an incorrect preimage");
} else {
assert!(false);
@ -5491,7 +5491,7 @@ fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_messag
};
update_msg.failure_code &= !0x8000;
let err = nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
if let Err(msgs::LightningError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
assert_eq!(err, "Got update_fail_malformed_htlc with BADONION not set");
} else {
assert!(false);
@ -5864,14 +5864,12 @@ fn test_upfront_shutdown_script() {
node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
// Test we enforce upfront_scriptpbukey if by providing a diffrent one at closing that we disconnect peer
if let Err(error) = nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown) {
if let Some(error) = error.action {
match error {
ErrorAction::SendErrorMessage { msg } => {
assert_eq!(msg.data,"Got shutdown request with a scriptpubkey which did not match their previous scriptpubkey");
},
_ => { assert!(false); }
}
} else { assert!(false); }
match error.action {
ErrorAction::SendErrorMessage { msg } => {
assert_eq!(msg.data,"Got shutdown request with a scriptpubkey which did not match their previous scriptpubkey");
},
_ => { assert!(false); }
}
} else { assert!(false); }
let events = nodes[2].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
@ -5981,14 +5979,12 @@ fn test_user_configurable_csv_delay() {
let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
accept_channel.to_self_delay = 200;
if let Err(error) = nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), LocalFeatures::new(), &accept_channel) {
if let Some(error) = error.action {
match error {
ErrorAction::SendErrorMessage { msg } => {
assert_eq!(msg.data,"They wanted our payments to be delayed by a needlessly long period");
},
_ => { assert!(false); }
}
} else { assert!(false); }
match error.action {
ErrorAction::SendErrorMessage { msg } => {
assert_eq!(msg.data,"They wanted our payments to be delayed by a needlessly long period");
},
_ => { assert!(false); }
}
} else { assert!(false); }
// We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::new_from_req()
@ -6056,14 +6052,12 @@ fn test_data_loss_protect() {
// Check we update monitor following learning of per_commitment_point from B
if let Err(err) = nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]) {
if let Some(error) = err.action {
match error {
ErrorAction::SendErrorMessage { msg } => {
assert_eq!(msg.data, "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting");
},
_ => panic!("Unexpected event!"),
}
} else { assert!(false); }
match err.action {
ErrorAction::SendErrorMessage { msg } => {
assert_eq!(msg.data, "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting");
},
_ => panic!("Unexpected event!"),
}
} else { assert!(false); }
check_added_monitors!(nodes[0], 1);
@ -6085,13 +6079,11 @@ fn test_data_loss_protect() {
// Check we close channel detecting A is fallen-behind
if let Err(err) = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]) {
if let Some(error) = err.action {
match error {
ErrorAction::SendErrorMessage { msg } => {
assert_eq!(msg.data, "Peer attempted to reestablish channel with a very old local commitment transaction"); },
_ => panic!("Unexpected event!"),
}
} else { assert!(false); }
match err.action {
ErrorAction::SendErrorMessage { msg } => {
assert_eq!(msg.data, "Peer attempted to reestablish channel with a very old local commitment transaction"); },
_ => panic!("Unexpected event!"),
}
} else { assert!(false); }
let events = nodes[1].node.get_and_clear_pending_msg_events();


@ -547,7 +547,7 @@ pub struct LightningError {
/// A human-readable message describing the error
pub err: &'static str,
/// The action which should be taken against the offending peer.
pub action: Option<ErrorAction>, //TODO: Make this required
pub action: ErrorAction,
}
/// Struct used to return values from revoke_and_ack messages, containing a bunch of commitment
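
A second illustrative sketch (simplified stand-in types again, and a hypothetical helper name used only for this example): code that previously returned action: None must now name a concrete action, and this commit falls back to ErrorAction::IgnoreError in those spots (the Router errors and get_channel_update shown earlier).

enum ErrorAction { IgnoreError }

struct LightningError {
    err: &'static str,
    action: ErrorAction, // mandatory after this commit
}

// Hypothetical helper mirroring the get_channel_update early-return earlier in the diff.
fn channel_update_unavailable() -> Result<(), LightningError> {
    // Previously: Err(LightningError { err: "Channel not yet established", action: None })
    Err(LightningError {
        err: "Channel not yet established",
        action: ErrorAction::IgnoreError,
    })
}

fn main() {
    if let Err(e) = channel_update_unavailable() {
        match e.action {
            ErrorAction::IgnoreError => println!("ignored: {}", e.err),
        }
    }
}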


@ -139,7 +139,7 @@ impl PeerChannelEncryptor {
let mut chacha = ChaCha20Poly1305RFC::new(key, &nonce, h);
if !chacha.decrypt(&cyphertext[0..cyphertext.len() - 16], res, &cyphertext[cyphertext.len() - 16..]) {
return Err(LightningError{err: "Bad MAC", action: Some(msgs::ErrorAction::DisconnectPeer{ msg: None })});
return Err(LightningError{err: "Bad MAC", action: msgs::ErrorAction::DisconnectPeer{ msg: None }});
}
Ok(())
}
@ -193,11 +193,11 @@ impl PeerChannelEncryptor {
assert_eq!(act.len(), 50);
if act[0] != 0 {
return Err(LightningError{err: "Unknown handshake version number", action: Some(msgs::ErrorAction::DisconnectPeer{ msg: None })});
return Err(LightningError{err: "Unknown handshake version number", action: msgs::ErrorAction::DisconnectPeer{ msg: None }});
}
let their_pub = match PublicKey::from_slice(&act[1..34]) {
Err(_) => return Err(LightningError{err: "Invalid public key", action: Some(msgs::ErrorAction::DisconnectPeer{ msg: None })}),
Err(_) => return Err(LightningError{err: "Invalid public key", action: msgs::ErrorAction::DisconnectPeer{ msg: None }}),
Ok(key) => key,
};
@ -330,14 +330,14 @@ impl PeerChannelEncryptor {
panic!("Requested act at wrong step");
}
if act_three[0] != 0 {
return Err(LightningError{err: "Unknown handshake version number", action: Some(msgs::ErrorAction::DisconnectPeer{ msg: None })});
return Err(LightningError{err: "Unknown handshake version number", action: msgs::ErrorAction::DisconnectPeer{ msg: None }});
}
let mut their_node_id = [0; 33];
PeerChannelEncryptor::decrypt_with_ad(&mut their_node_id, 1, &temp_k2.unwrap(), &bidirectional_state.h, &act_three[1..50])?;
self.their_node_id = Some(match PublicKey::from_slice(&their_node_id) {
Ok(key) => key,
Err(_) => return Err(LightningError{err: "Bad node_id from peer", action: Some(msgs::ErrorAction::DisconnectPeer{ msg: None })}),
Err(_) => return Err(LightningError{err: "Bad node_id from peer", action: msgs::ErrorAction::DisconnectPeer{ msg: None }}),
});
let mut sha = Sha256::engine();


@ -481,26 +481,21 @@ impl<Descriptor: SocketDescriptor> PeerManager<Descriptor> {
match $thing {
Ok(x) => x,
Err(e) => {
if let Some(action) = e.action {
match action {
msgs::ErrorAction::DisconnectPeer { msg: _ } => {
//TODO: Try to push msg
log_trace!(self, "Got Err handling message, disconnecting peer because {}", e.err);
return Err(PeerHandleError{ no_connection_possible: false });
},
msgs::ErrorAction::IgnoreError => {
log_trace!(self, "Got Err handling message, ignoring because {}", e.err);
continue;
},
msgs::ErrorAction::SendErrorMessage { msg } => {
log_trace!(self, "Got Err handling message, sending Error message because {}", e.err);
encode_and_send_msg!(msg, 17);
continue;
},
}
} else {
log_debug!(self, "Got Err handling message, action not yet filled in: {}", e.err);
return Err(PeerHandleError{ no_connection_possible: false });
match e.action {
msgs::ErrorAction::DisconnectPeer { msg: _ } => {
//TODO: Try to push msg
log_trace!(self, "Got Err handling message, disconnecting peer because {}", e.err);
return Err(PeerHandleError{ no_connection_possible: false });
},
msgs::ErrorAction::IgnoreError => {
log_trace!(self, "Got Err handling message, ignoring because {}", e.err);
continue;
},
msgs::ErrorAction::SendErrorMessage { msg } => {
log_trace!(self, "Got Err handling message, sending Error message because {}", e.err);
encode_and_send_msg!(msg, 17);
continue;
},
}
}
};
@ -1020,42 +1015,38 @@ impl<Descriptor: SocketDescriptor> PeerManager<Descriptor> {
self.message_handler.route_handler.handle_htlc_fail_channel_update(update);
},
MessageSendEvent::HandleError { ref node_id, ref action } => {
if let Some(ref action) = *action {
match *action {
msgs::ErrorAction::DisconnectPeer { ref msg } => {
if let Some(mut descriptor) = peers.node_id_to_descriptor.remove(node_id) {
peers.peers_needing_send.remove(&descriptor);
if let Some(mut peer) = peers.peers.remove(&descriptor) {
if let Some(ref msg) = *msg {
log_trace!(self, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
log_pubkey!(node_id),
msg.data);
peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 17)));
// This isn't guaranteed to work, but if there is enough free
// room in the send buffer, put the error message there...
self.do_attempt_write_data(&mut descriptor, &mut peer);
} else {
log_trace!(self, "Handling DisconnectPeer HandleError event in peer_handler for node {} with no message", log_pubkey!(node_id));
}
match *action {
msgs::ErrorAction::DisconnectPeer { ref msg } => {
if let Some(mut descriptor) = peers.node_id_to_descriptor.remove(node_id) {
peers.peers_needing_send.remove(&descriptor);
if let Some(mut peer) = peers.peers.remove(&descriptor) {
if let Some(ref msg) = *msg {
log_trace!(self, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
log_pubkey!(node_id),
msg.data);
peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 17)));
// This isn't guaranteed to work, but if there is enough free
// room in the send buffer, put the error message there...
self.do_attempt_write_data(&mut descriptor, &mut peer);
} else {
log_trace!(self, "Handling DisconnectPeer HandleError event in peer_handler for node {} with no message", log_pubkey!(node_id));
}
descriptor.disconnect_socket();
self.message_handler.chan_handler.peer_disconnected(&node_id, false);
}
},
msgs::ErrorAction::IgnoreError => {},
msgs::ErrorAction::SendErrorMessage { ref msg } => {
log_trace!(self, "Handling SendErrorMessage HandleError event in peer_handler for node {} with message {}",
log_pubkey!(node_id),
msg.data);
let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, {
//TODO: Do whatever we're gonna do for handling dropped messages
});
peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 17)));
self.do_attempt_write_data(&mut descriptor, peer);
},
}
} else {
log_error!(self, "Got no-action HandleError Event in peer_handler for node {}, no such events should ever be generated!", log_pubkey!(node_id));
descriptor.disconnect_socket();
self.message_handler.chan_handler.peer_disconnected(&node_id, false);
}
},
msgs::ErrorAction::IgnoreError => {},
msgs::ErrorAction::SendErrorMessage { ref msg } => {
log_trace!(self, "Handling SendErrorMessage HandleError event in peer_handler for node {} with message {}",
log_pubkey!(node_id),
msg.data);
let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, {
//TODO: Do whatever we're gonna do for handling dropped messages
});
peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 17)));
self.do_attempt_write_data(&mut descriptor, peer);
},
}
}
}
@ -1172,7 +1163,7 @@ mod tests {
let chan_handler = test_utils::TestChannelMessageHandler::new();
chan_handler.pending_events.lock().unwrap().push(events::MessageSendEvent::HandleError {
node_id: their_id,
action: Some(msgs::ErrorAction::DisconnectPeer { msg: None }),
action: msgs::ErrorAction::DisconnectPeer { msg: None },
});
assert_eq!(chan_handler.pending_events.lock().unwrap().len(), 1);
peers[0].message_handler.chan_handler = Arc::new(chan_handler);


@ -404,7 +404,7 @@ macro_rules! secp_verify_sig {
( $secp_ctx: expr, $msg: expr, $sig: expr, $pubkey: expr ) => {
match $secp_ctx.verify($msg, $sig, $pubkey) {
Ok(_) => {},
Err(_) => return Err(LightningError{err: "Invalid signature from remote node", action: None}),
Err(_) => return Err(LightningError{err: "Invalid signature from remote node", action: ErrorAction::IgnoreError}),
}
};
}
@ -420,10 +420,10 @@ impl RoutingMessageHandler for Router {
let mut network = self.network_map.write().unwrap();
match network.nodes.get_mut(&msg.contents.node_id) {
None => Err(LightningError{err: "No existing channels for node_announcement", action: Some(ErrorAction::IgnoreError)}),
None => Err(LightningError{err: "No existing channels for node_announcement", action: ErrorAction::IgnoreError}),
Some(node) => {
if node.last_update >= msg.contents.timestamp {
return Err(LightningError{err: "Update older than last processed update", action: Some(ErrorAction::IgnoreError)});
return Err(LightningError{err: "Update older than last processed update", action: ErrorAction::IgnoreError});
}
node.features = msg.contents.features.clone();
@ -441,7 +441,7 @@ impl RoutingMessageHandler for Router {
fn handle_channel_announcement(&self, msg: &msgs::ChannelAnnouncement) -> Result<bool, LightningError> {
if msg.contents.node_id_1 == msg.contents.node_id_2 || msg.contents.bitcoin_key_1 == msg.contents.bitcoin_key_2 {
return Err(LightningError{err: "Channel announcement node had a channel with itself", action: Some(ErrorAction::IgnoreError)});
return Err(LightningError{err: "Channel announcement node had a channel with itself", action: ErrorAction::IgnoreError});
}
let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]);
@ -462,7 +462,7 @@ impl RoutingMessageHandler for Router {
.push_opcode(opcodes::all::OP_PUSHNUM_2)
.push_opcode(opcodes::all::OP_CHECKMULTISIG).into_script().to_v0_p2wsh();
if script_pubkey != expected_script {
return Err(LightningError{err: "Channel announcement keys didn't match on-chain script", action: Some(ErrorAction::IgnoreError)});
return Err(LightningError{err: "Channel announcement keys didn't match on-chain script", action: ErrorAction::IgnoreError});
}
//TODO: Check if value is worth storing, use it to inform routing, and compare it
//to the new HTLC max field in channel_update
@ -473,10 +473,10 @@ impl RoutingMessageHandler for Router {
false
},
Err(ChainError::NotWatched) => {
return Err(LightningError{err: "Channel announced on an unknown chain", action: Some(ErrorAction::IgnoreError)});
return Err(LightningError{err: "Channel announced on an unknown chain", action: ErrorAction::IgnoreError});
},
Err(ChainError::UnknownTx) => {
return Err(LightningError{err: "Channel announced without corresponding UTXO entry", action: Some(ErrorAction::IgnoreError)});
return Err(LightningError{err: "Channel announced without corresponding UTXO entry", action: ErrorAction::IgnoreError});
},
};
@ -527,7 +527,7 @@ impl RoutingMessageHandler for Router {
Self::remove_channel_in_nodes(network.nodes, &entry.get(), msg.contents.short_channel_id);
*entry.get_mut() = chan_info;
} else {
return Err(LightningError{err: "Already have knowledge of channel", action: Some(ErrorAction::IgnoreError)})
return Err(LightningError{err: "Already have knowledge of channel", action: ErrorAction::IgnoreError})
}
},
BtreeEntry::Vacant(entry) => {
@ -599,12 +599,12 @@ impl RoutingMessageHandler for Router {
let chan_was_enabled;
match network.channels.get_mut(&NetworkMap::get_key(msg.contents.short_channel_id, msg.contents.chain_hash)) {
None => return Err(LightningError{err: "Couldn't find channel for update", action: Some(ErrorAction::IgnoreError)}),
None => return Err(LightningError{err: "Couldn't find channel for update", action: ErrorAction::IgnoreError}),
Some(channel) => {
macro_rules! maybe_update_channel_info {
( $target: expr) => {
if $target.last_update >= msg.contents.timestamp {
return Err(LightningError{err: "Update older than last processed update", action: Some(ErrorAction::IgnoreError)});
return Err(LightningError{err: "Update older than last processed update", action: ErrorAction::IgnoreError});
}
chan_was_enabled = $target.enabled;
$target.last_update = msg.contents.timestamp;
@ -830,11 +830,11 @@ impl Router {
let network = self.network_map.read().unwrap();
if *target == network.our_node_id {
return Err(LightningError{err: "Cannot generate a route to ourselves", action: None});
return Err(LightningError{err: "Cannot generate a route to ourselves", action: ErrorAction::IgnoreError});
}
if final_value_msat > 21_000_000 * 1_0000_0000 * 1000 {
return Err(LightningError{err: "Cannot generate a route of more value than all existing satoshis", action: None});
return Err(LightningError{err: "Cannot generate a route of more value than all existing satoshis", action: ErrorAction::IgnoreError});
}
// We do a dest-to-source Dijkstra's sorting by each node's distance from the destination
@ -871,7 +871,7 @@ impl Router {
first_hop_targets.insert(chan.remote_network_id, short_channel_id);
}
if first_hop_targets.is_empty() {
return Err(LightningError{err: "Cannot route when there are no outbound routes away from us", action: None});
return Err(LightningError{err: "Cannot route when there are no outbound routes away from us", action: ErrorAction::IgnoreError});
}
}
@ -985,7 +985,7 @@ impl Router {
while res.last().unwrap().pubkey != *target {
let new_entry = match dist.remove(&res.last().unwrap().pubkey) {
Some(hop) => hop.3,
None => return Err(LightningError{err: "Failed to find a non-fee-overflowing path to the given destination", action: None}),
None => return Err(LightningError{err: "Failed to find a non-fee-overflowing path to the given destination", action: ErrorAction::IgnoreError}),
};
res.last_mut().unwrap().fee_msat = new_entry.fee_msat;
res.last_mut().unwrap().cltv_expiry_delta = new_entry.cltv_expiry_delta;
@ -1006,7 +1006,7 @@ impl Router {
}
}
Err(LightningError{err: "Failed to find a path to the given destination", action: None})
Err(LightningError{err: "Failed to find a path to the given destination", action: ErrorAction::IgnoreError})
}
}


@ -210,7 +210,7 @@ pub enum MessageSendEvent {
/// The node_id of the node which should receive this message
node_id: PublicKey,
/// The action which should be taken.
action: Option<msgs::ErrorAction>
action: msgs::ErrorAction
},
/// When a payment fails we may receive updates back from the hop where it failed. In such
/// cases this event is generated so that we can inform the router of this information.


@ -99,52 +99,52 @@ impl TestChannelMessageHandler {
impl msgs::ChannelMessageHandler for TestChannelMessageHandler {
fn handle_open_channel(&self, _their_node_id: &PublicKey, _their_local_features: LocalFeatures, _msg: &msgs::OpenChannel) -> Result<(), LightningError> {
Err(LightningError { err: "", action: None })
Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
}
fn handle_accept_channel(&self, _their_node_id: &PublicKey, _their_local_features: LocalFeatures, _msg: &msgs::AcceptChannel) -> Result<(), LightningError> {
Err(LightningError { err: "", action: None })
Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
}
fn handle_funding_created(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingCreated) -> Result<(), LightningError> {
Err(LightningError { err: "", action: None })
Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
}
fn handle_funding_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingSigned) -> Result<(), LightningError> {
Err(LightningError { err: "", action: None })
Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
}
fn handle_funding_locked(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingLocked) -> Result<(), LightningError> {
Err(LightningError { err: "", action: None })
Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
}
fn handle_shutdown(&self, _their_node_id: &PublicKey, _msg: &msgs::Shutdown) -> Result<(), LightningError> {
Err(LightningError { err: "", action: None })
Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
}
fn handle_closing_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::ClosingSigned) -> Result<(), LightningError> {
Err(LightningError { err: "", action: None })
Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
}
fn handle_update_add_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateAddHTLC) -> Result<(), LightningError> {
Err(LightningError { err: "", action: None })
Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
}
fn handle_update_fulfill_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFulfillHTLC) -> Result<(), LightningError> {
Err(LightningError { err: "", action: None })
Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
}
fn handle_update_fail_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailHTLC) -> Result<(), LightningError> {
Err(LightningError { err: "", action: None })
Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
}
fn handle_update_fail_malformed_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), LightningError> {
Err(LightningError { err: "", action: None })
Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
}
fn handle_commitment_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::CommitmentSigned) -> Result<(), LightningError> {
Err(LightningError { err: "", action: None })
Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
}
fn handle_revoke_and_ack(&self, _their_node_id: &PublicKey, _msg: &msgs::RevokeAndACK) -> Result<(), LightningError> {
Err(LightningError { err: "", action: None })
Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
}
fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFee) -> Result<(), LightningError> {
Err(LightningError { err: "", action: None })
Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
}
fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &msgs::AnnouncementSignatures) -> Result<(), LightningError> {
Err(LightningError { err: "", action: None })
Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
}
fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelReestablish) -> Result<(), LightningError> {
Err(LightningError { err: "", action: None })
Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
}
fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
fn peer_connected(&self, _their_node_id: &PublicKey) {}
@ -169,13 +169,13 @@ impl TestRoutingMessageHandler {
}
impl msgs::RoutingMessageHandler for TestRoutingMessageHandler {
fn handle_node_announcement(&self, _msg: &msgs::NodeAnnouncement) -> Result<bool, LightningError> {
Err(LightningError { err: "", action: None })
Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
}
fn handle_channel_announcement(&self, _msg: &msgs::ChannelAnnouncement) -> Result<bool, LightningError> {
Err(LightningError { err: "", action: None })
Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
}
fn handle_channel_update(&self, _msg: &msgs::ChannelUpdate) -> Result<bool, LightningError> {
Err(LightningError { err: "", action: None })
Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
}
fn handle_htlc_fail_channel_update(&self, _update: &msgs::HTLCFailChannelUpdate) {}
fn get_next_channel_announcements(&self, _starting_point: u64, _batch_amount: u8) -> Vec<(msgs::ChannelAnnouncement, msgs::ChannelUpdate,msgs::ChannelUpdate)> {