Merge pull request #263 from TheBlueMatt/2018-12-monitor-fail-2
Handle monitor update failures in msg-recv functions
commit 0bf783ee4a
3 changed files with 589 additions and 69 deletions
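In short: a ChannelMonitor persistence failure reported while handling a peer message (or sending a payment) is now handled instead of hitting unimplemented!(). On a TemporaryFailure the channel is frozen and the messages we generated are dropped and regenerated later; on a PermanentFailure the channel is force-closed. A rough stand-in sketch of that policy, with simplified types rather than the real Channel/ChannelManager API:

// Illustrative sketch only: simplified stand-ins for the types touched in this
// commit, showing the policy the new handling implements for each error kind.
enum ChannelMonitorUpdateErr { TemporaryFailure, PermanentFailure }

enum Outcome {
    // Channel stays open but frozen: the messages we generated are dropped and
    // regenerated once the monitor update eventually succeeds.
    FreezeUntilMonitorRestored,
    // Channel is removed from the channel map and force-closed.
    ForceClose,
}

fn on_monitor_update_failure(err: ChannelMonitorUpdateErr) -> Outcome {
    match err {
        ChannelMonitorUpdateErr::TemporaryFailure => Outcome::FreezeUntilMonitorRestored,
        ChannelMonitorUpdateErr::PermanentFailure => Outcome::ForceClose,
    }
}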
src/ln/channel.rs

@@ -2154,7 +2154,7 @@ impl Channel {
     /// commitment update or a revoke_and_ack generation). The messages which were generated from
     /// that original call must *not* have been sent to the remote end, and must instead have been
     /// dropped. They will be regenerated when monitor_updating_restored is called.
-    pub fn monitor_update_failed(&mut self, order: RAACommitmentOrder) {
+    pub fn monitor_update_failed(&mut self, order: RAACommitmentOrder, mut pending_forwards: Vec<(PendingForwardHTLCInfo, u64)>, mut pending_fails: Vec<(HTLCSource, [u8; 32], HTLCFailReason)>, raa_first_dropped_cs: bool) {
         assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, 0);
         match order {
             RAACommitmentOrder::CommitmentFirst => {
@@ -2163,9 +2163,13 @@ impl Channel {
             },
             RAACommitmentOrder::RevokeAndACKFirst => {
                 self.monitor_pending_revoke_and_ack = true;
-                self.monitor_pending_commitment_signed = false;
+                self.monitor_pending_commitment_signed = raa_first_dropped_cs;
             },
         }
+        assert!(self.monitor_pending_forwards.is_empty());
+        mem::swap(&mut pending_forwards, &mut self.monitor_pending_forwards);
+        assert!(self.monitor_pending_failures.is_empty());
+        mem::swap(&mut pending_fails, &mut self.monitor_pending_failures);
         self.monitor_pending_order = Some(order);
         self.channel_state |= ChannelState::MonitorUpdateFailed as u32;
     }
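monitor_update_failed now also takes the HTLC forwards/fails that were about to be processed and stashes them on the channel until the monitor comes back. A minimal sketch of that stash-and-replay pattern, with placeholder element types standing in for the real PendingForwardHTLCInfo/HTLCSource tuples (the struct and method names here are illustrative, not the real Channel API):

// Minimal sketch (not the real Channel struct) of the stash-and-replay pattern.
use std::mem;

#[derive(Default)]
struct Chan {
    monitor_pending_forwards: Vec<u64>, // stand-in for (PendingForwardHTLCInfo, u64)
    monitor_pending_failures: Vec<u64>, // stand-in for (HTLCSource, [u8; 32], HTLCFailReason)
    monitor_update_failed: bool,
}

impl Chan {
    fn monitor_update_failed(&mut self, mut pending_forwards: Vec<u64>, mut pending_fails: Vec<u64>) {
        // The caller hands us whatever it was about to forward/fail; we hold it
        // until the monitor update is restored and then hand it back.
        assert!(self.monitor_pending_forwards.is_empty());
        mem::swap(&mut pending_forwards, &mut self.monitor_pending_forwards);
        assert!(self.monitor_pending_failures.is_empty());
        mem::swap(&mut pending_fails, &mut self.monitor_pending_failures);
        self.monitor_update_failed = true;
    }

    fn monitor_updating_restored(&mut self) -> (Vec<u64>, Vec<u64>) {
        self.monitor_update_failed = false;
        (mem::take(&mut self.monitor_pending_forwards),
         mem::take(&mut self.monitor_pending_failures))
    }
}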
src/ln/channelmanager.rs

@@ -442,6 +442,61 @@ macro_rules! try_chan_entry {
     }
 }

+macro_rules! return_monitor_err {
+    ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path) => {
+        return_monitor_err!($self, $err, $channel_state, $entry, $action_type, Vec::new(), Vec::new())
+    };
+    ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $raa_first_dropped_cs: expr) => {
+        if $action_type != RAACommitmentOrder::RevokeAndACKFirst { panic!("Bad return_monitor_err call!"); }
+        return_monitor_err!($self, $err, $channel_state, $entry, $action_type, Vec::new(), Vec::new(), $raa_first_dropped_cs)
+    };
+    ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $failed_forwards: expr, $failed_fails: expr) => {
+        return_monitor_err!($self, $err, $channel_state, $entry, $action_type, $failed_forwards, $failed_fails, false)
+    };
+    ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $failed_forwards: expr, $failed_fails: expr, $raa_first_dropped_cs: expr) => {
+        match $err {
+            ChannelMonitorUpdateErr::PermanentFailure => {
+                let (channel_id, mut chan) = $entry.remove_entry();
+                if let Some(short_id) = chan.get_short_channel_id() {
+                    $channel_state.short_to_id.remove(&short_id);
+                }
+                // TODO: $failed_fails is dropped here, which will cause other channels to hit the
+                // chain in a confused state! We need to move them into the ChannelMonitor which
+                // will be responsible for failing backwards once things confirm on-chain.
+                // It's ok that we drop $failed_forwards here - at this point we'd rather they
+                // broadcast HTLC-Timeout and pay the associated fees to get their funds back than
+                // us bother trying to claim it just to forward on to another peer. If we're
+                // splitting hairs we'd prefer to claim payments that were to us, but we haven't
+                // given up the preimage yet, so might as well just wait until the payment is
+                // retried, avoiding the on-chain fees.
+                return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
+            },
+            ChannelMonitorUpdateErr::TemporaryFailure => {
+                $entry.get_mut().monitor_update_failed($action_type, $failed_forwards, $failed_fails, $raa_first_dropped_cs);
+                return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key()));
+            },
+        }
+    }
+}
+
+// Does not break in case of TemporaryFailure!
+macro_rules! maybe_break_monitor_err {
+    ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path) => {
+        match $err {
+            ChannelMonitorUpdateErr::PermanentFailure => {
+                let (channel_id, mut chan) = $entry.remove_entry();
+                if let Some(short_id) = chan.get_short_channel_id() {
+                    $channel_state.short_to_id.remove(&short_id);
+                }
+                break Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
+            },
+            ChannelMonitorUpdateErr::TemporaryFailure => {
+                $entry.get_mut().monitor_update_failed($action_type, Vec::new(), Vec::new(), false);
+            },
+        }
+    }
+}
+
 impl ChannelManager {
     /// Constructs a new ChannelManager to hold several channels and route between them.
     ///
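A note on the two macros above: return_monitor_err! always bails out of the message handler (for TemporaryFailure it returns an IgnoreError so the peer is not disconnected), while maybe_break_monitor_err! deliberately does not break on TemporaryFailure so the send_payment path can fall through and report MonitorUpdateFailed itself. A stand-in sketch of that control-flow difference (simplified types, not the real macro expansion):

// Stand-in sketch of the control flow the two macros encode.
#[derive(Clone, Copy)]
enum MonErr { Temporary, Permanent }

// return_monitor_err!-style: the handler always returns an error to the caller.
fn message_handler(err: MonErr) -> Result<(), &'static str> {
    match err {
        // Channel frozen; the peer handler should just ignore this error.
        MonErr::Temporary => Err("Failed to update ChannelMonitor"),
        // Channel removed and force-closed.
        MonErr::Permanent => Err("ChannelMonitor storage failure"),
    }
}

// maybe_break_monitor_err!-style: only PermanentFailure breaks out; on
// TemporaryFailure the caller continues and reports the frozen channel itself.
fn send_payment_path(err: MonErr) -> Result<(), &'static str> {
    loop {
        match err {
            // Does not break: fall through.
            MonErr::Temporary => {}
            MonErr::Permanent => break Err("ChannelMonitor storage failure"),
        }
        // Reached only on TemporaryFailure in this sketch.
        break Err("MonitorUpdateFailed");
    }
}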
@@ -667,33 +722,6 @@ impl ChannelManager {
         }
     }

-    fn handle_monitor_update_fail(&self, mut channel_state_lock: MutexGuard<ChannelHolder>, channel_id: &[u8; 32], err: ChannelMonitorUpdateErr, reason: RAACommitmentOrder) {
-        match err {
-            ChannelMonitorUpdateErr::PermanentFailure => {
-                let mut chan = {
-                    let channel_state = channel_state_lock.borrow_parts();
-                    let chan = channel_state.by_id.remove(channel_id).expect("monitor_update_failed must be called within the same lock as the channel get!");
-                    if let Some(short_id) = chan.get_short_channel_id() {
-                        channel_state.short_to_id.remove(&short_id);
-                    }
-                    chan
-                };
-                mem::drop(channel_state_lock);
-                self.finish_force_close_channel(chan.force_shutdown());
-                if let Ok(update) = self.get_channel_update(&chan) {
-                    let mut channel_state = self.channel_state.lock().unwrap();
-                    channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                        msg: update
-                    });
-                }
-            },
-            ChannelMonitorUpdateErr::TemporaryFailure => {
-                let channel = channel_state_lock.by_id.get_mut(channel_id).expect("monitor_update_failed must be called within the same lock as the channel get!");
-                channel.monitor_update_failed(reason);
-            },
-        }
-    }
-
     #[inline]
     fn gen_rho_mu_from_shared_secret(shared_secret: &[u8]) -> ([u8; 32], [u8; 32]) {
         assert_eq!(shared_secret.len(), 32);
@@ -1193,7 +1221,17 @@ impl ChannelManager {
     /// May generate a SendHTLCs message event on success, which should be relayed.
     ///
     /// Raises APIError::RoutError when invalid route or forward parameter
-    /// (cltv_delta, fee, node public key) is specified
+    /// (cltv_delta, fee, node public key) is specified.
+    /// Raises APIError::ChannelUnavailable if the next-hop channel is not available for updates
+    /// (including due to previous monitor update failure or new permanent monitor update failure).
+    /// Raised APIError::MonitorUpdateFailed if a new monitor update failure prevented sending the
+    /// relevant updates.
+    ///
+    /// In case of APIError::RouteError/APIError::ChannelUnavailable, the payment send has failed
+    /// and you may wish to retry via a different route immediately.
+    /// In case of APIError::MonitorUpdateFailed, the commitment update has been irrevocably
+    /// committed on our end and we're just waiting for a monitor update to send it. Do NOT retry
+    /// the payment via a different route unless you intend to pay twice!
     pub fn send_payment(&self, route: Route, payment_hash: [u8; 32]) -> Result<(), APIError> {
         if route.hops.len() < 1 || route.hops.len() > 20 {
             return Err(APIError::RouteError{err: "Route didn't go anywhere/had bogus size"});
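The new send_payment docs imply different caller behaviour per error class. A hedged sketch of how a caller might react to each (the APIError variants below are stand-ins mirroring the ones named in the docs, not imports from the crate):

// Hedged sketch: caller-side handling of the error classes documented above.
enum APIError {
    RouteError { err: &'static str },
    ChannelUnavailable { err: &'static str },
    MonitorUpdateFailed,
}

fn handle_send_result(res: Result<(), APIError>) {
    match res {
        Ok(()) => {} // HTLC is in flight; wait for PaymentSent/PaymentFailed events.
        // Nothing was committed: safe to retry immediately over another route.
        Err(APIError::RouteError { err }) | Err(APIError::ChannelUnavailable { err }) => {
            println!("payment not sent ({}), may retry via a different route", err);
        }
        // The commitment update is already irrevocably queued on our side; it will
        // be sent once the monitor unfreezes. Retrying elsewhere risks paying twice.
        Err(APIError::MonitorUpdateFailed) => {
            println!("payment pending monitor restoration; do NOT retry elsewhere");
        }
    }
}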
@@ -1224,32 +1262,32 @@ impl ChannelManager {
             Some(id) => id.clone(),
         };

-        match {
         let channel_state = channel_lock.borrow_parts();
         if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) {
+            match {
                 if chan.get().get_their_node_id() != route.hops.first().unwrap().pubkey {
                     return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"});
                 }
-                if chan.get().is_awaiting_monitor_update() {
-                    return Err(APIError::MonitorUpdateFailed);
-                }
                 if !chan.get().is_live() {
-                    return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected!"});
+                    return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!"});
                 }
                 break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
                     route: route.clone(),
                     session_priv: session_priv.clone(),
                     first_hop_htlc_msat: htlc_msat,
                 }, onion_packet), channel_state, chan)
-        } else { unreachable!(); }
             } {
                 Some((update_add, commitment_signed, chan_monitor)) => {
                     if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-                        self.handle_monitor_update_fail(channel_lock, &id, e, RAACommitmentOrder::CommitmentFirst);
+                        maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst);
+                        // Note that MonitorUpdateFailed here indicates (per function docs)
+                        // that we will resent the commitment update once we unfree monitor
+                        // updating, so we have to take special care that we don't return
+                        // something else in case we will resend later!
                         return Err(APIError::MonitorUpdateFailed);
                     }

-                    channel_lock.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                    channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
                         node_id: route.hops.first().unwrap().pubkey,
                         updates: msgs::CommitmentUpdate {
                             update_add_htlcs: vec![update_add],

@@ -1263,6 +1301,7 @@ impl ChannelManager {
                 },
                 None => {},
             }
+        } else { unreachable!(); }
         return Ok(());
     };

@@ -1441,7 +1480,7 @@ impl ChannelManager {
                     },
                 };
                 if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
-                    unimplemented!();// but def dont push the event...
+                    unimplemented!();
                 }
                 channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
                     node_id: forward_chan.get_their_node_id(),
@@ -1679,6 +1718,13 @@ impl ChannelManager {
                 if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
                     match e {
                         ChannelMonitorUpdateErr::PermanentFailure => {
+                            // TODO: There may be some pending HTLCs that we intended to fail
+                            // backwards when a monitor update failed. We should make sure
+                            // knowledge of those gets moved into the appropriate in-memory
+                            // ChannelMonitor and they get failed backwards once we get
+                            // on-chain confirmations.
+                            // Note I think #198 addresses this, so once its merged a test
+                            // should be written.
                             if let Some(short_id) = channel.get_short_channel_id() {
                                 short_to_id.remove(&short_id);
                             }
@@ -2281,8 +2327,9 @@ impl ChannelManager {
                 }
                 let (revoke_and_ack, commitment_signed, closing_signed, chan_monitor) =
                     try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &*self.fee_estimator), channel_state, chan);
-                if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-                    unimplemented!();
+                if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+                    return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, commitment_signed.is_some());
+                    //TODO: Rebroadcast closing_signed if present on monitor update restoration
                 }
                 channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
                     node_id: their_node_id.clone(),
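The raa_first_dropped_cs argument (passed here as commitment_signed.is_some()) records whether, besides the revoke_and_ack we just dropped, we also dropped a commitment_signed that must be regenerated on restore. A stand-in sketch of that bookkeeping; note the CommitmentFirst arm is not shown in this diff and is assumed here to mark the commitment update for resend:

// Stand-in sketch (not the real Channel fields) of what monitor_update_failed records.
enum RAACommitmentOrder { CommitmentFirst, RevokeAndACKFirst }

struct PendingResend {
    revoke_and_ack: bool,
    commitment_signed: bool,
}

fn record_dropped_messages(order: RAACommitmentOrder, raa_first_dropped_cs: bool) -> PendingResend {
    match order {
        // Assumed (not shown in this diff): a dropped commitment update must be resent.
        RAACommitmentOrder::CommitmentFirst => PendingResend { revoke_and_ack: false, commitment_signed: true },
        // We always dropped an RAA; we only dropped a commitment_signed as well if the
        // handler actually produced one (commitment_signed.is_some() at the call site).
        RAACommitmentOrder::RevokeAndACKFirst => PendingResend { revoke_and_ack: true, commitment_signed: raa_first_dropped_cs },
    }
}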
@@ -2358,8 +2405,8 @@ impl ChannelManager {
                 }
                 let (commitment_update, pending_forwards, pending_failures, closing_signed, chan_monitor) =
                     try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &*self.fee_estimator), channel_state, chan);
-                if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-                    unimplemented!();
+                if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+                    return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, pending_forwards, pending_failures);
                 }
                 if let Some(updates) = commitment_update {
                     channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
@@ -2453,11 +2500,21 @@ impl ChannelManager {
                 if chan.get().get_their_node_id() != *their_node_id {
                     return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
                 }
-                let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, order, shutdown) =
+                let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, mut order, shutdown) =
                     try_chan_entry!(self, chan.get_mut().channel_reestablish(msg), channel_state, chan);
                 if let Some(monitor) = channel_monitor {
-                    if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
-                        unimplemented!();
+                    if let Err(e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
+                        // channel_reestablish doesn't guarantee the order it returns is sensical
+                        // for the messages it returns, but if we're setting what messages to
+                        // re-transmit on monitor update success, we need to make sure it is sane.
+                        if revoke_and_ack.is_none() {
+                            order = RAACommitmentOrder::CommitmentFirst;
+                        }
+                        if commitment_update.is_none() {
+                            order = RAACommitmentOrder::RevokeAndACKFirst;
+                        }
+                        return_monitor_err!(self, e, channel_state, chan, order);
+                        //TODO: Resend the funding_locked if needed once we get the monitor running again
                     }
                 }
                 if let Some(msg) = funding_locked {
@@ -3829,6 +3886,12 @@ mod tests {
                 _ => panic!("Unexpected event type!"),
             }
         }
+
+        fn from_node(node: &Node) -> SendEvent {
+            let mut events = node.node.get_and_clear_pending_msg_events();
+            assert_eq!(events.len(), 1);
+            SendEvent::from_event(events.pop().unwrap())
+        }
     }

     macro_rules! check_added_monitors {
@@ -3851,7 +3914,7 @@ mod tests {
                 commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, false);
             }
         };
-        ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */) => {
+        ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */, true /* return last RAA */) => {
             {
                 let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!($node_a, $node_b.node.get_our_node_id());
                 check_added_monitors!($node_b, 0);
@@ -3876,6 +3939,23 @@ mod tests {
                 assert!($node_a.node.get_and_clear_pending_events().is_empty());
                 assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
                 }
+                (extra_msg_option, bs_revoke_and_ack)
+            }
+        };
+        ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr, true /* skip last step */, false /* return extra message */, true /* return last RAA */) => {
+            {
+                check_added_monitors!($node_a, 0);
+                assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
+                $node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed).unwrap();
+                check_added_monitors!($node_a, 1);
+                let (extra_msg_option, bs_revoke_and_ack) = commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true, true);
+                assert!(extra_msg_option.is_none());
+                bs_revoke_and_ack
+            }
+        };
+        ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */) => {
+            {
+                let (extra_msg_option, bs_revoke_and_ack) = commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true, true);
                 $node_a.node.handle_revoke_and_ack(&$node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
                 {
                     let mut added_monitors = $node_a.chan_monitor.added_monitors.lock().unwrap();
@@ -6946,15 +7026,19 @@ mod tests {
         let (_, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);

         *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
-        if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route, payment_hash_1) {} else { panic!(); }
+        if let Err(APIError::ChannelUnavailable {..}) = nodes[0].node.send_payment(route, payment_hash_1) {} else { panic!(); }
         check_added_monitors!(nodes[0], 1);

         let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
-        assert_eq!(events_1.len(), 1);
+        assert_eq!(events_1.len(), 2);
         match events_1[0] {
             MessageSendEvent::BroadcastChannelUpdate { .. } => {},
             _ => panic!("Unexpected event"),
         };
+        match events_1[1] {
+            MessageSendEvent::HandleError { node_id, .. } => assert_eq!(node_id, nodes[1].node.get_our_node_id()),
+            _ => panic!("Unexpected event"),
+        };

         // TODO: Once we hit the chain with the failure transaction we should check that we get a
         // PaymentFailed event
@@ -7393,6 +7477,436 @@ mod tests {
         do_test_monitor_temporary_update_fail(3 | 8 | 16);
     }

+    #[test]
+    fn test_monitor_update_fail_cs() {
+        // Tests handling of a monitor update failure when processing an incoming commitment_signed
+        let mut nodes = create_network(2);
+        create_announced_chan_between_nodes(&nodes, 0, 1);
+
+        let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+        let (payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+        nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+        check_added_monitors!(nodes[0], 1);
+
+        let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
+        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
+
+        *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+        if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg).unwrap_err() {
+            assert_eq!(err, "Failed to update ChannelMonitor");
+        } else { panic!(); }
+        check_added_monitors!(nodes[1], 1);
+        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+        *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+        nodes[1].node.test_restore_channel_monitor();
+        check_added_monitors!(nodes[1], 1);
+        let responses = nodes[1].node.get_and_clear_pending_msg_events();
+        assert_eq!(responses.len(), 2);
+
+        match responses[0] {
+            MessageSendEvent::SendRevokeAndACK { ref msg, ref node_id } => {
+                assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+                nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &msg).unwrap();
+                check_added_monitors!(nodes[0], 1);
+            },
+            _ => panic!("Unexpected event"),
+        }
+        match responses[1] {
+            MessageSendEvent::UpdateHTLCs { ref updates, ref node_id } => {
+                assert!(updates.update_add_htlcs.is_empty());
+                assert!(updates.update_fulfill_htlcs.is_empty());
+                assert!(updates.update_fail_htlcs.is_empty());
+                assert!(updates.update_fail_malformed_htlcs.is_empty());
+                assert!(updates.update_fee.is_none());
+                assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+
+                *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+                if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed).unwrap_err() {
+                    assert_eq!(err, "Failed to update ChannelMonitor");
+                } else { panic!(); }
+                check_added_monitors!(nodes[0], 1);
+                assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+            },
+            _ => panic!("Unexpected event"),
+        }
+
+        *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
+        nodes[0].node.test_restore_channel_monitor();
+        check_added_monitors!(nodes[0], 1);
+
+        let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa).unwrap();
+        check_added_monitors!(nodes[1], 1);
+
+        let mut events = nodes[1].node.get_and_clear_pending_events();
+        assert_eq!(events.len(), 1);
+        match events[0] {
+            Event::PendingHTLCsForwardable { .. } => { },
+            _ => panic!("Unexpected event"),
+        };
+        nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
+        nodes[1].node.process_pending_htlc_forwards();
+
+        events = nodes[1].node.get_and_clear_pending_events();
+        assert_eq!(events.len(), 1);
+        match events[0] {
+            Event::PaymentReceived { payment_hash, amt } => {
+                assert_eq!(payment_hash, our_payment_hash);
+                assert_eq!(amt, 1000000);
+            },
+            _ => panic!("Unexpected event"),
+        };
+
+        claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
+    }
+
+    fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
+        // Tests handling of a monitor update failure when processing an incoming RAA
+        let mut nodes = create_network(3);
+        create_announced_chan_between_nodes(&nodes, 0, 1);
+        create_announced_chan_between_nodes(&nodes, 1, 2);
+
+        // Rebalance a bit so that we can send backwards from 2 to 1.
+        send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
+
+        // Route a first payment that we'll fail backwards
+        let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
+
+        // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
+        assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1, PaymentFailReason::PreimageUnknown));
+        check_added_monitors!(nodes[2], 1);
+
+        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+        assert!(updates.update_add_htlcs.is_empty());
+        assert!(updates.update_fulfill_htlcs.is_empty());
+        assert_eq!(updates.update_fail_htlcs.len(), 1);
+        assert!(updates.update_fail_malformed_htlcs.is_empty());
+        assert!(updates.update_fee.is_none());
+        nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap();
+
+        let bs_revoke_and_ack = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
+        check_added_monitors!(nodes[0], 0);
+
+        // While the second channel is AwaitingRAA, forward a second payment to get it into the
+        // holding cell.
+        let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+        let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+        nodes[0].node.send_payment(route, payment_hash_2).unwrap();
+        check_added_monitors!(nodes[0], 1);
+
+        let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
+        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
+        commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false);
+
+        let events_1 = nodes[1].node.get_and_clear_pending_events();
+        assert_eq!(events_1.len(), 1);
+        match events_1[0] {
+            Event::PendingHTLCsForwardable { .. } => { },
+            _ => panic!("Unexpected event"),
+        };
+
+        nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
+        nodes[1].node.process_pending_htlc_forwards();
+        check_added_monitors!(nodes[1], 0);
+        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+        // Now fail monitor updating.
+        *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+        if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap_err() {
+            assert_eq!(err, "Failed to update ChannelMonitor");
+        } else { panic!(); }
+        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+        check_added_monitors!(nodes[1], 1);
+
+        // Attempt to forward a third payment but fail due to the second channel being unavailable
+        // for forwarding.
+
+        let (_, payment_hash_3) = get_payment_preimage_hash!(nodes[0]);
+        let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+        nodes[0].node.send_payment(route, payment_hash_3).unwrap();
+        check_added_monitors!(nodes[0], 1);
+
+        *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); // We succeed in updating the monitor for the first channel
+        send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
+        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
+        commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
+        check_added_monitors!(nodes[1], 0);
+
+        let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
+        assert_eq!(events_2.len(), 1);
+        match events_2.remove(0) {
+            MessageSendEvent::UpdateHTLCs { node_id, updates } => {
+                assert_eq!(node_id, nodes[0].node.get_our_node_id());
+                assert!(updates.update_fulfill_htlcs.is_empty());
+                assert_eq!(updates.update_fail_htlcs.len(), 1);
+                assert!(updates.update_fail_malformed_htlcs.is_empty());
+                assert!(updates.update_add_htlcs.is_empty());
+                assert!(updates.update_fee.is_none());
+
+                nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap();
+                commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
+
+                let events = nodes[0].node.get_and_clear_pending_events();
+                assert_eq!(events.len(), 1);
+                if let Event::PaymentFailed { payment_hash, rejected_by_dest } = events[0] {
+                    assert_eq!(payment_hash, payment_hash_3);
+                    assert!(!rejected_by_dest);
+                } else { panic!("Unexpected event!"); }
+            },
+            _ => panic!("Unexpected event type!"),
+        };
+
+        let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs {
+            // Try to route another payment backwards from 2 to make sure 1 holds off on responding
+            let (payment_preimage_4, payment_hash_4) = get_payment_preimage_hash!(nodes[0]);
+            let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+            nodes[2].node.send_payment(route, payment_hash_4).unwrap();
+            check_added_monitors!(nodes[2], 1);
+
+            send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0));
+            nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
+            if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg) {
+                assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
+            } else { panic!(); }
+            assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+            assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+            (Some(payment_preimage_4), Some(payment_hash_4))
+        } else { (None, None) };
+
+        // Restore monitor updating, ensuring we immediately get a fail-back update and a
+        // update_add update.
+        *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+        nodes[1].node.test_restore_channel_monitor();
+        check_added_monitors!(nodes[1], 2);
+
+        let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events();
+        if test_ignore_second_cs {
+            assert_eq!(events_3.len(), 3);
+        } else {
+            assert_eq!(events_3.len(), 2);
+        }
+
+        // Note that the ordering of the events for different nodes is non-prescriptive, though the
+        // ordering of the two events that both go to nodes[2] have to stay in the same order.
+        let messages_a = match events_3.pop().unwrap() {
+            MessageSendEvent::UpdateHTLCs { node_id, mut updates } => {
+                assert_eq!(node_id, nodes[0].node.get_our_node_id());
+                assert!(updates.update_fulfill_htlcs.is_empty());
+                assert_eq!(updates.update_fail_htlcs.len(), 1);
+                assert!(updates.update_fail_malformed_htlcs.is_empty());
+                assert!(updates.update_add_htlcs.is_empty());
+                assert!(updates.update_fee.is_none());
+                (updates.update_fail_htlcs.remove(0), updates.commitment_signed)
+            },
+            _ => panic!("Unexpected event type!"),
+        };
+        let raa = if test_ignore_second_cs {
+            match events_3.remove(1) {
+                MessageSendEvent::SendRevokeAndACK { node_id, msg } => {
+                    assert_eq!(node_id, nodes[2].node.get_our_node_id());
+                    Some(msg.clone())
+                },
+                _ => panic!("Unexpected event"),
+            }
+        } else { None };
+        let send_event_b = SendEvent::from_event(events_3.remove(0));
+        assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id());
+
+        // Now deliver the new messages...
+
+        nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0).unwrap();
+        commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false);
+        let events_4 = nodes[0].node.get_and_clear_pending_events();
+        assert_eq!(events_4.len(), 1);
+        if let Event::PaymentFailed { payment_hash, rejected_by_dest } = events_4[0] {
+            assert_eq!(payment_hash, payment_hash_1);
+            assert!(rejected_by_dest);
+        } else { panic!("Unexpected event!"); }
+
+        nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]).unwrap();
+        if test_ignore_second_cs {
+            nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg).unwrap();
+            check_added_monitors!(nodes[2], 1);
+            let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+            nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa.unwrap()).unwrap();
+            check_added_monitors!(nodes[2], 1);
+            let bs_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+            assert!(bs_cs.update_add_htlcs.is_empty());
+            assert!(bs_cs.update_fail_htlcs.is_empty());
+            assert!(bs_cs.update_fail_malformed_htlcs.is_empty());
+            assert!(bs_cs.update_fulfill_htlcs.is_empty());
+            assert!(bs_cs.update_fee.is_none());
+
+            nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
+            check_added_monitors!(nodes[1], 1);
+            let as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+            assert!(as_cs.update_add_htlcs.is_empty());
+            assert!(as_cs.update_fail_htlcs.is_empty());
+            assert!(as_cs.update_fail_malformed_htlcs.is_empty());
+            assert!(as_cs.update_fulfill_htlcs.is_empty());
+            assert!(as_cs.update_fee.is_none());
+
+            nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed).unwrap();
+            check_added_monitors!(nodes[1], 1);
+            let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
+
+            nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed).unwrap();
+            check_added_monitors!(nodes[2], 1);
+            let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+            nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa).unwrap();
+            check_added_monitors!(nodes[2], 1);
+            assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
+
+            nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa).unwrap();
+            check_added_monitors!(nodes[1], 1);
+            assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+        } else {
+            commitment_signed_dance!(nodes[2], nodes[1], send_event_b.commitment_msg, false);
+        }
+
+        let events_5 = nodes[2].node.get_and_clear_pending_events();
+        assert_eq!(events_5.len(), 1);
+        match events_5[0] {
+            Event::PendingHTLCsForwardable { .. } => { },
+            _ => panic!("Unexpected event"),
+        };
+
+        nodes[2].node.channel_state.lock().unwrap().next_forward = Instant::now();
+        nodes[2].node.process_pending_htlc_forwards();
+
+        let events_6 = nodes[2].node.get_and_clear_pending_events();
+        assert_eq!(events_6.len(), 1);
+        match events_6[0] {
+            Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); },
+            _ => panic!("Unexpected event"),
+        };
+
+        if test_ignore_second_cs {
+            let events_7 = nodes[1].node.get_and_clear_pending_events();
+            assert_eq!(events_7.len(), 1);
+            match events_7[0] {
+                Event::PendingHTLCsForwardable { .. } => { },
+                _ => panic!("Unexpected event"),
+            };
+
+            nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
+            nodes[1].node.process_pending_htlc_forwards();
+            check_added_monitors!(nodes[1], 1);
+
+            send_event = SendEvent::from_node(&nodes[1]);
+            assert_eq!(send_event.node_id, nodes[0].node.get_our_node_id());
+            assert_eq!(send_event.msgs.len(), 1);
+            nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
+            commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false);
+
+            let events_8 = nodes[0].node.get_and_clear_pending_events();
+            assert_eq!(events_8.len(), 1);
+            match events_8[0] {
+                Event::PendingHTLCsForwardable { .. } => { },
+                _ => panic!("Unexpected event"),
+            };
+
+            nodes[0].node.channel_state.lock().unwrap().next_forward = Instant::now();
+            nodes[0].node.process_pending_htlc_forwards();
+
+            let events_9 = nodes[0].node.get_and_clear_pending_events();
+            assert_eq!(events_9.len(), 1);
+            match events_9[0] {
+                Event::PaymentReceived { payment_hash, .. } => assert_eq!(payment_hash, payment_hash_4.unwrap()),
+                _ => panic!("Unexpected event"),
+            };
+            claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap());
+        }
+
+        claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
+    }
+
+    #[test]
+    fn test_monitor_update_fail_raa() {
+        do_test_monitor_update_fail_raa(false);
+        do_test_monitor_update_fail_raa(true);
+    }
+
+    #[test]
+    fn test_monitor_update_fail_reestablish() {
+        // Simple test for message retransmission after monitor update failure on
+        // channel_reestablish generating a monitor update (which comes from freeing holding cell
+        // HTLCs).
+        let mut nodes = create_network(3);
+        create_announced_chan_between_nodes(&nodes, 0, 1);
+        create_announced_chan_between_nodes(&nodes, 1, 2);
+
+        let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
+
+        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+
+        assert!(nodes[2].node.claim_funds(our_payment_preimage));
+        check_added_monitors!(nodes[2], 1);
+        let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+        assert!(updates.update_add_htlcs.is_empty());
+        assert!(updates.update_fail_htlcs.is_empty());
+        assert!(updates.update_fail_malformed_htlcs.is_empty());
+        assert!(updates.update_fee.is_none());
+        assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+        nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
+        check_added_monitors!(nodes[1], 1);
+        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+        commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
+
+        *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+        nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+        nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+
+        let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
+        let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish).unwrap();
+
+        if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap_err() {
+            assert_eq!(err, "Failed to update ChannelMonitor");
+        } else { panic!(); }
+        check_added_monitors!(nodes[1], 1);
+
+        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+
+        nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+        nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+
+        assert!(as_reestablish == get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()));
+        assert!(bs_reestablish == get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()));
+
+        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish).unwrap();
+
+        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap();
+        check_added_monitors!(nodes[1], 0);
+        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+        *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+        nodes[1].node.test_restore_channel_monitor();
+        check_added_monitors!(nodes[1], 1);
+
+        updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+        assert!(updates.update_add_htlcs.is_empty());
+        assert!(updates.update_fail_htlcs.is_empty());
+        assert!(updates.update_fail_malformed_htlcs.is_empty());
+        assert!(updates.update_fee.is_none());
+        assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
+        commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
+
+        let events = nodes[0].node.get_and_clear_pending_events();
+        assert_eq!(events.len(), 1);
+        match events[0] {
+            Event::PaymentSent { payment_preimage, .. } => assert_eq!(payment_preimage, our_payment_preimage),
+            _ => panic!("Unexpected event"),
+        }
+    }
+
     #[test]
     fn test_invalid_channel_announcement() {
         //Test BOLT 7 channel_announcement msg requirement for final node, gather data to build customed channel_announcement msgs
src/ln/channelmonitor.rs

@@ -71,6 +71,8 @@ pub enum ChannelMonitorUpdateErr {
     /// Used to indicate no further channel monitor updates will be allowed (eg we've moved on to a
     /// different watchtower and cannot update with all watchtowers that were previously informed
     /// of this channel). This will force-close the channel in question.
+    ///
+    /// Should also be used to indicate a failure to update the local copy of the channel monitor.
     PermanentFailure,
 }
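The doc change clarifies that PermanentFailure also covers failing to update even the local copy of the monitor. A hedged sketch of how an implementer might pick between the two variants; the persister type and method below are illustrative, not the real ManyChannelMonitor API:

// Hedged sketch: choosing between the two variants documented above.
enum ChannelMonitorUpdateErr { TemporaryFailure, PermanentFailure }

struct DiskPersister { out_of_space: bool, disk_gone: bool }

impl DiskPersister {
    fn persist_monitor(&self, _serialized_monitor: &[u8]) -> Result<(), ChannelMonitorUpdateErr> {
        if self.disk_gone {
            // We can no longer keep even our local copy up to date: force-close.
            return Err(ChannelMonitorUpdateErr::PermanentFailure);
        }
        if self.out_of_space {
            // We expect to recover; freeze the channel and retry the write later.
            return Err(ChannelMonitorUpdateErr::TemporaryFailure);
        }
        Ok(())
    }
}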