mirror of https://github.com/lightningdevkit/rust-lightning.git (synced 2025-02-25 15:20:24 +01:00)

Handle monitor update failures in two more places

Best reviewed with -b

commit a138a9af01 (parent a6f0281017)
2 changed files with 384 additions and 146 deletions
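The two new places are the claim path (claim_funds_internal) and the pending-HTLC-forward processing loop: both can now hit a failed ChannelMonitor persistence call and must pause the channel rather than fall into unimplemented!(). For orientation, the error type being handled looks roughly like the following; this is a paraphrased sketch of the enum in ln::channelmonitor at the time, with our own comments, not the exact source:

	// Paraphrased sketch of ChannelMonitorUpdateErr; comments are ours.
	pub enum ChannelMonitorUpdateErr {
		// The update could not be persisted right now but may succeed later.
		// The channel is paused (no further commitment updates) until the
		// ChannelManager is told the monitor is usable again -- in these
		// tests, via test_restore_channel_monitor().
		TemporaryFailure,
		// Persistence can never succeed; the channel must be force-closed.
		PermanentFailure,
	}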
@@ -1365,3 +1365,194 @@ fn first_message_on_recv_ordering() {
 	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
 	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
 }
+
+#[test]
+fn test_monitor_update_fail_claim() {
+	// Basic test for monitor update failures when processing claim_funds calls.
+	// We set up a simple 3-node network, sending a payment from A to B and failing B's monitor
+	// update to claim the payment. We then send a payment C->B->A, making the forward of this
+	// payment from B to A fail due to the paused channel. Finally, we restore the channel monitor
+	// updating and claim the payment on B.
+	let mut nodes = create_network(3);
+	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+	create_announced_chan_between_nodes(&nodes, 1, 2);
+
+	// Rebalance a bit so that we can send backwards from 3 to 2.
+	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
+
+	let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+	assert!(nodes[1].node.claim_funds(payment_preimage_1));
+	check_added_monitors!(nodes[1], 1);
+
+	let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+	let (_, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+	nodes[2].node.send_payment(route, payment_hash_2).unwrap();
+	check_added_monitors!(nodes[2], 1);
+
+	// Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
+	// paused, so forward shouldn't succeed until we call test_restore_channel_monitor().
+	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+
+	let mut events = nodes[2].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	let payment_event = SendEvent::from_event(events.pop().unwrap());
+	nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+	commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
+
+	let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+	nodes[2].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]).unwrap();
+	commitment_signed_dance!(nodes[2], nodes[1], bs_fail_update.commitment_signed, false, true);
+
+	let msg_events = nodes[2].node.get_and_clear_pending_msg_events();
+	assert_eq!(msg_events.len(), 1);
+	match msg_events[0] {
+		MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
+			assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id);
+			assert_eq!(msg.contents.flags & 2, 2); // temp disabled
+		},
+		_ => panic!("Unexpected event"),
+	}
+
+	let events = nodes[2].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 1);
+	if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
+		assert_eq!(payment_hash, payment_hash_2);
+		assert!(!rejected_by_dest);
+	} else { panic!("Unexpected event!"); }
+
+	// Now restore monitor updating on the 0<->1 channel and claim the funds on B.
+	nodes[1].node.test_restore_channel_monitor();
+	check_added_monitors!(nodes[1], 1);
+
+	let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]).unwrap();
+	commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false);
+
+	let events = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 1);
+	if let Event::PaymentSent { payment_preimage, .. } = events[0] {
+		assert_eq!(payment_preimage, payment_preimage_1);
+	} else { panic!("Unexpected event!"); }
+}
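A note on the `msg.contents.flags & 2` assertion above: in the BOLT 7 channel_update encoding current for this code, `flags` is a single u16 in which bit 0 carries the direction and bit 1 is the disable bit, so a set bit 1 advertises the channel as temporarily unusable -- exactly what B should gossip while its 0<->1 channel is paused. The same check in isolation:

	// BOLT 7 channel_update `flags` (single u16, as in this era of the spec):
	// bit 0 = direction, bit 1 = disable.
	fn is_disabled(flags: u16) -> bool {
		flags & 2 == 2
	}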
+
+#[test]
+fn test_monitor_update_on_pending_forwards() {
+	// Basic test for monitor update failures when processing pending HTLC fail/add forwards.
+	// We do this with a simple 3-node network, sending a payment from A to C and one from C to A.
+	// The payment from A to C will be failed by C and pending a back-fail to A, while the payment
+	// from C to A will be pending a forward to A.
+	let mut nodes = create_network(3);
+	create_announced_chan_between_nodes(&nodes, 0, 1);
+	create_announced_chan_between_nodes(&nodes, 1, 2);
+
+	// Rebalance a bit so that we can send backwards from 3 to 1.
+	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
+
+	let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
+	assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1, 1000000));
+	expect_pending_htlcs_forwardable!(nodes[2]);
+	check_added_monitors!(nodes[2], 1);
+
+	let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &cs_fail_update.update_fail_htlcs[0]).unwrap();
+	commitment_signed_dance!(nodes[1], nodes[2], cs_fail_update.commitment_signed, true, true);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+	let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+	nodes[2].node.send_payment(route, payment_hash_2).unwrap();
+	check_added_monitors!(nodes[2], 1);
+
+	let mut events = nodes[2].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	let payment_event = SendEvent::from_event(events.pop().unwrap());
+	nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+	commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
+
+	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+	expect_pending_htlcs_forwardable!(nodes[1]);
+	check_added_monitors!(nodes[1], 1);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+	nodes[1].node.test_restore_channel_monitor();
+	check_added_monitors!(nodes[1], 1);
+
+	let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]).unwrap();
+	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_add_htlcs[0]).unwrap();
+	commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true);
+
+	let events = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 2);
+	if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
+		assert_eq!(payment_hash, payment_hash_1);
+		assert!(rejected_by_dest);
+	} else { panic!("Unexpected event!"); }
+	match events[1] {
+		Event::PendingHTLCsForwardable { .. } => { },
+		_ => panic!("Unexpected event"),
+	};
+	nodes[0].node.channel_state.lock().unwrap().next_forward = Instant::now();
+	nodes[0].node.process_pending_htlc_forwards();
+	expect_payment_received!(nodes[0], payment_hash_2, 1000000);
+
+	claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_2);
+}
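For readers without the test harness in front of them, `expect_pending_htlcs_forwardable!` bundles the same event-check-then-forward steps this test performs by hand near its end (the explicit `next_forward = Instant::now()` and `process_pending_htlc_forwards()` calls). A from-memory sketch of the helper, not the exact source:

	macro_rules! expect_pending_htlcs_forwardable {
		($node: expr) => {{
			// Exactly one PendingHTLCsForwardable event should be queued...
			let events = $node.node.get_and_clear_pending_events();
			assert_eq!(events.len(), 1);
			match events[0] {
				Event::PendingHTLCsForwardable { .. } => { },
				_ => panic!("Unexpected event"),
			};
			// ...then skip the forwarding delay and run the forwards now.
			$node.node.channel_state.lock().unwrap().next_forward = Instant::now();
			$node.node.process_pending_htlc_forwards();
		}}
	}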
+
+#[test]
+fn monitor_update_claim_fail_no_response() {
+	// Test for claim_funds resulting in both a monitor update failure and no message response (due
+	// to channel being AwaitingRAA).
+	// Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
+	// code was broken.
+	let mut nodes = create_network(2);
+	create_announced_chan_between_nodes(&nodes, 0, 1);
+
+	// Forward a payment for B to claim
+	let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+	// Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA
+	let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+	let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+	nodes[0].node.send_payment(route, payment_hash_2).unwrap();
+	check_added_monitors!(nodes[0], 1);
+
+	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	let payment_event = SendEvent::from_event(events.pop().unwrap());
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+	let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
+
+	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+	assert!(nodes[1].node.claim_funds(payment_preimage_1));
+	check_added_monitors!(nodes[1], 1);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+	nodes[1].node.test_restore_channel_monitor();
+	check_added_monitors!(nodes[1], 1);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+	check_added_monitors!(nodes[1], 1);
+	expect_pending_htlcs_forwardable!(nodes[1]);
+	expect_payment_received!(nodes[1], payment_hash_2, 1000000);
+
+	let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]).unwrap();
+	commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false);
+
+	let events = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 1);
+	match events[0] {
+		Event::PaymentSent { ref payment_preimage } => {
+			assert_eq!(*payment_preimage, payment_preimage_1);
+		},
+		_ => panic!("Unexpected event"),
+	}
+
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+}
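The "no message response" case exercised here follows from the holding-cell design: while a channel is AwaitingRAA (we have sent commitment_signed and not yet received the peer's revoke_and_ack), a claim cannot emit an update_fulfill_htlc immediately, so it is queued and released once the RAA arrives. A loose, illustrative sketch of that shape -- the names below are ours, not the real Channel internals:

	// Illustrative only -- not the real Channel struct.
	struct PendingFulfill { htlc_id: u64, preimage: [u8; 32] }

	struct ChannelSketch {
		awaiting_raa: bool,
		holding_cell: Vec<PendingFulfill>,
	}

	impl ChannelSketch {
		fn claim(&mut self, fulfill: PendingFulfill) -> Option<PendingFulfill> {
			if self.awaiting_raa {
				// Queue it; no message goes out until the peer's revoke_and_ack.
				self.holding_cell.push(fulfill);
				None
			} else {
				// Caller builds update_fulfill_htlc + commitment_signed from this.
				Some(fulfill)
			}
		}
	}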

@@ -448,9 +448,9 @@ macro_rules! try_chan_entry {
 	}
 }
 
-macro_rules! return_monitor_err {
+macro_rules! handle_monitor_err {
 	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
-		return_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
+		handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
 	};
 	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
 		match $err {

@@ -468,7 +468,8 @@ macro_rules! return_monitor_err {
 				// splitting hairs we'd prefer to claim payments that were to us, but we haven't
 				// given up the preimage yet, so might as well just wait until the payment is
 				// retried, avoiding the on-chain fees.
-				return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
+				let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()));
+				res
 			},
 			ChannelMonitorUpdateErr::TemporaryFailure => {
 				if !$resend_commitment {
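The `let res: Result<(), _> = ...; res` dance replaces the bare `return` because `handle_monitor_err!` must now expand to an expression that evaluates to the error instead of returning from the enclosing function, and the explicit binding pins down a concrete type for the `Ok` side that a bare `Err(...)` in tail position would leave ambiguous. The same pattern in miniature, as a hypothetical standalone example:

	// Hypothetical miniature: the macro evaluates to a Result rather than
	// returning, so each caller can decide to `return`, `break`, or inspect it.
	macro_rules! handle_failure {
		($msg: expr) => {{
			let res: Result<(), String> = Err($msg.to_string());
			res
		}}
	}

	fn caller() -> Result<(), String> {
		// A return_monitor_err!-style wrapper: early-return the evaluated error.
		return handle_failure!("storage failure");
	}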
@ -478,26 +479,29 @@ macro_rules! return_monitor_err {
|
|||
debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
|
||||
}
|
||||
$entry.get_mut().monitor_update_failed($action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
|
||||
return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key()));
|
||||
Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key()))
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! return_monitor_err {
|
||||
($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
|
||||
return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment);
|
||||
};
|
||||
($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
|
||||
return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
|
||||
}
|
||||
}
|
||||
|
||||
// Does not break in case of TemporaryFailure!
|
||||
macro_rules! maybe_break_monitor_err {
|
||||
($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
|
||||
match $err {
|
||||
ChannelMonitorUpdateErr::PermanentFailure => {
|
||||
let (channel_id, mut chan) = $entry.remove_entry();
|
||||
if let Some(short_id) = chan.get_short_channel_id() {
|
||||
$channel_state.short_to_id.remove(&short_id);
|
||||
}
|
||||
break Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
|
||||
},
|
||||
ChannelMonitorUpdateErr::TemporaryFailure => {
|
||||
$entry.get_mut().monitor_update_failed($action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new());
|
||||
match (handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment), $err) {
|
||||
(e, ChannelMonitorUpdateErr::PermanentFailure) => {
|
||||
break e;
|
||||
},
|
||||
(_, ChannelMonitorUpdateErr::TemporaryFailure) => { },
|
||||
}
|
||||
}
|
||||
}
|
||||
|
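`maybe_break_monitor_err!` only makes sense inside a `loop`: `break e;` uses Rust's break-with-value, so a surrounding `let ... = loop { ... }` receives the error on PermanentFailure, while the TemporaryFailure arm falls through without breaking. A minimal standalone illustration of the pattern (names are ours):

	fn demo(fail: bool) -> Result<(), &'static str> {
		// The loop runs at most once: success paths `return` out of the whole
		// function, error paths `break` a value out for common handling below.
		let err = loop {
			if fail {
				break "monitor update failed";
			}
			return Ok(()); // happy path exits the function entirely
		};
		// Common error-handling tail, as in claim_funds_internal below.
		Err(err)
	}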

@@ -1159,6 +1163,7 @@ impl ChannelManager {
 
 		let mut new_events = Vec::new();
 		let mut failed_forwards = Vec::new();
+		let mut handle_errors = Vec::new();
 		{
 			let mut channel_state_lock = self.channel_state.lock().unwrap();
 			let channel_state = channel_state_lock.borrow_parts();

@@ -1194,101 +1199,104 @@ impl ChannelManager {
 						continue;
 					}
 				};
-				let forward_chan = &mut channel_state.by_id.get_mut(&forward_chan_id).unwrap();
-
-				let mut add_htlc_msgs = Vec::new();
-				let mut fail_htlc_msgs = Vec::new();
-				for forward_info in pending_forwards.drain(..) {
-					match forward_info {
-						HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
-							log_trace!(self, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", log_bytes!(forward_info.payment_hash.0), prev_short_channel_id, short_chan_id);
-							let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
-								short_channel_id: prev_short_channel_id,
-								htlc_id: prev_htlc_id,
-								incoming_packet_shared_secret: forward_info.incoming_shared_secret,
-							});
-							match forward_chan.send_htlc(forward_info.amt_to_forward, forward_info.payment_hash, forward_info.outgoing_cltv_value, htlc_source.clone(), forward_info.onion_packet.unwrap()) {
-								Err(e) => {
-									if let ChannelError::Ignore(msg) = e {
-										log_trace!(self, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(forward_info.payment_hash.0), msg);
-									} else {
-										panic!("Stated return value requirements in send_htlc() were not met");
-									}
-									let chan_update = self.get_channel_update(forward_chan).unwrap();
-									failed_forwards.push((htlc_source, forward_info.payment_hash, 0x1000 | 7, Some(chan_update)));
-									continue;
-								},
-								Ok(update_add) => {
-									match update_add {
-										Some(msg) => { add_htlc_msgs.push(msg); },
-										None => {
-											// Nothing to do here...we're waiting on a remote
-											// revoke_and_ack before we can add anymore HTLCs. The Channel
-											// will automatically handle building the update_add_htlc and
-											// commitment_signed messages when we can.
-											// TODO: Do some kind of timer to set the channel as !is_live()
-											// as we don't really want others relying on us relaying through
-											// this channel currently :/.
-										}
-									}
-								}
-							}
-						},
-						HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
-							log_trace!(self, "Failing HTLC back to channel with short id {} after delay", short_chan_id);
-							match forward_chan.get_update_fail_htlc(htlc_id, err_packet) {
-								Err(e) => {
-									if let ChannelError::Ignore(msg) = e {
-										log_trace!(self, "Failed to fail backwards to short_id {}: {}", short_chan_id, msg);
-									} else {
-										panic!("Stated return value requirements in get_update_fail_htlc() were not met");
-									}
-									// fail-backs are best-effort, we probably already have one
-									// pending, and if not that's OK, if not, the channel is on
-									// the chain and sending the HTLC-Timeout is their problem.
-									continue;
-								},
-								Ok(Some(msg)) => { fail_htlc_msgs.push(msg); },
-								Ok(None) => {
-									// Nothing to do here...we're waiting on a remote
-									// revoke_and_ack before we can update the commitment
-									// transaction. The Channel will automatically handle
-									// building the update_fail_htlc and commitment_signed
-									// messages when we can.
-									// We don't need any kind of timer here as they should fail
-									// the channel onto the chain if they can't get our
-									// update_fail_htlc in time, it's not our problem.
-								}
-							}
-						},
-					}
-				}
-
-				if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() {
-					let (commitment_msg, monitor) = match forward_chan.send_commitment() {
-						Ok(res) => res,
-						Err(e) => {
-							if let ChannelError::Ignore(_) = e {
-								panic!("Stated return value requirements in send_commitment() were not met");
-							}
-							//TODO: Handle...this is bad!
-							continue;
-						},
-					};
-					if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
-						unimplemented!();
-					}
-					channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-						node_id: forward_chan.get_their_node_id(),
-						updates: msgs::CommitmentUpdate {
-							update_add_htlcs: add_htlc_msgs,
-							update_fulfill_htlcs: Vec::new(),
-							update_fail_htlcs: fail_htlc_msgs,
-							update_fail_malformed_htlcs: Vec::new(),
-							update_fee: None,
-							commitment_signed: commitment_msg,
-						},
-					});
-				}
+				if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(forward_chan_id) {
+					let mut add_htlc_msgs = Vec::new();
+					let mut fail_htlc_msgs = Vec::new();
+					for forward_info in pending_forwards.drain(..) {
+						match forward_info {
+							HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
+								log_trace!(self, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", log_bytes!(forward_info.payment_hash.0), prev_short_channel_id, short_chan_id);
+								let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+									short_channel_id: prev_short_channel_id,
+									htlc_id: prev_htlc_id,
+									incoming_packet_shared_secret: forward_info.incoming_shared_secret,
+								});
+								match chan.get_mut().send_htlc(forward_info.amt_to_forward, forward_info.payment_hash, forward_info.outgoing_cltv_value, htlc_source.clone(), forward_info.onion_packet.unwrap()) {
+									Err(e) => {
+										if let ChannelError::Ignore(msg) = e {
+											log_trace!(self, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(forward_info.payment_hash.0), msg);
+										} else {
+											panic!("Stated return value requirements in send_htlc() were not met");
+										}
+										let chan_update = self.get_channel_update(chan.get()).unwrap();
+										failed_forwards.push((htlc_source, forward_info.payment_hash, 0x1000 | 7, Some(chan_update)));
+										continue;
+									},
+									Ok(update_add) => {
+										match update_add {
+											Some(msg) => { add_htlc_msgs.push(msg); },
+											None => {
+												// Nothing to do here...we're waiting on a remote
+												// revoke_and_ack before we can add anymore HTLCs. The Channel
+												// will automatically handle building the update_add_htlc and
+												// commitment_signed messages when we can.
+												// TODO: Do some kind of timer to set the channel as !is_live()
+												// as we don't really want others relying on us relaying through
+												// this channel currently :/.
+											}
+										}
+									}
+								}
+							},
+							HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
+								log_trace!(self, "Failing HTLC back to channel with short id {} after delay", short_chan_id);
+								match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet) {
+									Err(e) => {
+										if let ChannelError::Ignore(msg) = e {
+											log_trace!(self, "Failed to fail backwards to short_id {}: {}", short_chan_id, msg);
+										} else {
+											panic!("Stated return value requirements in get_update_fail_htlc() were not met");
+										}
+										// fail-backs are best-effort, we probably already have one
+										// pending, and if not that's OK, if not, the channel is on
+										// the chain and sending the HTLC-Timeout is their problem.
+										continue;
+									},
+									Ok(Some(msg)) => { fail_htlc_msgs.push(msg); },
+									Ok(None) => {
+										// Nothing to do here...we're waiting on a remote
+										// revoke_and_ack before we can update the commitment
+										// transaction. The Channel will automatically handle
+										// building the update_fail_htlc and commitment_signed
+										// messages when we can.
+										// We don't need any kind of timer here as they should fail
+										// the channel onto the chain if they can't get our
+										// update_fail_htlc in time, it's not our problem.
+									}
+								}
+							},
+						}
+					}
+
+					if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() {
+						let (commitment_msg, monitor) = match chan.get_mut().send_commitment() {
+							Ok(res) => res,
+							Err(e) => {
+								if let ChannelError::Ignore(_) = e {
+									panic!("Stated return value requirements in send_commitment() were not met");
+								}
+								//TODO: Handle...this is bad!
+								continue;
+							},
+						};
+						if let Err(e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
+							handle_errors.push((chan.get().get_their_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
+							continue;
+						}
+						channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+							node_id: chan.get().get_their_node_id(),
+							updates: msgs::CommitmentUpdate {
+								update_add_htlcs: add_htlc_msgs,
+								update_fulfill_htlcs: Vec::new(),
+								update_fail_htlcs: fail_htlc_msgs,
+								update_fail_malformed_htlcs: Vec::new(),
+								update_fee: None,
+								commitment_signed: commitment_msg,
+							},
+						});
+					}
+				} else {
+					unreachable!();
+				}
 			} else {
 				for forward_info in pending_forwards.drain(..) {
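The switch from `by_id.get_mut(&forward_chan_id)` to the `hash_map::Entry` API is what lets `handle_monitor_err!` remove the channel from the map on PermanentFailure (via `$entry.remove_entry()`) without a second lookup, while `chan.get_mut()` still hands out the mutable channel on the happy path. The relevant std pattern in isolation (our own toy types):

	use std::collections::{hash_map, HashMap};

	fn entry_demo(map: &mut HashMap<u64, String>, key: u64) {
		if let hash_map::Entry::Occupied(mut chan) = map.entry(key) {
			chan.get_mut().push_str(" updated"); // mutate in place
			let failed = chan.get().contains("bad");
			if failed {
				// On a permanent failure the macro can consume the entry,
				// removing the channel without re-hashing the key.
				let (_id, _removed) = chan.remove_entry();
			}
		} else {
			// The caller just looked this channel up via short_to_id, so a
			// vacant entry would be a logic error -- hence unreachable!().
			unreachable!();
		}
	}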

@@ -1324,6 +1332,22 @@ impl ChannelManager {
 			};
 		}
 
+		for (their_node_id, err) in handle_errors.drain(..) {
+			match handle_error!(self, err) {
+				Ok(_) => {},
+				Err(e) => {
+					if let Some(msgs::ErrorAction::IgnoreError) = e.action {
+					} else {
+						let mut channel_state = self.channel_state.lock().unwrap();
+						channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+							node_id: their_node_id,
+							action: e.action,
+						});
+					}
+				},
+			}
+		}
+
 		if new_events.is_empty() { return }
 		let mut events = self.pending_events.lock().unwrap();
 		events.append(&mut new_events);
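Note the shape here: monitor errors discovered while iterating under the channel_state lock are only recorded in `handle_errors`, and the drain runs after that scope ends, so the error path can safely re-take the lock without deadlocking or mutating the channel map mid-iteration. Schematically, with hypothetical stand-in types:

	// Schematic of the defer-and-drain pattern (hypothetical types).
	fn process(state: &std::sync::Mutex<Vec<u32>>) {
		let mut deferred: Vec<u32> = Vec::new();
		{
			let guard = state.lock().unwrap();
			for item in guard.iter() {
				if *item % 2 == 1 {
					// Can't handle this while holding the lock: record it.
					deferred.push(*item);
				}
			}
		} // lock released here
		for item in deferred.drain(..) {
			// Error handling that may need to take the lock again.
			let _guard = state.lock().unwrap();
			let _ = item;
		}
	}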

@@ -1469,56 +1493,79 @@ impl ChannelManager {
 		} else { false }
 	}
 
 	fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder>, source: HTLCSource, payment_preimage: PaymentPreimage) {
-		match source {
-			HTLCSource::OutboundRoute { .. } => {
-				mem::drop(channel_state_lock);
-				let mut pending_events = self.pending_events.lock().unwrap();
-				pending_events.push(events::Event::PaymentSent {
-					payment_preimage
-				});
-			},
-			HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, .. }) => {
-				//TODO: Delay the claimed_funds relaying just like we do outbound relay!
-				let channel_state = channel_state_lock.borrow_parts();
-
-				let chan_id = match channel_state.short_to_id.get(&short_channel_id) {
-					Some(chan_id) => chan_id.clone(),
-					None => {
-						// TODO: There is probably a channel manager somewhere that needs to
-						// learn the preimage as the channel already hit the chain and that's
-						// why it's missing.
-						return
-					}
-				};
-
-				let chan = channel_state.by_id.get_mut(&chan_id).unwrap();
-				match chan.get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) {
-					Ok((msgs, monitor_option)) => {
-						if let Some(chan_monitor) = monitor_option {
-							if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-								unimplemented!();// but def don't push the event...
-							}
-						}
-						if let Some((msg, commitment_signed)) = msgs {
-							channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-								node_id: chan.get_their_node_id(),
-								updates: msgs::CommitmentUpdate {
-									update_add_htlcs: Vec::new(),
-									update_fulfill_htlcs: vec![msg],
-									update_fail_htlcs: Vec::new(),
-									update_fail_malformed_htlcs: Vec::new(),
-									update_fee: None,
-									commitment_signed,
-								}
-							});
-						}
-					},
-					Err(_e) => {
-						// TODO: There is probably a channel manager somewhere that needs to
-						// learn the preimage as the channel may be about to hit the chain.
-						//TODO: Do something with e?
-						return
-					},
-				}
-			},
-		}
+		let (their_node_id, err) = loop {
+			match source {
+				HTLCSource::OutboundRoute { .. } => {
+					mem::drop(channel_state_lock);
+					let mut pending_events = self.pending_events.lock().unwrap();
+					pending_events.push(events::Event::PaymentSent {
+						payment_preimage
+					});
+				},
+				HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, .. }) => {
+					//TODO: Delay the claimed_funds relaying just like we do outbound relay!
+					let channel_state = channel_state_lock.borrow_parts();
+
+					let chan_id = match channel_state.short_to_id.get(&short_channel_id) {
+						Some(chan_id) => chan_id.clone(),
+						None => {
+							// TODO: There is probably a channel manager somewhere that needs to
+							// learn the preimage as the channel already hit the chain and that's
+							// why it's missing.
+							return
+						}
+					};
+
+					if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
+						let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
+						match chan.get_mut().get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) {
+							Ok((msgs, monitor_option)) => {
+								if let Some(chan_monitor) = monitor_option {
+									if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+										if was_frozen_for_monitor {
+											assert!(msgs.is_none());
+										} else {
+											break (chan.get().get_their_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()));
+										}
+									}
+								}
+								if let Some((msg, commitment_signed)) = msgs {
+									channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+										node_id: chan.get().get_their_node_id(),
+										updates: msgs::CommitmentUpdate {
+											update_add_htlcs: Vec::new(),
+											update_fulfill_htlcs: vec![msg],
+											update_fail_htlcs: Vec::new(),
+											update_fail_malformed_htlcs: Vec::new(),
+											update_fee: None,
+											commitment_signed,
+										}
+									});
+								}
+							},
+							Err(_e) => {
+								// TODO: There is probably a channel manager somewhere that needs to
+								// learn the preimage as the channel may be about to hit the chain.
+								//TODO: Do something with e?
+								return
+							},
+						}
+					} else { unreachable!(); }
+				},
+			}
+			return;
+		};
+
+		match handle_error!(self, err) {
+			Ok(_) => {},
+			Err(e) => {
+				if let Some(msgs::ErrorAction::IgnoreError) = e.action {
+				} else {
+					let mut channel_state = self.channel_state.lock().unwrap();
+					channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+						node_id: their_node_id,
+						action: e.action,
+					});
+				}
+			},
+		}
 	}