mirror of https://github.com/lightningdevkit/rust-lightning.git
synced 2025-02-25 15:20:24 +01:00
Handle monitor update failures in two more places
Best reviewed with -b (i.e. `git diff -b`, ignoring whitespace-only changes).
parent a6f0281017
commit a138a9af01
2 changed files with 384 additions and 146 deletions
@@ -1365,3 +1365,194 @@ fn first_message_on_recv_ordering() {
 	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
 	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
 }
+
+#[test]
+fn test_monitor_update_fail_claim() {
+	// Basic test for monitor update failures when processing claim_funds calls.
+	// We set up a simple 3-node network, sending a payment from A to B and failing B's monitor
+	// update to claim the payment. We then send a payment C->B->A, making the forward of this
+	// payment from B to A fail due to the paused channel. Finally, we restore the channel monitor
+	// updating and claim the payment on B.
+	let mut nodes = create_network(3);
+	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+	create_announced_chan_between_nodes(&nodes, 1, 2);
+
+	// Rebalance a bit so that we can send backwards from 3 to 2.
+	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
+
+	let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+	assert!(nodes[1].node.claim_funds(payment_preimage_1));
+	check_added_monitors!(nodes[1], 1);
+
+	let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+	let (_, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+	nodes[2].node.send_payment(route, payment_hash_2).unwrap();
+	check_added_monitors!(nodes[2], 1);
+
+	// Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
+	// paused, so forward shouldn't succeed until we call test_restore_channel_monitor().
+	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+
+	let mut events = nodes[2].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	let payment_event = SendEvent::from_event(events.pop().unwrap());
+	nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+	commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
+
+	let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+	nodes[2].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]).unwrap();
+	commitment_signed_dance!(nodes[2], nodes[1], bs_fail_update.commitment_signed, false, true);
+
+	let msg_events = nodes[2].node.get_and_clear_pending_msg_events();
+	assert_eq!(msg_events.len(), 1);
+	match msg_events[0] {
+		MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
+			assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id);
+			assert_eq!(msg.contents.flags & 2, 2); // temp disabled
+		},
+		_ => panic!("Unexpected event"),
+	}
+
+	let events = nodes[2].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 1);
+	if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
+		assert_eq!(payment_hash, payment_hash_2);
+		assert!(!rejected_by_dest);
+	} else { panic!("Unexpected event!"); }
+
+	// Now restore monitor updating on the 0<->1 channel and claim the funds on B.
+	nodes[1].node.test_restore_channel_monitor();
+	check_added_monitors!(nodes[1], 1);
+
+	let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]).unwrap();
+	commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false);
+
+	let events = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 1);
+	if let Event::PaymentSent { payment_preimage, .. } = events[0] {
+		assert_eq!(payment_preimage, payment_preimage_1);
+	} else { panic!("Unexpected event!"); }
+}
+
+#[test]
+fn test_monitor_update_on_pending_forwards() {
+	// Basic test for monitor update failures when processing pending HTLC fail/add forwards.
+	// We do this with a simple 3-node network, sending a payment from A to C and one from C to A.
+	// The payment from A to C will be failed by C and pending a back-fail to A, while the payment
+	// from C to A will be pending a forward to A.
+	let mut nodes = create_network(3);
+	create_announced_chan_between_nodes(&nodes, 0, 1);
+	create_announced_chan_between_nodes(&nodes, 1, 2);
+
+	// Rebalance a bit so that we can send backwards from 3 to 1.
+	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
+
+	let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
+	assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1, 1000000));
+	expect_pending_htlcs_forwardable!(nodes[2]);
+	check_added_monitors!(nodes[2], 1);
+
+	let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &cs_fail_update.update_fail_htlcs[0]).unwrap();
+	commitment_signed_dance!(nodes[1], nodes[2], cs_fail_update.commitment_signed, true, true);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+	let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+	nodes[2].node.send_payment(route, payment_hash_2).unwrap();
+	check_added_monitors!(nodes[2], 1);
+
+	let mut events = nodes[2].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	let payment_event = SendEvent::from_event(events.pop().unwrap());
+	nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+	commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
+
+	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+	expect_pending_htlcs_forwardable!(nodes[1]);
+	check_added_monitors!(nodes[1], 1);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+	nodes[1].node.test_restore_channel_monitor();
+	check_added_monitors!(nodes[1], 1);
+
+	let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]).unwrap();
+	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_add_htlcs[0]).unwrap();
+	commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true);
+
+	let events = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 2);
+	if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
+		assert_eq!(payment_hash, payment_hash_1);
+		assert!(rejected_by_dest);
+	} else { panic!("Unexpected event!"); }
+	match events[1] {
+		Event::PendingHTLCsForwardable { .. } => { },
+		_ => panic!("Unexpected event"),
+	};
+	nodes[0].node.channel_state.lock().unwrap().next_forward = Instant::now();
+	nodes[0].node.process_pending_htlc_forwards();
+	expect_payment_received!(nodes[0], payment_hash_2, 1000000);
+
+	claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_2);
+}
+
+#[test]
+fn monitor_update_claim_fail_no_response() {
+	// Test for claim_funds resulting in both a monitor update failure and no message response (due
+	// to channel being AwaitingRAA).
+	// Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
+	// code was broken.
+	let mut nodes = create_network(2);
+	create_announced_chan_between_nodes(&nodes, 0, 1);
+
+	// Forward a payment for B to claim
+	let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+	// Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA
+	let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+	let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+	nodes[0].node.send_payment(route, payment_hash_2).unwrap();
+	check_added_monitors!(nodes[0], 1);
+
+	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	let payment_event = SendEvent::from_event(events.pop().unwrap());
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+	let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
+
+	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+	assert!(nodes[1].node.claim_funds(payment_preimage_1));
+	check_added_monitors!(nodes[1], 1);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+	nodes[1].node.test_restore_channel_monitor();
+	check_added_monitors!(nodes[1], 1);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+	check_added_monitors!(nodes[1], 1);
+	expect_pending_htlcs_forwardable!(nodes[1]);
+	expect_payment_received!(nodes[1], payment_hash_2, 1000000);
+
+	let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]).unwrap();
+	commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false);
+
+	let events = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 1);
+	match events[0] {
+		Event::PaymentSent { ref payment_preimage } => {
+			assert_eq!(*payment_preimage, payment_preimage_1);
+		},
+		_ => panic!("Unexpected event"),
+	}
+
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+}
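The tests above all drive the new handling through the test harness's `update_ret` hook: the test monitor wrapper returns whatever `Result` the test has armed, letting a test force a `TemporaryFailure`, exercise the paused channel, and then restore `Ok(())` before calling `test_restore_channel_monitor()`. A minimal, self-contained sketch of that hook with simplified, hypothetical types (not the actual test harness):

```rust
use std::sync::Mutex;

#[derive(Clone, Copy, Debug, PartialEq)]
#[allow(dead_code)]
enum ChannelMonitorUpdateErr { TemporaryFailure, PermanentFailure }

struct TestChanMonitor {
	// The next result monitor updates should report, armed by the test.
	update_ret: Mutex<Result<(), ChannelMonitorUpdateErr>>,
}

impl TestChanMonitor {
	fn add_update_monitor(&self) -> Result<(), ChannelMonitorUpdateErr> {
		// Report whatever the test armed most recently.
		*self.update_ret.lock().unwrap()
	}
}

fn main() {
	let mon = TestChanMonitor { update_ret: Mutex::new(Ok(())) };
	// Pause the channel: the next monitor update "fails".
	*mon.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
	assert_eq!(mon.add_update_monitor(), Err(ChannelMonitorUpdateErr::TemporaryFailure));
	// Restore before the equivalent of test_restore_channel_monitor().
	*mon.update_ret.lock().unwrap() = Ok(());
	assert_eq!(mon.add_update_monitor(), Ok(()));
}
```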
@@ -448,9 +448,9 @@ macro_rules! try_chan_entry {
 	}
 }
 
-macro_rules! return_monitor_err {
+macro_rules! handle_monitor_err {
 	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
-		return_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
+		handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
 	};
 	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
 		match $err {
@@ -468,7 +468,8 @@ macro_rules! return_monitor_err {
 				// splitting hairs we'd prefer to claim payments that were to us, but we haven't
 				// given up the preimage yet, so might as well just wait until the payment is
 				// retried, avoiding the on-chain fees.
-				return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
+				let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()));
+				res
 			},
 			ChannelMonitorUpdateErr::TemporaryFailure => {
 				if !$resend_commitment {
@@ -478,26 +479,29 @@
 					debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
 				}
 				$entry.get_mut().monitor_update_failed($action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
-				return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key()));
+				Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key()))
 			},
 		}
 	}
 }
 
+macro_rules! return_monitor_err {
+	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
+		return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment);
+	};
+	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
+		return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
+	}
+}
+
 // Does not break in case of TemporaryFailure!
 macro_rules! maybe_break_monitor_err {
 	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
-		match $err {
-			ChannelMonitorUpdateErr::PermanentFailure => {
-				let (channel_id, mut chan) = $entry.remove_entry();
-				if let Some(short_id) = chan.get_short_channel_id() {
-					$channel_state.short_to_id.remove(&short_id);
-				}
-				break Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
-			},
-			ChannelMonitorUpdateErr::TemporaryFailure => {
-				$entry.get_mut().monitor_update_failed($action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new());
+		match (handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment), $err) {
+			(e, ChannelMonitorUpdateErr::PermanentFailure) => {
+				break e;
 			},
+			(_, ChannelMonitorUpdateErr::TemporaryFailure) => { },
 		}
 	}
 }
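The rename above is the heart of the refactor: `handle_monitor_err!` now *evaluates to* the error `Result` rather than returning it, so a call site can push it onto a list (as `process_pending_htlc_forwards` does below), `break` out of an enclosing loop with it (as `maybe_break_monitor_err!` now does), or `return` it via the thin `return_monitor_err!` wrapper. A minimal, self-contained sketch of the pattern with hypothetical types (not LDK's actual API):

```rust
#[derive(Clone, Copy, Debug)]
enum MonitorErr { Temporary, Permanent }

// Evaluates to a Result rather than returning it, so it works in expression
// position (pushed into a Vec, bound to a variable, or broken out of a loop).
macro_rules! handle_err {
	($err: expr) => {
		match $err {
			MonitorErr::Temporary => Err("channel paused until monitor restore"),
			MonitorErr::Permanent => Err("channel force-closed"),
		}
	}
}

// Preserves the old early-return behavior as a thin wrapper.
macro_rules! return_err {
	($err: expr) => {
		return handle_err!($err);
	}
}

// Deferred handling: collect results while iterating, deal with them later.
fn collect_errors(errs: Vec<MonitorErr>) -> Vec<Result<(), &'static str>> {
	errs.into_iter().map(|e| handle_err!(e)).collect()
}

// Early return: the pre-refactor call-site shape.
fn bail_immediately(e: MonitorErr) -> Result<(), &'static str> {
	return_err!(e);
}

// maybe_break-style: break an enclosing loop only on permanent failure.
fn break_on_permanent(e: MonitorErr) -> Result<(), &'static str> {
	loop {
		match (handle_err!(e), e) {
			(res, MonitorErr::Permanent) => break res,
			(_, MonitorErr::Temporary) => {},
		}
		return Ok(());
	}
}

fn main() {
	assert!(bail_immediately(MonitorErr::Temporary).is_err());
	assert_eq!(collect_errors(vec![MonitorErr::Temporary, MonitorErr::Permanent]).len(), 2);
	assert!(break_on_permanent(MonitorErr::Temporary).is_ok());
}
```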
@@ -1159,6 +1163,7 @@ impl ChannelManager {
 
 		let mut new_events = Vec::new();
 		let mut failed_forwards = Vec::new();
+		let mut handle_errors = Vec::new();
 		{
 			let mut channel_state_lock = self.channel_state.lock().unwrap();
 			let channel_state = channel_state_lock.borrow_parts();
@@ -1194,8 +1199,7 @@ impl ChannelManager {
 						continue;
 					}
 				};
-				let forward_chan = &mut channel_state.by_id.get_mut(&forward_chan_id).unwrap();
-
+				if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(forward_chan_id) {
 					let mut add_htlc_msgs = Vec::new();
 					let mut fail_htlc_msgs = Vec::new();
 					for forward_info in pending_forwards.drain(..) {
@@ -1207,14 +1211,14 @@ impl ChannelManager {
 									htlc_id: prev_htlc_id,
 									incoming_packet_shared_secret: forward_info.incoming_shared_secret,
 								});
-								match forward_chan.send_htlc(forward_info.amt_to_forward, forward_info.payment_hash, forward_info.outgoing_cltv_value, htlc_source.clone(), forward_info.onion_packet.unwrap()) {
+								match chan.get_mut().send_htlc(forward_info.amt_to_forward, forward_info.payment_hash, forward_info.outgoing_cltv_value, htlc_source.clone(), forward_info.onion_packet.unwrap()) {
 									Err(e) => {
 										if let ChannelError::Ignore(msg) = e {
 											log_trace!(self, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(forward_info.payment_hash.0), msg);
 										} else {
 											panic!("Stated return value requirements in send_htlc() were not met");
 										}
-										let chan_update = self.get_channel_update(forward_chan).unwrap();
+										let chan_update = self.get_channel_update(chan.get()).unwrap();
 										failed_forwards.push((htlc_source, forward_info.payment_hash, 0x1000 | 7, Some(chan_update)));
 										continue;
 									},
@@ -1236,7 +1240,7 @@ impl ChannelManager {
 							},
 							HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
 								log_trace!(self, "Failing HTLC back to channel with short id {} after delay", short_chan_id);
-								match forward_chan.get_update_fail_htlc(htlc_id, err_packet) {
+								match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet) {
 									Err(e) => {
 										if let ChannelError::Ignore(msg) = e {
 											log_trace!(self, "Failed to fail backwards to short_id {}: {}", short_chan_id, msg);
@@ -1265,7 +1269,7 @@ impl ChannelManager {
 					}
 
 					if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() {
-						let (commitment_msg, monitor) = match forward_chan.send_commitment() {
+						let (commitment_msg, monitor) = match chan.get_mut().send_commitment() {
 							Ok(res) => res,
 							Err(e) => {
 								if let ChannelError::Ignore(_) = e {
@@ -1275,11 +1279,12 @@ impl ChannelManager {
 								continue;
 							},
 						};
-						if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
-							unimplemented!();
+						if let Err(e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
+							handle_errors.push((chan.get().get_their_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
+							continue;
 						}
 						channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-							node_id: forward_chan.get_their_node_id(),
+							node_id: chan.get().get_their_node_id(),
 							updates: msgs::CommitmentUpdate {
 								update_add_htlcs: add_htlc_msgs,
 								update_fulfill_htlcs: Vec::new(),
@@ -1290,6 +1295,9 @@ impl ChannelManager {
 							},
 						});
 					}
+				} else {
+					unreachable!();
+				}
 			} else {
 				for forward_info in pending_forwards.drain(..) {
 					match forward_info {
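Switching from `by_id.get_mut(&forward_chan_id).unwrap()` to the `Entry` API above is what lets `handle_monitor_err!` remove the channel from the map via `$entry.remove_entry()` on permanent failure, which a plain `&mut` borrow could not do; the new `} else { unreachable!(); }` arm records that the channel was looked up just before. A brief sketch of the difference, using a toy map and a hypothetical helper:

```rust
use std::collections::HashMap;
use std::collections::hash_map::Entry;

// Holding the OccupiedEntry (not just a &mut reference) means the failure
// path can consume it and remove the channel from the map in place.
fn update_or_remove(by_id: &mut HashMap<u64, String>, chan_id: u64, fail_permanently: bool) {
	if let Entry::Occupied(mut chan) = by_id.entry(chan_id) {
		if fail_permanently {
			let (id, state) = chan.remove_entry(); // force-close path
			println!("force-closing channel {} ({})", id, state);
		} else {
			chan.get_mut().push_str(", updated");
		}
	} else { unreachable!(); } // chan_id was validated just before
}

fn main() {
	let mut by_id = HashMap::new();
	by_id.insert(7u64, String::from("chan state"));
	update_or_remove(&mut by_id, 7, false);
	update_or_remove(&mut by_id, 7, true);
	assert!(by_id.is_empty());
}
```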
@@ -1324,6 +1332,22 @@ impl ChannelManager {
 					};
 				}
 		}
 
+		for (their_node_id, err) in handle_errors.drain(..) {
+			match handle_error!(self, err) {
+				Ok(_) => {},
+				Err(e) => {
+					if let Some(msgs::ErrorAction::IgnoreError) = e.action {
+					} else {
+						let mut channel_state = self.channel_state.lock().unwrap();
+						channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+							node_id: their_node_id,
+							action: e.action,
+						});
+					}
+				},
+			}
+		}
+
 		if new_events.is_empty() { return }
 		let mut events = self.pending_events.lock().unwrap();
 		events.append(&mut new_events);
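The new `handle_errors` vector defers error handling until the iteration over channel state completes, so nothing re-enters the channel lock mid-loop. A hedged, simplified sketch of that collect-then-handle pattern with assumed toy types (not the actual `ChannelManager`):

```rust
use std::sync::Mutex;

struct Manager {
	// node_id -> whether its last monitor update succeeded (toy stand-in)
	channels: Mutex<Vec<(u64, bool)>>,
	pending_msg_events: Mutex<Vec<String>>,
}

impl Manager {
	fn process_pending_forwards(&self) {
		let mut handle_errors: Vec<(u64, &'static str)> = Vec::new();
		{
			let channels = self.channels.lock().unwrap();
			for (node_id, update_ok) in channels.iter() {
				if !*update_ok {
					// Record the failure; take no other locks while the
					// channel lock is held.
					handle_errors.push((*node_id, "monitor update failed"));
				}
			}
		} // channel lock dropped here
		for (their_node_id, err) in handle_errors.drain(..) {
			self.pending_msg_events.lock().unwrap()
				.push(format!("HandleError to {}: {}", their_node_id, err));
		}
	}
}

fn main() {
	let mgr = Manager {
		channels: Mutex::new(vec![(1, true), (2, false)]),
		pending_msg_events: Mutex::new(Vec::new()),
	};
	mgr.process_pending_forwards();
	assert_eq!(mgr.pending_msg_events.lock().unwrap().len(), 1);
}
```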
@@ -1469,6 +1493,7 @@ impl ChannelManager {
 		} else { false }
 	}
 	fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder>, source: HTLCSource, payment_preimage: PaymentPreimage) {
+		let (their_node_id, err) = loop {
 		match source {
 			HTLCSource::OutboundRoute { .. } => {
 				mem::drop(channel_state_lock);
@@ -1491,17 +1516,22 @@ impl ChannelManager {
 					}
 				};
 
-				let chan = channel_state.by_id.get_mut(&chan_id).unwrap();
-				match chan.get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) {
+				if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
+				let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
+				match chan.get_mut().get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) {
 					Ok((msgs, monitor_option)) => {
 						if let Some(chan_monitor) = monitor_option {
-							if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-								unimplemented!();// but def don't push the event...
+							if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+								if was_frozen_for_monitor {
+									assert!(msgs.is_none());
+								} else {
+									break (chan.get().get_their_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()));
+								}
 							}
 						}
 						if let Some((msg, commitment_signed)) = msgs {
 							channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-								node_id: chan.get_their_node_id(),
+								node_id: chan.get().get_their_node_id(),
 								updates: msgs::CommitmentUpdate {
 									update_add_htlcs: Vec::new(),
 									update_fulfill_htlcs: vec![msg],
@@ -1520,6 +1550,23 @@ impl ChannelManager {
 						return
 					},
 				}
+				} else { unreachable!(); }
+			},
+		}
+		return;
+		};
+
+		match handle_error!(self, err) {
+			Ok(_) => {},
+			Err(e) => {
+				if let Some(msgs::ErrorAction::IgnoreError) = e.action {
+				} else {
+					let mut channel_state = self.channel_state.lock().unwrap();
+					channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+						node_id: their_node_id,
+						action: e.action,
+					});
+				}
 			},
 		}
 	}
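`claim_funds_internal` now wraps its body in `loop { ... }` used purely as a breakable block: the success paths `return` directly, while the monitor-failure path `break`s out with `(their_node_id, err)` so the shared error handling runs after the channel lock is released. A small sketch of the idiom with a hypothetical function (not the real signature):

```rust
// Success paths return directly; the failure path breaks out with the data
// needed for shared error handling after the "lock" region ends.
fn claim(htlc_known: bool, monitor_ok: bool) -> Result<(), String> {
	let (their_node_id, err) = loop {
		if !htlc_known {
			return Ok(()); // nothing to claim
		}
		if monitor_ok {
			return Ok(()); // claimed and monitor updated
		}
		// Carry the error details out of the loop-as-block.
		break (42u64, "monitor update failed");
	};
	Err(format!("node {}: {}", their_node_id, err))
}

fn main() {
	assert!(claim(true, true).is_ok());
	assert!(claim(true, false).is_err());
	assert!(claim(false, false).is_ok());
}
```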