Merge pull request #316 from TheBlueMatt/2019-03-removed-reserve-check
Fix remote reserve check with inbound claims-in-flight
commit 8b4cb9fadb
3 changed files with 200 additions and 25 deletions
@@ -881,9 +881,14 @@ impl Channel {
 		}
 	}

 	let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
-	let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000 - self.value_to_self_msat - remote_htlc_total_msat) as i64 - value_to_self_msat_offset;
 	assert!(value_to_self_msat >= 0);
+	// Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
+	// AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
+	// "violate" their reserve value by counting those against it. Thus, we have to convert
+	// everything to i64 before subtracting as otherwise we can overflow.
+	let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
 	assert!(value_to_remote_msat >= 0);

 	#[cfg(debug_assertions)]
 	{
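Why the i64 widening matters: with several just-awaiting-last-RAA fulfills in flight, the all-u64 subtraction on the removed line can underflow even though the final balance is non-negative once value_to_self_msat_offset is applied. A minimal standalone sketch of the hazard, with hypothetical amounts (not LDK code):

fn main() {
    // Hypothetical values: fulfills-in-flight mean value_to_self_msat plus the
    // remote HTLC total can transiently exceed the channel value.
    let channel_value_satoshis: u64 = 100_000;       // 100M msat channel
    let value_to_self_msat: u64 = 99_999_000;
    let remote_htlc_total_msat: u64 = 2_000_000;
    let value_to_self_msat_offset: i64 = -2_000_000; // in-flight fulfills shift value remote-ward

    // Old form: the u64 subtraction underflows before the cast to i64
    // (panicking in debug builds, wrapping to a huge value in release):
    //   (channel_value_satoshis * 1000 - value_to_self_msat - remote_htlc_total_msat) as i64
    //   = (100_000_000 - 99_999_000 - 2_000_000) as i64  // the u64 goes negative here

    // New form: widen every term to i64 first, so the transiently negative
    // intermediate is representable and the offset can bring it back >= 0.
    let value_to_remote_msat: i64 = (channel_value_satoshis * 1000) as i64
        - (value_to_self_msat as i64)
        - (remote_htlc_total_msat as i64)
        - value_to_self_msat_offset;
    assert!(value_to_remote_msat >= 0);
    println!("value_to_remote_msat = {}", value_to_remote_msat); // 1_000_000
}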
@@ -1595,7 +1600,24 @@ impl Channel {
 	// Check our_channel_reserve_satoshis (we're getting paid, so they have to at least meet
 	// the reserve_satoshis we told them to always have as direct payment so that they lose
 	// something if we punish them for broadcasting an old state).
-	if htlc_inbound_value_msat + msg.amount_msat + self.value_to_self_msat > (self.channel_value_satoshis - Channel::get_our_channel_reserve_satoshis(self.channel_value_satoshis)) * 1000 {
+	// Note that we don't really care about having a small/no to_remote output in our local
+	// commitment transactions, as the purpose of the channel reserve is to ensure we can
+	// punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
+	// present in the next commitment transaction we send them (at least for fulfilled ones,
+	// failed ones won't modify value_to_self).
+	// Note that we will send HTLCs which another instance of rust-lightning would think
+	// violate the reserve value if we do not do this (as we forget inbound HTLCs from the
+	// Channel state once they will not be present in the next received commitment
+	// transaction).
+	let mut removed_outbound_total_msat = 0;
+	for ref htlc in self.pending_outbound_htlcs.iter() {
+		if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(None) = htlc.state {
+			removed_outbound_total_msat += htlc.amount_msat;
+		} else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(None) = htlc.state {
+			removed_outbound_total_msat += htlc.amount_msat;
+		}
+	}
+	if htlc_inbound_value_msat + msg.amount_msat + self.value_to_self_msat > (self.channel_value_satoshis - Channel::get_our_channel_reserve_satoshis(self.channel_value_satoshis)) * 1000 + removed_outbound_total_msat {
 		return Err(ChannelError::Close("Remote HTLC add would put them over their reserve value"));
 	}
 	if self.next_remote_htlc_id != msg.htlc_id {
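A self-contained sketch of the adjusted check, with simplified stand-in types (the real OutboundHTLCState variants carry an Option payload distinguishing fulfills from fails; `Htlc`, `check_remote_reserve`, and all parameter names here are hypothetical):

#[derive(Clone, Copy)]
enum OutboundHTLCState {
    Committed,
    AwaitingRemoteRevokeToRemove, // fulfilled, removal not yet revoked
    AwaitingRemovedRemoteRevoke,  // fulfilled, awaiting the final RAA round
}

struct Htlc { amount_msat: u64, state: OutboundHTLCState }

/// Returns Err if accepting `add_amount_msat` would dip the counterparty below
/// the reserve, after discounting fulfilled outbound HTLCs that will be absent
/// from the next commitment transaction we send them.
fn check_remote_reserve(
    channel_value_sat: u64,
    reserve_sat: u64,
    value_to_self_msat: u64,
    htlc_inbound_value_msat: u64,
    add_amount_msat: u64,
    pending_outbound: &[Htlc],
) -> Result<(), &'static str> {
    let removed_outbound_total_msat: u64 = pending_outbound.iter()
        .filter(|h| matches!(h.state,
            OutboundHTLCState::AwaitingRemoteRevokeToRemove
            | OutboundHTLCState::AwaitingRemovedRemoteRevoke))
        .map(|h| h.amount_msat)
        .sum();
    if htlc_inbound_value_msat + add_amount_msat + value_to_self_msat
        > (channel_value_sat - reserve_sat) * 1000 + removed_outbound_total_msat {
        return Err("Remote HTLC add would put them over their reserve value");
    }
    Ok(())
}

fn main() {
    // Even with a 2M msat claimed-in-flight HTLC discounted, a 4.5M msat add
    // from the counterparty would dip them below a 1,000 sat reserve on a
    // 100,000 sat channel, so the check still rejects it.
    let pending = [Htlc { amount_msat: 2_000_000, state: OutboundHTLCState::AwaitingRemovedRemoteRevoke }];
    assert!(check_remote_reserve(100_000, 1_000, 98_000_000, 0, 4_500_000, &pending).is_err());
}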
@@ -542,6 +542,33 @@ macro_rules! expect_pending_htlcs_forwardable {
 	}}
 }
+
+macro_rules! expect_payment_received {
+	($node: expr, $expected_payment_hash: expr, $expected_recv_value: expr) => {
+		let events = $node.node.get_and_clear_pending_events();
+		assert_eq!(events.len(), 1);
+		match events[0] {
+			Event::PaymentReceived { ref payment_hash, amt } => {
+				assert_eq!($expected_payment_hash, *payment_hash);
+				assert_eq!($expected_recv_value, amt);
+			},
+			_ => panic!("Unexpected event"),
+		}
+	}
+}
+
+macro_rules! expect_payment_sent {
+	($node: expr, $expected_payment_preimage: expr) => {
+		let events = $node.node.get_and_clear_pending_events();
+		assert_eq!(events.len(), 1);
+		match events[0] {
+			Event::PaymentSent { ref payment_preimage } => {
+				assert_eq!($expected_payment_preimage, *payment_preimage);
+			},
+			_ => panic!("Unexpected event"),
+		}
+	}
+}

 pub fn send_along_route_with_hash(origin_node: &Node, route: Route, expected_route: &[&Node], recv_value: u64, our_payment_hash: PaymentHash) {
 	let mut payment_event = {
 		origin_node.node.send_payment(route, our_payment_hash).unwrap();
@@ -664,14 +691,7 @@ pub fn claim_payment_along_route(origin_node: &Node, expected_route: &[&Node], s

 	if !skip_last {
 		last_update_fulfill_dance!(origin_node, expected_route.first().unwrap());
-		let events = origin_node.node.get_and_clear_pending_events();
-		assert_eq!(events.len(), 1);
-		match events[0] {
-			Event::PaymentSent { payment_preimage } => {
-				assert_eq!(payment_preimage, our_payment_preimage);
-			},
-			_ => panic!("Unexpected event"),
-		}
+		expect_payment_sent!(origin_node, our_payment_preimage);
 	}
 }

@@ -935,20 +955,6 @@ pub fn get_announce_close_broadcast_events(nodes: &Vec<Node>, a: usize, b: usize
 	}
 }

-macro_rules! expect_payment_received {
-	($node: expr, $expected_payment_hash: expr, $expected_recv_value: expr) => {
-		let events = $node.node.get_and_clear_pending_events();
-		assert_eq!(events.len(), 1);
-		match events[0] {
-			Event::PaymentReceived { ref payment_hash, amt } => {
-				assert_eq!($expected_payment_hash, *payment_hash);
-				assert_eq!($expected_recv_value, amt);
-			},
-			_ => panic!("Unexpected event"),
-		}
-	}
-}
-
 macro_rules! get_channel_value_stat {
 	($node: expr, $channel_id: expr) => {{
 		let chan_lock = $node.node.channel_state.lock().unwrap();
@@ -1450,6 +1450,153 @@ fn channel_reserve_test() {
 	do_channel_reserve_test(true);
 }

+#[test]
+fn channel_reserve_in_flight_removes() {
+	// In cases where one side claims an HTLC, it thinks it has additional available funds that it
+	// can send to its counterparty, but due to update ordering, the other side may not yet have
+	// considered those HTLCs fully removed.
+	// This tests that we don't count HTLCs which will not be included in the next remote
+	// commitment transaction towards the reserve value (as it implies no commitment transaction
+	// will be generated which violates the remote reserve value).
+	// This was broken previously, and discovered by the chanmon_fail_consistency fuzz test.
+	// To test this we:
+	//  * route two HTLCs from A to B (note that, at a high level, this test is checking that, when
+	//    you consider the values of both of these HTLCs, B may not send an HTLC back to A, but if
+	//    you only consider the value of the first HTLC, it may),
+	//  * start routing a third HTLC from A to B,
+	//  * claim the first two HTLCs (though B will generate an update_fulfill for one, and put
+	//    the other claim in its holding cell, as it immediately goes into AwaitingRAA),
+	//  * deliver the first fulfill from B,
+	//  * deliver the update_add and an RAA from A, resulting in B freeing the second holding cell
+	//    claim,
+	//  * deliver A's response CS and RAA.
+	//    This results in A having the second HTLC in AwaitingRemovedRemoteRevoke, but B having
+	//    removed it fully. B now has the push_msat plus the first two HTLCs in value.
+	//  * Now B happily sends another HTLC, potentially violating its reserve value from A's point
+	//    of view (if A counts the AwaitingRemovedRemoteRevoke HTLC).
+	let mut nodes = create_network(2);
+	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+	let b_chan_values = get_channel_value_stat!(nodes[1], chan_1.2);
+	// Route the first two HTLCs.
+	let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000);
+	let (payment_preimage_2, _) = route_payment(&nodes[0], &[&nodes[1]], 20000);
+
+	// Start routing the third HTLC (this is just used to get everyone in the right state).
+	let (payment_preimage_3, payment_hash_3) = get_payment_preimage_hash!(nodes[0]);
+	let send_1 = {
+		let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
+		nodes[0].node.send_payment(route, payment_hash_3).unwrap();
+		check_added_monitors!(nodes[0], 1);
+		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		SendEvent::from_event(events.remove(0))
+	};
+
+	// Now claim both of the first two HTLCs on B's end, putting B in AwaitingRAA and generating an
+	// initial fulfill/CS.
+	assert!(nodes[1].node.claim_funds(payment_preimage_1));
+	check_added_monitors!(nodes[1], 1);
+	let bs_removes = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+	// This claim goes in B's holding cell, allowing us to have a pending B->A RAA which does not
+	// remove the second HTLC when we send the HTLC back from B to A.
+	assert!(nodes[1].node.claim_funds(payment_preimage_2));
+	check_added_monitors!(nodes[1], 1);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_removes.update_fulfill_htlcs[0]).unwrap();
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed).unwrap();
+	check_added_monitors!(nodes[0], 1);
+	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+	expect_payment_sent!(nodes[0], payment_preimage_1);
+
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]).unwrap();
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg).unwrap();
+	check_added_monitors!(nodes[1], 1);
+	// B is already AwaitingRAA, so can't generate a CS here
+	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+	check_added_monitors!(nodes[1], 1);
+	let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
+	check_added_monitors!(nodes[0], 1);
+	let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed).unwrap();
+	check_added_monitors!(nodes[1], 1);
+	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+	// The second HTLC is removed, but as A is in AwaitingRAA it can't generate a CS here, so the
+	// RAA that B generated above doesn't fully resolve the second HTLC from A's point of view.
+	// However, the RAA A generates here *does* fully resolve the HTLC from B's point of view (as A
+	// can no longer broadcast a commitment transaction with it and B has the preimage so can go
+	// on-chain as necessary).
+	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_cs.update_fulfill_htlcs[0]).unwrap();
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed).unwrap();
+	check_added_monitors!(nodes[0], 1);
+	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+	expect_payment_sent!(nodes[0], payment_preimage_2);
+
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+	check_added_monitors!(nodes[1], 1);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	expect_pending_htlcs_forwardable!(nodes[1]);
+	expect_payment_received!(nodes[1], payment_hash_3, 100000);
+
+	// Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't
+	// resolve the second HTLC from A's point of view.
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
+	check_added_monitors!(nodes[0], 1);
+	let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+	// Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back
+	// to A to ensure that A doesn't count the almost-removed HTLC in update_add processing.
+	let (payment_preimage_4, payment_hash_4) = get_payment_preimage_hash!(nodes[1]);
+	let send_2 = {
+		let route = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &[], 10000, TEST_FINAL_CLTV).unwrap();
+		nodes[1].node.send_payment(route, payment_hash_4).unwrap();
+		check_added_monitors!(nodes[1], 1);
+		let mut events = nodes[1].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		SendEvent::from_event(events.remove(0))
+	};
+
+	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_2.msgs[0]).unwrap();
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_2.commitment_msg).unwrap();
+	check_added_monitors!(nodes[0], 1);
+	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+	// Now just resolve all the outstanding messages/HTLCs for completeness...
+
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed).unwrap();
+	check_added_monitors!(nodes[1], 1);
+	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+	check_added_monitors!(nodes[1], 1);
+
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
+	check_added_monitors!(nodes[0], 1);
+	let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed).unwrap();
+	check_added_monitors!(nodes[1], 1);
+	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
+	check_added_monitors!(nodes[0], 1);
+
+	expect_pending_htlcs_forwardable!(nodes[0]);
+	expect_payment_received!(nodes[0], payment_hash_4, 10000);
+
+	claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
+}
+
 #[test]
 fn channel_monitor_network_test() {
 	// Simple test which builds a network of ChannelManagers, connects them to each other, and
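For intuition, a worked example of the reserve arithmetic this test exercises, with hypothetical round numbers rather than the test's amounts. A's value_to_self_msat is stale-high by the amount B has already claimed, so without the discount A would reject a perfectly safe HTLC from B:

fn main() {
    // Hypothetical numbers, not the test's amounts.
    let channel_value_satoshis: u64 = 100_000;         // 100M msat channel
    let their_reserve_satoshis: u64 = 1_000;           // B must keep >= 1M msat
    let value_to_self_msat: u64 = 98_000_000;          // A's view, still including...
    let removed_outbound_total_msat: u64 = 3_000_000;  // ...HTLCs B has already claimed
    let htlc_inbound_value_msat: u64 = 0;
    let amount_msat: u64 = 1_500_000;                  // B's new HTLC toward A

    let limit = (channel_value_satoshis - their_reserve_satoshis) * 1000;
    let lhs = htlc_inbound_value_msat + amount_msat + value_to_self_msat;

    // Old check: 99.5M > 99M, so A would force-close even though B really
    // holds 5M msat (2M settled plus the 3M it claimed) and keeps 3.5M.
    assert!(lhs > limit);
    // Fixed check: discounting the claims-in-flight accepts it (99.5M <= 102M).
    assert!(lhs <= limit + removed_outbound_total_msat);
}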