Mirror of https://github.com/lightningdevkit/rust-lightning.git (synced 2025-02-25 07:17:40 +01:00)
Merge pull request #1119 from TheBlueMatt/2021-10-less-aggressive-htlc-timeouts

Be less aggressive in outbound HTLC CLTV timeout checks

Commit 4d6c26248d
4 changed files with 18 additions and 13 deletions
@@ -225,8 +225,6 @@ pub const ANTI_REORG_DELAY: u32 = 6;
 /// fail this HTLC,
 /// 2) if we receive an HTLC within this many blocks of its expiry (plus one to avoid a race
 /// condition with the above), we will fail this HTLC without telling the user we received it,
-/// 3) if we are waiting on a connection or a channel state update to send an HTLC to a peer, and
-/// that HTLC expires within this many blocks, we will simply fail the HTLC instead.
 ///
 /// (1) is all about protecting us - we need enough time to update the channel state before we hit
 /// CLTV_CLAIM_BUFFER, at which point we'd go on chain to claim the HTLC with the preimage.
@@ -234,9 +232,6 @@ pub const ANTI_REORG_DELAY: u32 = 6;
 /// (2) is the same, but with an additional buffer to avoid accepting an HTLC which is immediately
 /// in a race condition between the user connecting a block (which would fail it) and the user
 /// providing us the preimage (which would claim it).
-///
-/// (3) is about our counterparty - we don't want to relay an HTLC to a counterparty when they may
-/// end up force-closing the channel on us to claim it.
 pub(crate) const HTLC_FAIL_BACK_BUFFER: u32 = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS;

 // TODO(devrandom) replace this with HolderCommitmentTransaction
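
The doc comment above defines HTLC_FAIL_BACK_BUFFER as the margin, in blocks, inside which we fail an HTLC rather than keep holding it. A minimal standalone sketch of that rule follows; the numeric constant values are assumptions chosen for illustration only and are not taken from this diff.

// Illustrative sketch of the fail-back rule documented above; the numeric
// values below are assumed example values, not taken from this diff.
const CLTV_CLAIM_BUFFER: u32 = 18;            // assumed example value
const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;   // assumed example value
const HTLC_FAIL_BACK_BUFFER: u32 = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS;

/// Returns true if an inbound HTLC with `cltv_expiry` should be failed back
/// now, i.e. it expires within HTLC_FAIL_BACK_BUFFER blocks of `height`.
fn should_fail_back(height: u32, cltv_expiry: u32) -> bool {
    cltv_expiry <= height + HTLC_FAIL_BACK_BUFFER
}

fn main() {
    let height = 700_000;
    // An HTLC expiring exactly at the buffer boundary is failed back.
    assert!(should_fail_back(height, height + HTLC_FAIL_BACK_BUFFER));
    // One block beyond the buffer is still safe to hold.
    assert!(!should_fail_back(height, height + HTLC_FAIL_BACK_BUFFER + 1));
    println!("HTLC_FAIL_BACK_BUFFER = {} blocks", HTLC_FAIL_BACK_BUFFER);
}
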
@@ -32,7 +32,7 @@ use ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputIn
 use ln::chan_utils;
 use chain::BestBlock;
 use chain::chaininterface::{FeeEstimator,ConfirmationTarget};
-use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER};
+use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS};
 use chain::transaction::{OutPoint, TransactionData};
 use chain::keysinterface::{Sign, KeysInterface};
 use util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
@@ -4197,7 +4197,10 @@ impl<Signer: Sign> Channel<Signer> {
 pub fn best_block_updated<L: Deref>(&mut self, height: u32, highest_header_time: u32, logger: &L)
 -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage> where L::Target: Logger {
 let mut timed_out_htlcs = Vec::new();
-let unforwarded_htlc_cltv_limit = height + HTLC_FAIL_BACK_BUFFER;
+// This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
+// forward an HTLC when our counterparty should almost certainly just fail it for expiring
+// ~now.
+let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
 self.holding_cell_htlc_updates.retain(|htlc_update| {
 match htlc_update {
 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
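
The Channel::best_block_updated hunk above loosens the holding-cell sweep from height + HTLC_FAIL_BACK_BUFFER to height + LATENCY_GRACE_PERIOD_BLOCKS. Below is a self-contained sketch of that retain-based sweep; HoldingCellHtlc, sweep_holding_cell, and the constant value are hypothetical stand-ins for the real HTLCUpdateAwaitingACK handling, used only to illustrate the check.

// Standalone sketch of the holding-cell sweep shown in the hunk above.
// `HoldingCellHtlc` and the constant value are illustrative assumptions,
// not the real rust-lightning types.
const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3; // assumed example value

struct HoldingCellHtlc {
    payment_hash: [u8; 32],
    cltv_expiry: u32,
}

/// Drops holding-cell HTLCs whose expiry is at or below the (now less
/// aggressive) limit of `height + LATENCY_GRACE_PERIOD_BLOCKS`, collecting
/// their payment hashes so the caller can fail them back.
fn sweep_holding_cell(holding_cell: &mut Vec<HoldingCellHtlc>, height: u32) -> Vec<[u8; 32]> {
    let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
    let mut timed_out = Vec::new();
    holding_cell.retain(|htlc| {
        if htlc.cltv_expiry <= unforwarded_htlc_cltv_limit {
            timed_out.push(htlc.payment_hash);
            false // drop it from the holding cell
        } else {
            true // keep waiting to forward it
        }
    });
    timed_out
}

fn main() {
    let mut cell = vec![
        HoldingCellHtlc { payment_hash: [1; 32], cltv_expiry: 700_002 },
        HoldingCellHtlc { payment_hash: [2; 32], cltv_expiry: 700_100 },
    ];
    let failed = sweep_holding_cell(&mut cell, 700_000);
    assert_eq!(failed.len(), 1); // only the HTLC expiring within the grace period is failed
    assert_eq!(cell.len(), 1);   // the other one stays queued
}
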
@@ -1959,17 +1959,24 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update_for_unicast(chan).unwrap())));
 }
 let cur_height = self.best_block.read().unwrap().height() + 1;
-// Theoretically, channel counterparty shouldn't send us a HTLC expiring now, but we want to be robust wrt to counterparty
-// packet sanitization (see HTLC_FAIL_BACK_BUFFER rational)
+// Theoretically, channel counterparty shouldn't send us a HTLC expiring now,
+// but we want to be robust wrt to counterparty packet sanitization (see
+// HTLC_FAIL_BACK_BUFFER rationale).
 if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
 break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap())));
 }
 if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
 break Some(("CLTV expiry is too far in the future", 21, None));
 }
-// In theory, we would be safe against unintentional channel-closure, if we only required a margin of LATENCY_GRACE_PERIOD_BLOCKS.
-// But, to be safe against policy reception, we use a longer delay.
-if (*outgoing_cltv_value) as u64 <= (cur_height + HTLC_FAIL_BACK_BUFFER) as u64 {
+// If the HTLC expires ~now, don't bother trying to forward it to our
+// counterparty. They should fail it anyway, but we don't want to bother with
+// the round-trips or risk them deciding they definitely want the HTLC and
+// force-closing to ensure they get it if we're offline.
+// We previously had a much more aggressive check here which tried to ensure
+// our counterparty receives an HTLC which has *our* risk threshold met on it,
+// but there is no need to do that, and since we're a bit conservative with our
+// risk threshold it just results in failing to forward payments.
+if (*outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
 break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap())));
 }

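
The ChannelManager hunk above keeps the inbound expiry_too_soon and expiry_too_far checks unchanged but relaxes the outgoing-leg check to a bare LATENCY_GRACE_PERIOD_BLOCKS margin. A compact sketch of the three checks follows; the error strings and failure codes come from the diff, while check_forward_cltv and the constant values are illustrative assumptions.

// Sketch of the forwarding-time CLTV checks in the hunk above, with assumed
// constant values; only the structure and error codes come from the diff.
const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;  // assumed example value
const CLTV_CLAIM_BUFFER: u32 = 18;           // assumed example value
const HTLC_FAIL_BACK_BUFFER: u32 = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS;
const CLTV_FAR_FAR_AWAY: u32 = 2016;         // assumed example value

/// Returns Err((reason, failure_code)) if the HTLC should not be forwarded.
fn check_forward_cltv(
    cur_height: u32,
    inbound_cltv_expiry: u32,
    outgoing_cltv_value: u32,
) -> Result<(), (&'static str, u16)> {
    // Inbound HTLCs that expire almost immediately are rejected outright.
    if inbound_cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER {
        return Err(("CLTV expiry is too close", 0x1000 | 14));
    }
    // ...as are ones that would lock funds absurdly far into the future.
    if inbound_cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY {
        return Err(("CLTV expiry is too far in the future", 21));
    }
    // After this PR, the outgoing leg only needs to outlive a short grace
    // period rather than our full HTLC_FAIL_BACK_BUFFER risk threshold.
    if outgoing_cltv_value as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
        return Err(("Outgoing CLTV value is too soon", 0x1000 | 14));
    }
    Ok(())
}

fn main() {
    let cur_height = 700_000;
    // An outgoing expiry only a couple of blocks out is rejected...
    assert!(check_forward_cltv(cur_height, cur_height + 100, cur_height + 2).is_err());
    // ...but a modest margin beyond the grace period is now allowed through.
    assert!(check_forward_cltv(cur_height, cur_height + 100, cur_height + 40).is_ok());
}
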
@@ -3994,7 +3994,7 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
 }
 check_added_monitors!(nodes[1], 0);

-connect_blocks(&nodes[1], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS);
+connect_blocks(&nodes[1], TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS);
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 connect_blocks(&nodes[1], 1);
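
The test now connects blocks right up to the new, smaller threshold, asserts nothing is failed yet, and then connects one more block to trigger the fail-back. A worked example of that boundary arithmetic, with assumed values, is sketched below.

// Worked boundary arithmetic for the test above; the values are assumptions
// for illustration, not the real TEST_FINAL_CLTV / LATENCY_GRACE_PERIOD_BLOCKS.
fn main() {
    let latency_grace_period_blocks: u32 = 3; // assumed example value
    let expiry: u32 = 154;                    // assumed HTLC cltv_expiry

    // At this height the HTLC still expires outside the grace window, so the
    // holding-cell sweep keeps it (mirrors the "nothing happens" assertions).
    let height = expiry - latency_grace_period_blocks - 1;
    assert!(expiry > height + latency_grace_period_blocks);

    // Connecting one more block puts the expiry inside the window, so the
    // HTLC is failed back (mirrors the final connect_blocks(&nodes[1], 1)).
    let height = height + 1;
    assert!(expiry <= height + latency_grace_period_blocks);
    println!("fail-back triggers at height {}", height);
}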