mirror of
https://github.com/lightningdevkit/rust-lightning.git
synced 2025-02-25 07:17:40 +01:00
Add a test of stale-feerate-force-closure behavior
This commit is contained in:
parent
5a1cc288b7
commit
17b77e0bcf
2 changed files with 57 additions and 1 deletions
|
@ -965,7 +965,7 @@ const UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS: i32 = 2;
|
|||
/// The number of blocks of historical feerate estimates we keep around and consider when deciding
|
||||
/// to force-close a channel for having too-low fees. Also the number of blocks we have to see
|
||||
/// after startup before we consider force-closing channels for having too-low fees.
|
||||
const FEERATE_TRACKING_BLOCKS: usize = 144;
|
||||
pub(super) const FEERATE_TRACKING_BLOCKS: usize = 144;
|
||||
|
||||
/// Stores a PaymentSecret and any other data we may need to validate an inbound payment is
|
||||
/// actually ours and not some duplicate HTLC sent to us by a node along the route.
|
||||
|
|
|
@ -1464,3 +1464,59 @@ fn batch_funding_failure() {
|
|||
check_closed_events(&nodes[0], &close);
|
||||
assert_eq!(nodes[0].node.list_channels().len(), 0);
|
||||
}
|
||||
|
||||
#[test]
fn test_force_closure_on_low_stale_fee() {
	// Check that we force-close channels if they have a low fee and that has gotten stale (without
	// update).
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;

	// Start by connecting lots of blocks to give LDK some feerate history. Blocks are connected
	// one at a time so each one registers a separate feerate sample for node B.
	for _ in 0..super::channelmanager::FEERATE_TRACKING_BLOCKS * 2 {
		connect_blocks(&nodes[1], 1);
	}

	// Now connect a handful of blocks with a "high" feerate (double the previous estimate).
	{
		let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock *= 2;
	}
	// Stop one block short of a full FEERATE_TRACKING_BLOCKS window so no force-closure can
	// trigger yet.
	for _ in 0..super::channelmanager::FEERATE_TRACKING_BLOCKS - 1 {
		connect_blocks(&nodes[1], 1);
	}
	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());

	// Note that while one more high-feerate block would cause us to force-close, it won't here
	// because we've dropped the feerate back down before connecting it.
	{
		let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock /= 2;
	}
	connect_blocks(&nodes[1], super::channelmanager::FEERATE_TRACKING_BLOCKS as u32 * 2);
	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());

	// Now, connect another FEERATE_TRACKING_BLOCKS - 1 blocks at a high feerate, note that none of
	// these will cause a force-closure because LDK only looks at the minimum feerate over the
	// last FEERATE_TRACKING_BLOCKS blocks.
	{
		let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
		*feerate_lock *= 2;
	}

	for _ in 0..super::channelmanager::FEERATE_TRACKING_BLOCKS - 1 {
		connect_blocks(&nodes[1], 1);
	}
	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());

	// Finally, connect one more block and check the force-close happened: node B should have
	// broadcast its commitment transaction (one new monitor update) and generated a closure event.
	connect_blocks(&nodes[1], 1);
	check_added_monitors!(nodes[1], 1);
	check_closed_broadcast(&nodes[1], 1, true);
	// 253 sat/kW is presumably the test harness's default feerate before the doublings above —
	// NOTE(review): confirm against the chanmon_cfgs fee_estimator setup.
	let reason = ClosureReason::PeerFeerateTooLow { peer_feerate_sat_per_kw: 253, required_feerate_sat_per_kw: 253 * 2 };
	check_closed_events(&nodes[1], &[ExpectedCloseEvent::from_id_reason(chan_id, false, reason)]);
}
|
||||
|
|
Loading…
Add table
Reference in a new issue