Catch up test nodes to latest block height

In a future commit, we plan to correctly enforce in
`TestBroadcaster::broadcast_transaction` that the spending transaction
has a valid locktime relative to the chain of the node broadcasting it.
We catch up these test node instances to their expected height so that
we do not fail said enforcement.
Wilmer Paulino 2023-04-15 22:12:11 -07:00
parent 69d0bfacd9
commit e904d68fa8
6 changed files with 38 additions and 27 deletions
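
For context, the enforcement the message refers to amounts to refusing a broadcast whose lock time the broadcasting node's chain has not yet reached. A minimal sketch of such a check over the Vec<(Block, u32)> that TestBroadcaster already holds (the helper name and exact semantics are illustrative only, not the code the future commit adds):

use bitcoin::{Block, Transaction};

// Illustrative only: fail a broadcast whose height-based lock time exceeds the
// broadcaster's current tip height.
fn assert_locktime_satisfied(blocks: &[(Block, u32)], tx: &Transaction) {
	let lock_time = tx.lock_time.0;
	// Values below 500_000_000 encode block heights rather than timestamps.
	if lock_time > 0 && lock_time < 500_000_000 {
		let cur_height = blocks.last().map(|(_, height)| *height).unwrap_or(0);
		assert!(lock_time <= cur_height,
			"broadcast of a transaction with lock time {} at height {}", lock_time, cur_height);
	}
}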


@@ -1028,12 +1028,12 @@ mod tests {
}
fn create_nodes(num_nodes: usize, persist_dir: String) -> Vec<Node> {
+ let network = Network::Testnet;
let mut nodes = Vec::new();
for i in 0..num_nodes {
- let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))});
+ let tx_broadcaster = Arc::new(test_utils::TestBroadcaster::new(network));
let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) });
let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
- let network = Network::Testnet;
let genesis_block = genesis_block(network);
let network_graph = Arc::new(NetworkGraph::new(network, logger.clone()));
let scorer = Arc::new(Mutex::new(TestScorer::new()));


@@ -4169,7 +4169,7 @@ mod tests {
replay_update.updates.push(ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage: payment_preimage_1 });
replay_update.updates.push(ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage: payment_preimage_2 });
- let broadcaster = TestBroadcaster::new(Arc::clone(&nodes[1].blocks));
+ let broadcaster = TestBroadcaster::with_blocks(Arc::clone(&nodes[1].blocks));
assert!(
pre_update_monitor.update_monitor(&replay_update, &&broadcaster, &chanmon_cfgs[1].fee_estimator, &nodes[1].logger)
.is_err());
@@ -4195,10 +4195,7 @@ mod tests {
fn test_prune_preimages() {
let secp_ctx = Secp256k1::new();
let logger = Arc::new(TestLogger::new());
- let broadcaster = Arc::new(TestBroadcaster {
- txn_broadcasted: Mutex::new(Vec::new()),
- blocks: Arc::new(Mutex::new(Vec::new()))
- });
+ let broadcaster = Arc::new(TestBroadcaster::new(Network::Testnet));
let fee_estimator = TestFeeEstimator { sat_per_kw: Mutex::new(253) };
let dummy_key = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());


@@ -9035,7 +9035,7 @@ pub mod bench {
// calls per node.
let network = bitcoin::Network::Testnet;
- let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))};
+ let tx_broadcaster = test_utils::TestBroadcaster::new(network);
let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
let scorer = Mutex::new(test_utils::TestScorer::new());


@@ -2435,10 +2435,7 @@ pub fn fail_payment<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path: &
pub fn create_chanmon_cfgs(node_count: usize) -> Vec<TestChanMonCfg> {
let mut chan_mon_cfgs = Vec::new();
for i in 0..node_count {
- let tx_broadcaster = test_utils::TestBroadcaster {
- txn_broadcasted: Mutex::new(Vec::new()),
- blocks: Arc::new(Mutex::new(vec![(genesis_block(Network::Testnet), 0)])),
- };
+ let tx_broadcaster = test_utils::TestBroadcaster::new(Network::Testnet);
let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
let chain_source = test_utils::TestChainSource::new(Network::Testnet);
let logger = test_utils::TestLogger::with_id(format!("node {}", i));


@@ -8434,7 +8434,7 @@ fn test_update_err_monitor_lockdown() {
let block = Block { header, txdata: vec![] };
// Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
// transaction lock time requirements here.
- chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize(200, (block.clone(), 0));
+ chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize(200, (block.clone(), 200));
watchtower.chain_monitor.block_connected(&block, 200);
// Try to update ChannelMonitor
@@ -8486,6 +8486,9 @@ fn test_concurrent_monitor_claim() {
let chain_source = test_utils::TestChainSource::new(Network::Testnet);
let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice"));
let persister = test_utils::TestPersister::new();
+ let alice_broadcaster = test_utils::TestBroadcaster::with_blocks(
+ Arc::new(Mutex::new(nodes[0].blocks.lock().unwrap().clone())),
+ );
let watchtower_alice = {
let new_monitor = {
let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
@@ -8494,20 +8497,21 @@
assert!(new_monitor == *monitor);
new_monitor
};
- let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
+ let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &alice_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
watchtower
};
let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
let block = Block { header, txdata: vec![] };
- // Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
- // transaction lock time requirements here.
- chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize((CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS) as usize, (block.clone(), 0));
- watchtower_alice.chain_monitor.block_connected(&block, CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
+ // Make Alice aware of enough blocks that it doesn't think we're violating transaction lock time
+ // requirements here.
+ const HTLC_TIMEOUT_BROADCAST: u32 = CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS;
+ alice_broadcaster.blocks.lock().unwrap().resize((HTLC_TIMEOUT_BROADCAST) as usize, (block.clone(), HTLC_TIMEOUT_BROADCAST));
+ watchtower_alice.chain_monitor.block_connected(&block, HTLC_TIMEOUT_BROADCAST);
// Watchtower Alice should have broadcast a commitment/HTLC-timeout
let alice_state = {
- let mut txn = chanmon_cfgs[0].tx_broadcaster.txn_broadcast();
+ let mut txn = alice_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 2);
txn.remove(0)
};
@@ -8516,6 +8520,7 @@ fn test_concurrent_monitor_claim() {
let chain_source = test_utils::TestChainSource::new(Network::Testnet);
let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob"));
let persister = test_utils::TestPersister::new();
+ let bob_broadcaster = test_utils::TestBroadcaster::with_blocks(Arc::clone(&alice_broadcaster.blocks));
let watchtower_bob = {
let new_monitor = {
let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
@@ -8524,12 +8529,12 @@ fn test_concurrent_monitor_claim() {
assert!(new_monitor == *monitor);
new_monitor
};
- let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
+ let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &bob_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
watchtower
};
let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
- watchtower_bob.chain_monitor.block_connected(&Block { header, txdata: vec![] }, CHAN_CONFIRM_DEPTH + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
+ watchtower_bob.chain_monitor.block_connected(&Block { header, txdata: vec![] }, HTLC_TIMEOUT_BROADCAST - 1);
// Route another payment to generate another update with still previous HTLC pending
let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 3000000);
@@ -8556,21 +8561,26 @@ fn test_concurrent_monitor_claim() {
//// Provide one more block to watchtower Bob, expect broadcast of commitment and HTLC-Timeout
let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
- watchtower_bob.chain_monitor.block_connected(&Block { header, txdata: vec![] }, CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
+ watchtower_bob.chain_monitor.block_connected(&Block { header, txdata: vec![] }, HTLC_TIMEOUT_BROADCAST);
// Watchtower Bob should have broadcast a commitment/HTLC-timeout
let bob_state_y;
{
- let mut txn = chanmon_cfgs[0].tx_broadcaster.txn_broadcast();
+ let mut txn = bob_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 2);
bob_state_y = txn.remove(0);
};
// We confirm Bob's state Y on Alice, she should broadcast a HTLC-timeout
let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
- watchtower_alice.chain_monitor.block_connected(&Block { header, txdata: vec![bob_state_y.clone()] }, CHAN_CONFIRM_DEPTH + 2 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
+ let height = HTLC_TIMEOUT_BROADCAST + 1;
+ connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
+ check_closed_broadcast(&nodes[0], 1, true);
+ check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false);
+ watchtower_alice.chain_monitor.block_connected(&Block { header, txdata: vec![bob_state_y.clone()] }, height);
+ check_added_monitors(&nodes[0], 1);
{
- let htlc_txn = chanmon_cfgs[0].tx_broadcaster.txn_broadcast();
+ let htlc_txn = alice_broadcaster.txn_broadcast();
assert_eq!(htlc_txn.len(), 2);
check_spends!(htlc_txn[0], bob_state_y);
// Alice doesn't clean up the old HTLC claim since it hasn't seen a conflicting spend for


@@ -308,8 +308,15 @@ pub struct TestBroadcaster {
}
impl TestBroadcaster {
- pub fn new(blocks: Arc<Mutex<Vec<(Block, u32)>>>) -> TestBroadcaster {
- TestBroadcaster { txn_broadcasted: Mutex::new(Vec::new()), blocks }
+ pub fn new(network: Network) -> Self {
+ Self {
+ txn_broadcasted: Mutex::new(Vec::new()),
+ blocks: Arc::new(Mutex::new(vec![(genesis_block(network), 0)])),
+ }
+ }
+ pub fn with_blocks(blocks: Arc<Mutex<Vec<(Block, u32)>>>) -> Self {
+ Self { txn_broadcasted: Mutex::new(Vec::new()), blocks }
}
pub fn txn_broadcast(&self) -> Vec<Transaction> {
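
With the two constructors above, a test either spins up a fresh broadcaster seeded at the genesis block or shares an existing chain view between a node and its watchtower. A usage sketch mirroring the call sites in this commit (assumes the usual test imports such as Arc, Mutex, and Network, plus a nodes fixture from the surrounding tests):

// Fresh broadcaster: starts from the testnet genesis block at height 0.
let tx_broadcaster = test_utils::TestBroadcaster::new(Network::Testnet);

// Watchtower-style broadcaster: clones an existing node's block view so that
// lock-time checks observe the same chain height as the node itself.
let alice_broadcaster = test_utils::TestBroadcaster::with_blocks(
	Arc::new(Mutex::new(nodes[0].blocks.lock().unwrap().clone())),
);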