// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! A bunch of useful utilities for building networks of nodes and exchanging messages between
//! nodes for functional tests.

use chain::{BestBlock, Confirm, Listen, Watch};
use chain::channelmonitor::ChannelMonitor;
use chain::transaction::OutPoint;
use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
use ln::channelmanager::{ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure};
use routing::router::{Route, get_route};
use routing::network_graph::{NetGraphMsgHandler, NetworkGraph};
use ln::features::{InitFeatures, InvoiceFeatures};
use ln::msgs;
use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
use util::enforcing_trait_impls::EnforcingSigner;
use util::test_utils;
use util::test_utils::TestChainMonitor;
use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
use util::errors::APIError;
use util::config::UserConfig;
use util::ser::{ReadableArgs, Writeable, Readable};

use bitcoin::blockdata::block::{Block, BlockHeader};
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::blockdata::transaction::{Transaction, TxOut};
use bitcoin::network::constants::Network;

use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::Hash;
use bitcoin::hash_types::BlockHash;

use bitcoin::secp256k1::key::PublicKey;

use io;
use prelude::*;
use core::cell::RefCell;
use std::rc::Rc;
use sync::{Arc, Mutex};
use core::mem;

pub const CHAN_CONFIRM_DEPTH: u32 = 10;

/// Mine the given transaction in the next block and then mine CHAN_CONFIRM_DEPTH - 1 blocks on
/// top, giving the given transaction CHAN_CONFIRM_DEPTH confirmations.
pub fn confirm_transaction<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction) {
	confirm_transaction_at(node, tx, node.best_block_info().1 + 1);
	connect_blocks(node, CHAN_CONFIRM_DEPTH - 1);
}

/// Mine a single block containing the given transaction
pub fn mine_transaction<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction) {
	let height = node.best_block_info().1 + 1;
	confirm_transaction_at(node, tx, height);
}

/// Mine the given transaction at the given height, mining blocks as required to build to that
/// height
pub fn confirm_transaction_at<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction, conf_height: u32) {
	let first_connect_height = node.best_block_info().1 + 1;
	assert!(first_connect_height <= conf_height);
	if conf_height > first_connect_height {
		connect_blocks(node, conf_height - first_connect_height);
	}
	let mut block = Block {
		header: BlockHeader { version: 0x20000000, prev_blockhash: node.best_block_hash(), merkle_root: Default::default(), time: conf_height, bits: 42, nonce: 42 },
		txdata: Vec::new(),
	};
	for _ in 0..*node.network_chan_count.borrow() { // Make sure we don't end up with channels at the same short id by offsetting by chan_count
		block.txdata.push(Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() });
	}
	block.txdata.push(tx.clone());
	connect_block(node, &block);
}
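
// A minimal usage sketch for the mining helpers above (hedged: `nodes` and
// `tx` are assumed to come from the usual test-harness setup outside this
// excerpt):
//
//     confirm_transaction(&nodes[0], &tx); // bury tx under CHAN_CONFIRM_DEPTH confirmations
//     mine_transaction(&nodes[0], &tx);    // or mine it once, leaving exactly 1 confirmation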

/// The possible ways we may notify a ChannelManager of a new block
#[derive(Clone, Copy, PartialEq)]
pub enum ConnectStyle {
	/// Calls best_block_updated first, detecting transactions in the block only after receiving the
	/// header and height information.
	BestBlockFirst,
	/// The same as BestBlockFirst, however when we have multiple blocks to connect, we only
	/// make a single best_block_updated call.
	BestBlockFirstSkippingBlocks,
	/// Calls transactions_confirmed first, detecting transactions in the block before updating the
	/// header and height information.
	TransactionsFirst,
	/// The same as TransactionsFirst, however when we have multiple blocks to connect, we only
	/// make a single best_block_updated call.
	TransactionsFirstSkippingBlocks,
	/// Provides the full block via the chain::Listen interface. In the current code this is
	/// equivalent to TransactionsFirst with some additional assertions.
	FullBlockViaListen,
}
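
// Hedged sketch of how a test can pick a notification ordering (assuming a
// harness-built `node`):
//
//     *node.connect_style.borrow_mut() = ConnectStyle::TransactionsFirstSkippingBlocks;
//     connect_blocks(&node, 6); // intermediary blocks are skipped when notifying listeners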

pub fn connect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, depth: u32) -> BlockHash {
	let skip_intermediaries = match *node.connect_style.borrow() {
		ConnectStyle::BestBlockFirstSkippingBlocks|ConnectStyle::TransactionsFirstSkippingBlocks => true,
		_ => false,
	};

	let height = node.best_block_info().1 + 1;
	let mut block = Block {
		header: BlockHeader { version: 0x20000000, prev_blockhash: node.best_block_hash(), merkle_root: Default::default(), time: height, bits: 42, nonce: 42 },
		txdata: vec![],
	};
	assert!(depth >= 1);
	for i in 1..depth {
		do_connect_block(node, &block, skip_intermediaries);
		block = Block {
			header: BlockHeader { version: 0x20000000, prev_blockhash: block.header.block_hash(), merkle_root: Default::default(), time: height + i, bits: 42, nonce: 42 },
			txdata: vec![],
		};
	}
	connect_block(node, &block);
	block.header.block_hash()
}

pub fn connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block) {
	do_connect_block(node, block, false);
}

fn do_connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block, skip_intermediaries: bool) {
	let height = node.best_block_info().1 + 1;
	if !skip_intermediaries {
		let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
		match *node.connect_style.borrow() {
			ConnectStyle::BestBlockFirst|ConnectStyle::BestBlockFirstSkippingBlocks => {
				node.chain_monitor.chain_monitor.best_block_updated(&block.header, height);
				node.chain_monitor.chain_monitor.transactions_confirmed(&block.header, &txdata, height);
				node.node.best_block_updated(&block.header, height);
				node.node.transactions_confirmed(&block.header, &txdata, height);
			},
			ConnectStyle::TransactionsFirst|ConnectStyle::TransactionsFirstSkippingBlocks => {
				node.chain_monitor.chain_monitor.transactions_confirmed(&block.header, &txdata, height);
				node.chain_monitor.chain_monitor.best_block_updated(&block.header, height);
				node.node.transactions_confirmed(&block.header, &txdata, height);
				node.node.best_block_updated(&block.header, height);
			},
			ConnectStyle::FullBlockViaListen => {
				node.chain_monitor.chain_monitor.block_connected(&block, height);
				node.node.block_connected(&block, height);
			}
		}
	}
	node.node.test_process_background_events();
	node.blocks.lock().unwrap().push((block.header, height));
}

pub fn disconnect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, count: u32) {
	for i in 0..count {
		let orig_header = node.blocks.lock().unwrap().pop().unwrap();
		assert!(orig_header.1 > 0); // Cannot disconnect genesis
		let prev_header = node.blocks.lock().unwrap().last().unwrap().clone();

		match *node.connect_style.borrow() {
			ConnectStyle::FullBlockViaListen => {
				node.chain_monitor.chain_monitor.block_disconnected(&orig_header.0, orig_header.1);
				Listen::block_disconnected(node.node, &orig_header.0, orig_header.1);
			},
			ConnectStyle::BestBlockFirstSkippingBlocks|ConnectStyle::TransactionsFirstSkippingBlocks => {
				if i == count - 1 {
					node.chain_monitor.chain_monitor.best_block_updated(&prev_header.0, prev_header.1);
					node.node.best_block_updated(&prev_header.0, prev_header.1);
				}
			},
			_ => {
				node.chain_monitor.chain_monitor.best_block_updated(&prev_header.0, prev_header.1);
				node.node.best_block_updated(&prev_header.0, prev_header.1);
			},
		}
	}
}

pub fn disconnect_all_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>) {
	let count = node.blocks.lock().unwrap().len() as u32 - 1;
	disconnect_blocks(node, count);
}
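
// Hedged reorg sketch (assuming a harness-built `node` and an assumed
// `starting_height` binding): wind the chain forward, then unwind part of it:
//
//     connect_blocks(&node, 3);
//     disconnect_blocks(&node, 2);
//     assert_eq!(node.best_block_info().1, starting_height + 1);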

pub struct TestChanMonCfg {
	pub tx_broadcaster: test_utils::TestBroadcaster,
	pub fee_estimator: test_utils::TestFeeEstimator,
	pub chain_source: test_utils::TestChainSource,
	pub persister: test_utils::TestPersister,
	pub logger: test_utils::TestLogger,
	pub keys_manager: test_utils::TestKeysInterface,
}

pub struct NodeCfg<'a> {
	pub chain_source: &'a test_utils::TestChainSource,
	pub tx_broadcaster: &'a test_utils::TestBroadcaster,
	pub fee_estimator: &'a test_utils::TestFeeEstimator,
	pub chain_monitor: test_utils::TestChainMonitor<'a>,
	pub keys_manager: &'a test_utils::TestKeysInterface,
	pub logger: &'a test_utils::TestLogger,
	pub node_seed: [u8; 32],
	pub features: InitFeatures,
}

pub struct Node<'a, 'b: 'a, 'c: 'b> {
	pub chain_source: &'c test_utils::TestChainSource,
	pub tx_broadcaster: &'c test_utils::TestBroadcaster,
	pub chain_monitor: &'b test_utils::TestChainMonitor<'c>,
	pub keys_manager: &'b test_utils::TestKeysInterface,
	pub node: &'a ChannelManager<EnforcingSigner, &'b TestChainMonitor<'c>, &'c test_utils::TestBroadcaster, &'b test_utils::TestKeysInterface, &'c test_utils::TestFeeEstimator, &'c test_utils::TestLogger>,
	pub net_graph_msg_handler: NetGraphMsgHandler<&'c test_utils::TestChainSource, &'c test_utils::TestLogger>,
	pub node_seed: [u8; 32],
	pub network_payment_count: Rc<RefCell<u8>>,
	pub network_chan_count: Rc<RefCell<u32>>,
	pub logger: &'c test_utils::TestLogger,
	pub blocks: Arc<Mutex<Vec<(BlockHeader, u32)>>>,
	pub connect_style: Rc<RefCell<ConnectStyle>>,
}
impl<'a, 'b, 'c> Node<'a, 'b, 'c> {
	pub fn best_block_hash(&self) -> BlockHash {
		self.blocks.lock().unwrap().last().unwrap().0.block_hash()
	}
	pub fn best_block_info(&self) -> (BlockHash, u32) {
		self.blocks.lock().unwrap().last().map(|(a, b)| (a.block_hash(), *b)).unwrap()
	}
	pub fn get_block_header(&self, height: u32) -> BlockHeader {
		self.blocks.lock().unwrap()[height as usize].0
	}
}

impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
	fn drop(&mut self) {
		if !::std::thread::panicking() {
			// Check that we processed all pending events
			assert!(self.node.get_and_clear_pending_msg_events().is_empty());
			assert!(self.node.get_and_clear_pending_events().is_empty());
			assert!(self.chain_monitor.added_monitors.lock().unwrap().is_empty());

			// Check that if we serialize the network graph, we can deserialize it again.
			{
				let mut w = test_utils::TestVecWriter(Vec::new());
				let network_graph_ser = &self.net_graph_msg_handler.network_graph;
				network_graph_ser.write(&mut w).unwrap();
				let network_graph_deser = <NetworkGraph>::read(&mut io::Cursor::new(&w.0)).unwrap();
				assert!(network_graph_deser == self.net_graph_msg_handler.network_graph);
				let net_graph_msg_handler = NetGraphMsgHandler::new(
					network_graph_deser, Some(self.chain_source), self.logger
				);
				let mut chan_progress = 0;
				loop {
					let orig_announcements = self.net_graph_msg_handler.get_next_channel_announcements(chan_progress, 255);
					let deserialized_announcements = net_graph_msg_handler.get_next_channel_announcements(chan_progress, 255);
					assert!(orig_announcements == deserialized_announcements);
					chan_progress = match orig_announcements.last() {
						Some(announcement) => announcement.0.contents.short_channel_id + 1,
						None => break,
					};
				}
				let mut node_progress = None;
				loop {
					let orig_announcements = self.net_graph_msg_handler.get_next_node_announcements(node_progress.as_ref(), 255);
					let deserialized_announcements = net_graph_msg_handler.get_next_node_announcements(node_progress.as_ref(), 255);
					assert!(orig_announcements == deserialized_announcements);
					node_progress = match orig_announcements.last() {
						Some(announcement) => Some(announcement.contents.node_id),
						None => break,
					};
				}
			}

			// Check that if we serialize and then deserialize all our channel monitors we get the
			// same set of outputs to watch for on chain as we have now. Note that if we write
			// tests that fully close channels and remove the monitors at some point this may break.
			let feeest = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
			let mut deserialized_monitors = Vec::new();
			{
				let old_monitors = self.chain_monitor.chain_monitor.monitors.read().unwrap();
				for (_, old_monitor) in old_monitors.iter() {
					let mut w = test_utils::TestVecWriter(Vec::new());
					old_monitor.write(&mut w).unwrap();
					let (_, deserialized_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
						&mut io::Cursor::new(&w.0), self.keys_manager).unwrap();
					deserialized_monitors.push(deserialized_monitor);
				}
			}

			// Before using all the new monitors to check the watch outpoints, use the full set of
			// them to ensure we can write and reload our ChannelManager.
			{
				let mut channel_monitors = HashMap::new();
				for monitor in deserialized_monitors.iter_mut() {
					channel_monitors.insert(monitor.get_funding_txo().0, monitor);
				}

				let mut w = test_utils::TestVecWriter(Vec::new());
				self.node.write(&mut w).unwrap();
				<(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut io::Cursor::new(w.0), ChannelManagerReadArgs {
					default_config: *self.node.get_current_default_configuration(),
					keys_manager: self.keys_manager,
					fee_estimator: &test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) },
					chain_monitor: self.chain_monitor,
					tx_broadcaster: &test_utils::TestBroadcaster {
						txn_broadcasted: Mutex::new(self.tx_broadcaster.txn_broadcasted.lock().unwrap().clone()),
						blocks: Arc::new(Mutex::new(self.tx_broadcaster.blocks.lock().unwrap().clone())),
					},
					logger: &self.logger,
					channel_monitors,
				}).unwrap();
			}

			let persister = test_utils::TestPersister::new();
			let broadcaster = test_utils::TestBroadcaster {
				txn_broadcasted: Mutex::new(self.tx_broadcaster.txn_broadcasted.lock().unwrap().clone()),
				blocks: Arc::new(Mutex::new(self.tx_broadcaster.blocks.lock().unwrap().clone())),
			};
			let chain_source = test_utils::TestChainSource::new(Network::Testnet);
			let chain_monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &broadcaster, &self.logger, &feeest, &persister, &self.keys_manager);
			for deserialized_monitor in deserialized_monitors.drain(..) {
				if let Err(_) = chain_monitor.watch_channel(deserialized_monitor.get_funding_txo().0, deserialized_monitor) {
					panic!();
				}
			}
			assert_eq!(*chain_source.watched_txn.lock().unwrap(), *self.chain_source.watched_txn.lock().unwrap());
			assert_eq!(*chain_source.watched_outputs.lock().unwrap(), *self.chain_source.watched_outputs.lock().unwrap());
		}
	}
}

pub fn create_chan_between_nodes<'a, 'b, 'c, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, a_flags: InitFeatures, b_flags: InitFeatures) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
	create_chan_between_nodes_with_value(node_a, node_b, 100000, 10001, a_flags, b_flags)
}

pub fn create_chan_between_nodes_with_value<'a, 'b, 'c, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, channel_value: u64, push_msat: u64, a_flags: InitFeatures, b_flags: InitFeatures) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
	let (funding_locked, channel_id, tx) = create_chan_between_nodes_with_value_a(node_a, node_b, channel_value, push_msat, a_flags, b_flags);
	let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(node_a, node_b, &funding_locked);
	(announcement, as_update, bs_update, channel_id, tx)
}

macro_rules! get_revoke_commit_msgs {
	($node: expr, $node_id: expr) => {
		{
			let events = $node.node.get_and_clear_pending_msg_events();
			assert_eq!(events.len(), 2);
			(match events[0] {
				MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
					assert_eq!(*node_id, $node_id);
					(*msg).clone()
				},
				_ => panic!("Unexpected event"),
			}, match events[1] {
				MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
					assert_eq!(*node_id, $node_id);
					assert!(updates.update_add_htlcs.is_empty());
					assert!(updates.update_fulfill_htlcs.is_empty());
					assert!(updates.update_fail_htlcs.is_empty());
					assert!(updates.update_fail_malformed_htlcs.is_empty());
					assert!(updates.update_fee.is_none());
					updates.commitment_signed.clone()
				},
				_ => panic!("Unexpected event"),
			})
		}
	}
}
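
// Hedged usage sketch (assuming `nodes` from the harness, where node 0 owes
// node 1 a revoke_and_ack plus a fresh commitment_signed):
//
//     let (raa, cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());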

/// Get a specific event message from the pending events queue.
#[macro_export]
macro_rules! get_event_msg {
	($node: expr, $event_type: path, $node_id: expr) => {
		{
			let events = $node.node.get_and_clear_pending_msg_events();
			assert_eq!(events.len(), 1);
			match events[0] {
				$event_type { ref node_id, ref msg } => {
					assert_eq!(*node_id, $node_id);
					(*msg).clone()
				},
				_ => panic!("Unexpected event"),
			}
		}
	}
}
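
// Hedged usage sketch: fetch the single message node 0 currently has queued
// for node 1, panicking if anything else is pending:
//
//     let open_chan = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());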

/// Get a specific event from the pending events queue.
#[macro_export]
macro_rules! get_event {
	($node: expr, $event_type: path) => {
		{
			let mut events = $node.node.get_and_clear_pending_events();
			assert_eq!(events.len(), 1);
			let ev = events.pop().unwrap();
			match ev {
				$event_type { .. } => {
					ev
				},
				_ => panic!("Unexpected event"),
			}
		}
	}
}

#[cfg(test)]
macro_rules! get_htlc_update_msgs {
	($node: expr, $node_id: expr) => {
		{
			let events = $node.node.get_and_clear_pending_msg_events();
			assert_eq!(events.len(), 1);
			match events[0] {
				MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
					assert_eq!(*node_id, $node_id);
					(*updates).clone()
				},
				_ => panic!("Unexpected event"),
			}
		}
	}
}

#[cfg(test)]
macro_rules! get_channel_ref {
	($node: expr, $lock: ident, $channel_id: expr) => {
		{
			$lock = $node.node.channel_state.lock().unwrap();
			$lock.by_id.get_mut(&$channel_id).unwrap()
		}
	}
}

#[cfg(test)]
macro_rules! get_feerate {
	($node: expr, $channel_id: expr) => {
		{
			let mut lock;
			let chan = get_channel_ref!($node, lock, $channel_id);
			chan.get_feerate()
		}
	}
}

/// Returns any local commitment transactions for the channel.
#[macro_export]
macro_rules! get_local_commitment_txn {
	($node: expr, $channel_id: expr) => {
		{
			let monitors = $node.chain_monitor.chain_monitor.monitors.read().unwrap();
			let mut commitment_txn = None;
			for (funding_txo, monitor) in monitors.iter() {
				if funding_txo.to_channel_id() == $channel_id {
					commitment_txn = Some(monitor.unsafe_get_latest_holder_commitment_txn(&$node.logger));
					break;
				}
			}
			commitment_txn.unwrap()
		}
	}
}

/// Check the error from attempting a payment.
#[macro_export]
macro_rules! unwrap_send_err {
	($res: expr, $all_failed: expr, $type: pat, $check: expr) => {
		match &$res {
			&Err(PaymentSendFailure::AllFailedRetrySafe(ref fails)) if $all_failed => {
				assert_eq!(fails.len(), 1);
				match fails[0] {
					$type => { $check },
					_ => panic!(),
				}
			},
			&Err(PaymentSendFailure::PartialFailure(ref fails)) if !$all_failed => {
				assert_eq!(fails.len(), 1);
				match fails[0] {
					Err($type) => { $check },
					_ => panic!(),
				}
			},
			_ => panic!(),
		}
	}
}
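
// Hedged usage sketch (the `route`/`payment_hash` bindings and the exact error
// are assumed from the surrounding test): assert a send fails entirely with a
// particular APIError:
//
//     unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash, &None),
//         true, APIError::ChannelUnavailable { ref err },
//         assert!(err.contains("disconnected")));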

/// Check whether N channel monitor(s) have been added.
#[macro_export]
macro_rules! check_added_monitors {
	($node: expr, $count: expr) => {
		{
			let mut added_monitors = $node.chain_monitor.added_monitors.lock().unwrap();
			assert_eq!(added_monitors.len(), $count);
			added_monitors.clear();
		}
	}
}
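
// Hedged usage sketch: after an operation expected to persist exactly one
// monitor update (here an assumed pending `send_payment` call), assert and
// reset the counter:
//
//     nodes[0].node.send_payment(&route, payment_hash, &None).unwrap();
//     check_added_monitors!(nodes[0], 1);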

pub fn create_funding_transaction<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, expected_chan_value: u64, expected_user_chan_id: u64) -> ([u8; 32], Transaction, OutPoint) {
	let chan_id = *node.network_chan_count.borrow();

	let events = node.node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, user_channel_id } => {
			assert_eq!(*channel_value_satoshis, expected_chan_value);
			assert_eq!(user_channel_id, expected_user_chan_id);

			let tx = Transaction { version: chan_id as i32, lock_time: 0, input: Vec::new(), output: vec![TxOut {
				value: *channel_value_satoshis, script_pubkey: output_script.clone(),
			}]};
			let funding_outpoint = OutPoint { txid: tx.txid(), index: 0 };
			(*temporary_channel_id, tx, funding_outpoint)
		},
		_ => panic!("Unexpected event"),
	}
}

pub fn create_chan_between_nodes_with_value_init<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, channel_value: u64, push_msat: u64, a_flags: InitFeatures, b_flags: InitFeatures) -> Transaction {
	node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42, None).unwrap();
	node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), a_flags, &get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id()));
	node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), b_flags, &get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id()));

	let (temporary_channel_id, tx, funding_output) = create_funding_transaction(node_a, channel_value, 42);

	node_a.node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
	check_added_monitors!(node_a, 0);

	node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id()));
	{
		let mut added_monitors = node_b.chain_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), 1);
		assert_eq!(added_monitors[0].0, funding_output);
		added_monitors.clear();
	}

	node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a.node.get_our_node_id()));
	{
		let mut added_monitors = node_a.chain_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), 1);
		assert_eq!(added_monitors[0].0, funding_output);
		added_monitors.clear();
	}

	let events_4 = node_a.node.get_and_clear_pending_events();
	assert_eq!(events_4.len(), 0);

	assert_eq!(node_a.tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
	assert_eq!(node_a.tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
	node_a.tx_broadcaster.txn_broadcasted.lock().unwrap().clear();

	tx
}

pub fn create_chan_between_nodes_with_value_confirm_first<'a, 'b, 'c, 'd>(node_recv: &'a Node<'b, 'c, 'd>, node_conf: &'a Node<'b, 'c, 'd>, tx: &Transaction, conf_height: u32) {
	confirm_transaction_at(node_conf, tx, conf_height);
	connect_blocks(node_conf, CHAN_CONFIRM_DEPTH - 1);
	node_recv.node.handle_funding_locked(&node_conf.node.get_our_node_id(), &get_event_msg!(node_conf, MessageSendEvent::SendFundingLocked, node_recv.node.get_our_node_id()));
}

pub fn create_chan_between_nodes_with_value_confirm_second<'a, 'b, 'c>(node_recv: &Node<'a, 'b, 'c>, node_conf: &Node<'a, 'b, 'c>) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) {
	let channel_id;
	let events_6 = node_conf.node.get_and_clear_pending_msg_events();
	assert_eq!(events_6.len(), 2);
	((match events_6[0] {
		MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => {
			channel_id = msg.channel_id.clone();
			assert_eq!(*node_id, node_recv.node.get_our_node_id());
			msg.clone()
		},
		_ => panic!("Unexpected event"),
	}, match events_6[1] {
		MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
			assert_eq!(*node_id, node_recv.node.get_our_node_id());
			msg.clone()
		},
		_ => panic!("Unexpected event"),
	}), channel_id)
}

pub fn create_chan_between_nodes_with_value_confirm<'a, 'b, 'c, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, tx: &Transaction) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) {
	let conf_height = core::cmp::max(node_a.best_block_info().1 + 1, node_b.best_block_info().1 + 1);
	create_chan_between_nodes_with_value_confirm_first(node_a, node_b, tx, conf_height);
	confirm_transaction_at(node_a, tx, conf_height);
	connect_blocks(node_a, CHAN_CONFIRM_DEPTH - 1);
	create_chan_between_nodes_with_value_confirm_second(node_b, node_a)
}

pub fn create_chan_between_nodes_with_value_a<'a, 'b, 'c, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, channel_value: u64, push_msat: u64, a_flags: InitFeatures, b_flags: InitFeatures) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32], Transaction) {
	let tx = create_chan_between_nodes_with_value_init(node_a, node_b, channel_value, push_msat, a_flags, b_flags);
	let (msgs, chan_id) = create_chan_between_nodes_with_value_confirm(node_a, node_b, &tx);
	(msgs, chan_id, tx)
}

pub fn create_chan_between_nodes_with_value_b<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, as_funding_msgs: &(msgs::FundingLocked, msgs::AnnouncementSignatures)) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate) {
	node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &as_funding_msgs.0);
	let bs_announcement_sigs = get_event_msg!(node_b, MessageSendEvent::SendAnnouncementSignatures, node_a.node.get_our_node_id());
	node_b.node.handle_announcement_signatures(&node_a.node.get_our_node_id(), &as_funding_msgs.1);

	let events_7 = node_b.node.get_and_clear_pending_msg_events();
	assert_eq!(events_7.len(), 1);
	let (announcement, bs_update) = match events_7[0] {
		MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
			(msg, update_msg)
		},
		_ => panic!("Unexpected event"),
	};

	node_a.node.handle_announcement_signatures(&node_b.node.get_our_node_id(), &bs_announcement_sigs);
	let events_8 = node_a.node.get_and_clear_pending_msg_events();
	assert_eq!(events_8.len(), 1);
	let as_update = match events_8[0] {
		MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
			assert!(*announcement == *msg);
			assert_eq!(update_msg.contents.short_channel_id, announcement.contents.short_channel_id);
			assert_eq!(update_msg.contents.short_channel_id, bs_update.contents.short_channel_id);
			update_msg
		},
		_ => panic!("Unexpected event"),
	};

	*node_a.network_chan_count.borrow_mut() += 1;

	((*announcement).clone(), (*as_update).clone(), (*bs_update).clone())
}

pub fn create_announced_chan_between_nodes<'a, 'b, 'c, 'd>(nodes: &'a Vec<Node<'b, 'c, 'd>>, a: usize, b: usize, a_flags: InitFeatures, b_flags: InitFeatures) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
	create_announced_chan_between_nodes_with_value(nodes, a, b, 100000, 10001, a_flags, b_flags)
}

pub fn create_announced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>(nodes: &'a Vec<Node<'b, 'c, 'd>>, a: usize, b: usize, channel_value: u64, push_msat: u64, a_flags: InitFeatures, b_flags: InitFeatures) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
	let chan_announcement = create_chan_between_nodes_with_value(&nodes[a], &nodes[b], channel_value, push_msat, a_flags, b_flags);
	update_nodes_with_chan_announce(nodes, a, b, &chan_announcement.0, &chan_announcement.1, &chan_announcement.2);
	(chan_announcement.1, chan_announcement.2, chan_announcement.3, chan_announcement.4)
}
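
// Hedged sketch of the typical entry point (assuming `nodes` was built by the
// create_chanmon_cfgs/create_node_cfgs/create_network-style helpers outside
// this excerpt): open and announce a 100_000-sat channel between nodes 0 and 1:
//
//     let chan = create_announced_chan_between_nodes(&nodes, 0, 1,
//         InitFeatures::known(), InitFeatures::known());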

pub fn update_nodes_with_chan_announce<'a, 'b, 'c, 'd>(nodes: &'a Vec<Node<'b, 'c, 'd>>, a: usize, b: usize, ann: &msgs::ChannelAnnouncement, upd_1: &msgs::ChannelUpdate, upd_2: &msgs::ChannelUpdate) {
	nodes[a].node.broadcast_node_announcement([0, 0, 0], [0; 32], Vec::new());
	let a_events = nodes[a].node.get_and_clear_pending_msg_events();
	assert!(a_events.len() >= 2);

	// ann should be re-generated by broadcast_node_announcement - check that we have it.
	let mut found_ann_1 = false;
	for event in a_events.iter() {
		match event {
			MessageSendEvent::BroadcastChannelAnnouncement { ref msg, .. } => {
				if msg == ann { found_ann_1 = true; }
			},
			MessageSendEvent::BroadcastNodeAnnouncement { .. } => {},
			_ => panic!("Unexpected event {:?}", event),
		}
	}
	assert!(found_ann_1);

	let a_node_announcement = match a_events.last().unwrap() {
		MessageSendEvent::BroadcastNodeAnnouncement { ref msg } => {
			(*msg).clone()
		},
		_ => panic!("Unexpected event"),
	};

	nodes[b].node.broadcast_node_announcement([1, 1, 1], [1; 32], Vec::new());
	let b_events = nodes[b].node.get_and_clear_pending_msg_events();
	assert!(b_events.len() >= 2);

	// ann should be re-generated by broadcast_node_announcement - check that we have it.
	let mut found_ann_2 = false;
	for event in b_events.iter() {
		match event {
			MessageSendEvent::BroadcastChannelAnnouncement { ref msg, .. } => {
				if msg == ann { found_ann_2 = true; }
			},
			MessageSendEvent::BroadcastNodeAnnouncement { .. } => {},
			_ => panic!("Unexpected event"),
		}
	}
	assert!(found_ann_2);

	let b_node_announcement = match b_events.last().unwrap() {
		MessageSendEvent::BroadcastNodeAnnouncement { ref msg } => {
			(*msg).clone()
		},
		_ => panic!("Unexpected event"),
	};

	for node in nodes {
		assert!(node.net_graph_msg_handler.handle_channel_announcement(ann).unwrap());
		node.net_graph_msg_handler.handle_channel_update(upd_1).unwrap();
		node.net_graph_msg_handler.handle_channel_update(upd_2).unwrap();
		node.net_graph_msg_handler.handle_node_announcement(&a_node_announcement).unwrap();
		node.net_graph_msg_handler.handle_node_announcement(&b_node_announcement).unwrap();
	}
}

macro_rules! check_spends {
	($tx: expr, $($spends_txn: expr),*) => {
		{
			let get_output = |out_point: &bitcoin::blockdata::transaction::OutPoint| {
				$(
					if out_point.txid == $spends_txn.txid() {
						return $spends_txn.output.get(out_point.vout as usize).cloned()
					}
				)*
				None
			};
			let mut total_value_in = 0;
			for input in $tx.input.iter() {
				total_value_in += get_output(&input.previous_output).unwrap().value;
			}
			let mut total_value_out = 0;
			for output in $tx.output.iter() {
				total_value_out += output.value;
			}
			let min_fee = ($tx.get_weight() as u64 + 3) / 4; // One sat per vbyte (i.e. per weight/4, rounded up)
			// Input amount - output amount = fee, so check that out + min_fee is no greater than input
			assert!(total_value_out + min_fee <= total_value_in);
			$tx.verify(get_output).unwrap();
		}
	}
}
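
// Hedged usage sketch: assert that an assumed `closing_tx` spends only outputs
// of `funding_tx`, pays at least one sat per vbyte, and passes script
// verification:
//
//     check_spends!(closing_tx, funding_tx);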

macro_rules! get_closing_signed_broadcast {
	($node: expr, $dest_pubkey: expr) => {
		{
			let events = $node.get_and_clear_pending_msg_events();
			assert!(events.len() == 1 || events.len() == 2);
			(match events[events.len() - 1] {
				MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
					assert_eq!(msg.contents.flags & 2, 2);
					msg.clone()
				},
				_ => panic!("Unexpected event"),
			}, if events.len() == 2 {
				match events[0] {
					MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
						assert_eq!(*node_id, $dest_pubkey);
						Some(msg.clone())
					},
					_ => panic!("Unexpected event"),
				}
			} else { None })
		}
	}
}

/// Check that a channel's closing channel update has been broadcast, and optionally
/// check whether an error message event has occurred.
#[macro_export]
macro_rules! check_closed_broadcast {
	($node: expr, $with_error_msg: expr) => {{
		let msg_events = $node.node.get_and_clear_pending_msg_events();
		assert_eq!(msg_events.len(), if $with_error_msg { 2 } else { 1 });
		match msg_events[0] {
			MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
				assert_eq!(msg.contents.flags & 2, 2);
			},
			_ => panic!("Unexpected event"),
		}
		if $with_error_msg {
			match msg_events[1] {
				MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
					// TODO: Check node_id
					Some(msg.clone())
				},
				_ => panic!("Unexpected event"),
			}
		} else { None }
	}}
}

/// Check that a channel's `ChannelClosed` event has been issued the expected number of
/// times, each with the expected closure reason.
#[macro_export]
macro_rules! check_closed_event {
	($node: expr, $events: expr, $reason: expr) => {{
		let events = $node.node.get_and_clear_pending_events();
		assert_eq!(events.len(), $events);
		let expected_reason = $reason;
		for event in events {
			match event {
				Event::ChannelClosed { ref reason, .. } => {
					assert_eq!(*reason, expected_reason);
				},
				_ => panic!("Unexpected event"),
			}
		}
	}}
}
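
// Hedged usage sketch (after force-closing an assumed `chan` in a test):
//
//     nodes[0].node.force_close_channel(&chan.2).unwrap();
//     check_closed_broadcast!(nodes[0], true);
//     check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);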

pub fn close_channel<'a, 'b, 'c>(outbound_node: &Node<'a, 'b, 'c>, inbound_node: &Node<'a, 'b, 'c>, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, Transaction) {
	let (node_a, broadcaster_a, struct_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) } else { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) };
	let (node_b, broadcaster_b, struct_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) } else { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) };
	let (tx_a, tx_b);

	node_a.close_channel(channel_id).unwrap();
	node_b.handle_shutdown(&node_a.get_our_node_id(), &InitFeatures::known(), &get_event_msg!(struct_a, MessageSendEvent::SendShutdown, node_b.get_our_node_id()));

	let events_1 = node_b.get_and_clear_pending_msg_events();
	assert!(events_1.len() >= 1);
	let shutdown_b = match events_1[0] {
		MessageSendEvent::SendShutdown { ref node_id, ref msg } => {
			assert_eq!(node_id, &node_a.get_our_node_id());
			msg.clone()
		},
		_ => panic!("Unexpected event"),
	};

	let closing_signed_b = if !close_inbound_first {
		assert_eq!(events_1.len(), 1);
		None
	} else {
		Some(match events_1[1] {
			MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
				assert_eq!(node_id, &node_a.get_our_node_id());
				msg.clone()
			},
			_ => panic!("Unexpected event"),
		})
	};

	node_a.handle_shutdown(&node_b.get_our_node_id(), &InitFeatures::known(), &shutdown_b);
	let (as_update, bs_update) = if close_inbound_first {
		assert!(node_a.get_and_clear_pending_msg_events().is_empty());
		node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap());

		node_b.handle_closing_signed(&node_a.get_our_node_id(), &get_event_msg!(struct_a, MessageSendEvent::SendClosingSigned, node_b.get_our_node_id()));
		assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1);
		tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0);
		let (bs_update, closing_signed_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id());

		node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap());
		let (as_update, none_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id());
		assert!(none_a.is_none());
		assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1);
		tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0);
		(as_update, bs_update)
	} else {
		let closing_signed_a = get_event_msg!(struct_a, MessageSendEvent::SendClosingSigned, node_b.get_our_node_id());

		node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a);
		node_a.handle_closing_signed(&node_b.get_our_node_id(), &get_event_msg!(struct_b, MessageSendEvent::SendClosingSigned, node_a.get_our_node_id()));

		assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1);
		tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0);
		let (as_update, closing_signed_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id());

		node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a.unwrap());
		let (bs_update, none_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id());
		assert!(none_b.is_none());
		assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1);
		tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0);
		(as_update, bs_update)
	};
	assert_eq!(tx_a, tx_b);
	check_spends!(tx_a, funding_tx);

	(as_update, bs_update, tx_a)
}
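
// Hedged usage sketch (with an assumed `chan` from
// create_announced_chan_between_nodes): cooperatively close, returning both
// directions' final channel updates plus the shared closing transaction:
//
//     let (as_upd, bs_upd, closing_tx) = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);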

pub struct SendEvent {
	pub node_id: PublicKey,
	pub msgs: Vec<msgs::UpdateAddHTLC>,
	pub commitment_msg: msgs::CommitmentSigned,
}
impl SendEvent {
	pub fn from_commitment_update(node_id: PublicKey, updates: msgs::CommitmentUpdate) -> SendEvent {
		assert!(updates.update_fulfill_htlcs.is_empty());
		assert!(updates.update_fail_htlcs.is_empty());
		assert!(updates.update_fail_malformed_htlcs.is_empty());
		assert!(updates.update_fee.is_none());
		SendEvent { node_id: node_id, msgs: updates.update_add_htlcs, commitment_msg: updates.commitment_signed }
	}

	pub fn from_event(event: MessageSendEvent) -> SendEvent {
		match event {
			MessageSendEvent::UpdateHTLCs { node_id, updates } => SendEvent::from_commitment_update(node_id, updates),
			_ => panic!("Unexpected event type!"),
		}
	}

	pub fn from_node<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>) -> SendEvent {
		let mut events = node.node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		SendEvent::from_event(events.pop().unwrap())
	}
}
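
// Hedged usage sketch: pull the single queued update_add_htlc + commitment
// from a sender with an assumed pending outbound payment, then forward it:
//
//     let payment_event = SendEvent::from_node(&nodes[0]);
//     nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
//     commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);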
|
|
|
|
|
|
|
|
macro_rules! commitment_signed_dance {
	($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr, true /* skip last step */) => {
		{
			check_added_monitors!($node_a, 0);
			assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
			$node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed);
			check_added_monitors!($node_a, 1);
			commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, false);
		}
	};
	($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */, true /* return last RAA */) => {
		{
			let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!($node_a, $node_b.node.get_our_node_id());
			check_added_monitors!($node_b, 0);
			assert!($node_b.node.get_and_clear_pending_msg_events().is_empty());
			$node_b.node.handle_revoke_and_ack(&$node_a.node.get_our_node_id(), &as_revoke_and_ack);
			assert!($node_b.node.get_and_clear_pending_msg_events().is_empty());
			check_added_monitors!($node_b, 1);
			$node_b.node.handle_commitment_signed(&$node_a.node.get_our_node_id(), &as_commitment_signed);
			let (bs_revoke_and_ack, extra_msg_option) = {
				let events = $node_b.node.get_and_clear_pending_msg_events();
				assert!(events.len() <= 2);
				(match events[0] {
					MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
						assert_eq!(*node_id, $node_a.node.get_our_node_id());
						(*msg).clone()
					},
					_ => panic!("Unexpected event"),
				}, events.get(1).map(|e| e.clone()))
			};
			check_added_monitors!($node_b, 1);
			if $fail_backwards {
				assert!($node_a.node.get_and_clear_pending_events().is_empty());
				assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
			}
			(extra_msg_option, bs_revoke_and_ack)
		}
	};
	($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr, true /* skip last step */, false /* return extra message */, true /* return last RAA */) => {
		{
			check_added_monitors!($node_a, 0);
			assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
			$node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed);
			check_added_monitors!($node_a, 1);
			let (extra_msg_option, bs_revoke_and_ack) = commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true, true);
			assert!(extra_msg_option.is_none());
			bs_revoke_and_ack
		}
	};
	($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */) => {
		{
			let (extra_msg_option, bs_revoke_and_ack) = commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true, true);
			$node_a.node.handle_revoke_and_ack(&$node_b.node.get_our_node_id(), &bs_revoke_and_ack);
			check_added_monitors!($node_a, 1);
			extra_msg_option
		}
	};
	($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, false /* no extra message */) => {
		{
			assert!(commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true).is_none());
		}
	};
	($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr) => {
		{
			commitment_signed_dance!($node_a, $node_b, $commitment_signed, $fail_backwards, true);
			if $fail_backwards {
				expect_pending_htlcs_forwardable!($node_a);
				check_added_monitors!($node_a, 1);

				let channel_state = $node_a.node.channel_state.lock().unwrap();
				assert_eq!(channel_state.pending_msg_events.len(), 1);
				if let MessageSendEvent::UpdateHTLCs { ref node_id, .. } = channel_state.pending_msg_events[0] {
					assert_ne!(*node_id, $node_b.node.get_our_node_id());
				} else { panic!("Unexpected event"); }
			} else {
				assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
			}
		}
	}
}

/// Get a payment preimage and hash.
#[macro_export]
macro_rules! get_payment_preimage_hash {
	($dest_node: expr) => {
		{
			let payment_preimage = PaymentPreimage([*$dest_node.network_payment_count.borrow(); 32]);
			*$dest_node.network_payment_count.borrow_mut() += 1;
			let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
			let payment_secret = $dest_node.node.create_inbound_payment_for_hash(payment_hash, None, 7200, 0).unwrap();
			(payment_preimage, payment_hash, payment_secret)
		}
	}
}

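// Example use (illustrative; `nodes` comes from `create_network` in the surrounding test):
//
//   let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[1]);
//
// Preimages are drawn from the shared per-network payment counter, so each call yields a fresh,
// deterministic (preimage, hash, secret) triple for the destination node.
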
#[cfg(test)]
macro_rules! get_route_and_payment_hash {
	($send_node: expr, $recv_node: expr, $recv_value: expr) => {{
		let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!($recv_node);
		let net_graph_msg_handler = &$send_node.net_graph_msg_handler;
		let route = get_route(&$send_node.node.get_our_node_id(),
			&net_graph_msg_handler.network_graph,
			&$recv_node.node.get_our_node_id(), None, None, &Vec::new(), $recv_value, TEST_FINAL_CLTV, $send_node.logger).unwrap();
		(route, payment_hash, payment_preimage, payment_secret)
	}}
}

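// HTLC forwarding in ChannelManager is two-phase: a `PendingHTLCsForwardable` event first tells
// the user to (eventually) call `process_pending_htlc_forwards`, which then actually forwards.
// The two macros below check the event alone, or check it and trigger the forward, respectively.
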
macro_rules! expect_pending_htlcs_forwardable_ignore {
	($node: expr) => {{
		let events = $node.node.get_and_clear_pending_events();
		assert_eq!(events.len(), 1);
		match events[0] {
			Event::PendingHTLCsForwardable { .. } => { },
			_ => panic!("Unexpected event"),
		};
	}}
}

macro_rules! expect_pending_htlcs_forwardable {
	($node: expr) => {{
		expect_pending_htlcs_forwardable_ignore!($node);
		$node.node.process_pending_htlc_forwards();
	}}
}

#[cfg(test)]
macro_rules! expect_pending_htlcs_forwardable_from_events {
	($node: expr, $events: expr, $ignore: expr) => {{
		assert_eq!($events.len(), 1);
		match $events[0] {
			Event::PendingHTLCsForwardable { .. } => { },
			_ => panic!("Unexpected event"),
		};
		if $ignore {
			$node.node.process_pending_htlc_forwards();
		}
	}}
}

#[cfg(any(test, feature = "unstable"))]
macro_rules! expect_payment_received {
	($node: expr, $expected_payment_hash: expr, $expected_payment_secret: expr, $expected_recv_value: expr) => {
		let events = $node.node.get_and_clear_pending_events();
		assert_eq!(events.len(), 1);
		match events[0] {
			Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
				assert_eq!($expected_payment_hash, *payment_hash);
				assert_eq!($expected_recv_value, amt);
				match purpose {
					PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
						assert!(payment_preimage.is_none());
						assert_eq!($expected_payment_secret, *payment_secret);
					},
					_ => {},
				}
			},
			_ => panic!("Unexpected event"),
		}
	}
}

macro_rules! expect_payment_sent {
	($node: expr, $expected_payment_preimage: expr) => {
		let events = $node.node.get_and_clear_pending_events();
		assert_eq!(events.len(), 1);
		match events[0] {
			Event::PaymentSent { ref payment_preimage } => {
				assert_eq!($expected_payment_preimage, *payment_preimage);
			},
			_ => panic!("Unexpected event"),
		}
	}
}

macro_rules! expect_payment_forwarded {
	($node: expr, $expected_fee: expr, $upstream_force_closed: expr) => {
		let events = $node.node.get_and_clear_pending_events();
		assert_eq!(events.len(), 1);
		match events[0] {
			Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx } => {
				assert_eq!(fee_earned_msat, $expected_fee);
				assert_eq!(claim_from_onchain_tx, $upstream_force_closed);
			},
			_ => panic!("Unexpected event"),
		}
	}
}

#[cfg(test)]
macro_rules! expect_payment_failed_with_update {
	($node: expr, $expected_payment_hash: expr, $rejected_by_dest: expr, $scid: expr, $chan_closed: expr) => {
		let events = $node.node.get_and_clear_pending_events();
		assert_eq!(events.len(), 1);
		match events[0] {
			Event::PaymentPathFailed { ref payment_hash, rejected_by_dest, ref network_update, ref error_code, ref error_data, .. } => {
				assert_eq!(*payment_hash, $expected_payment_hash, "unexpected payment_hash");
				assert_eq!(rejected_by_dest, $rejected_by_dest, "unexpected rejected_by_dest value");
				assert!(error_code.is_some(), "expected error_code.is_some() = true");
				assert!(error_data.is_some(), "expected error_data.is_some() = true");
				match network_update {
					&Some(NetworkUpdate::ChannelUpdateMessage { ref msg }) if !$chan_closed => {
						assert_eq!(msg.contents.short_channel_id, $scid);
						assert_eq!(msg.contents.flags & 2, 0);
					},
					&Some(NetworkUpdate::ChannelClosed { short_channel_id, is_permanent }) if $chan_closed => {
						assert_eq!(short_channel_id, $scid);
						assert!(is_permanent);
					},
					Some(_) => panic!("Unexpected update type"),
					None => panic!("Expected update"),
				}
			},
			_ => panic!("Unexpected event"),
		}
	}
}

#[cfg(test)]
macro_rules! expect_payment_failed {
	($node: expr, $expected_payment_hash: expr, $rejected_by_dest: expr $(, $expected_error_code: expr, $expected_error_data: expr)*) => {
		let events = $node.node.get_and_clear_pending_events();
		assert_eq!(events.len(), 1);
		match events[0] {
			Event::PaymentPathFailed { ref payment_hash, rejected_by_dest, network_update: _, ref error_code, ref error_data, .. } => {
				assert_eq!(*payment_hash, $expected_payment_hash, "unexpected payment_hash");
				assert_eq!(rejected_by_dest, $rejected_by_dest, "unexpected rejected_by_dest value");
				assert!(error_code.is_some(), "expected error_code.is_some() = true");
				assert!(error_data.is_some(), "expected error_data.is_some() = true");
				$(
					assert_eq!(error_code.unwrap(), $expected_error_code, "unexpected error code");
					assert_eq!(&error_data.as_ref().unwrap()[..], $expected_error_data, "unexpected error data");
				)*
			},
			_ => panic!("Unexpected event"),
		}
	}
}

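// The trailing repetition lets callers optionally pin the exact onion error code and data as
// well, e.g. (illustrative values only):
//
//   expect_payment_failed!(nodes[0], payment_hash, true, 0x4000 | 15, &error_data[..]);
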
pub fn send_along_route_with_secret<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, route: Route, expected_paths: &[&[&Node<'a, 'b, 'c>]], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: PaymentSecret) {
	origin_node.node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
	check_added_monitors!(origin_node, expected_paths.len());
	pass_along_route(origin_node, expected_paths, recv_value, our_payment_hash, our_payment_secret);
}

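// The helper below walks a single payment path hop by hop: each intermediate node receives the
// `update_add_htlc`, completes a commitment_signed dance with the previous hop, and then
// forwards; the terminal hop instead surfaces (or, if not expected, must not surface) a
// `PaymentReceived` event.
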
pub fn pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path: &[&Node<'a, 'b, 'c>], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: Option<PaymentSecret>, ev: MessageSendEvent, payment_received_expected: bool, expected_preimage: Option<PaymentPreimage>) {
	let mut payment_event = SendEvent::from_event(ev);
	let mut prev_node = origin_node;

	for (idx, &node) in expected_path.iter().enumerate() {
		assert_eq!(node.node.get_our_node_id(), payment_event.node_id);

		node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]);
		check_added_monitors!(node, 0);
		commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false);

		expect_pending_htlcs_forwardable!(node);

		if idx == expected_path.len() - 1 {
			let events_2 = node.node.get_and_clear_pending_events();
			if payment_received_expected {
				assert_eq!(events_2.len(), 1);
				match events_2[0] {
					Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
						assert_eq!(our_payment_hash, *payment_hash);
						match &purpose {
							PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
								assert_eq!(expected_preimage, *payment_preimage);
								assert_eq!(our_payment_secret.unwrap(), *payment_secret);
							},
							PaymentPurpose::SpontaneousPayment(payment_preimage) => {
								assert_eq!(expected_preimage.unwrap(), *payment_preimage);
								assert!(our_payment_secret.is_none());
							},
						}
						assert_eq!(amt, recv_value);
					},
					_ => panic!("Unexpected event"),
				}
			} else {
				assert!(events_2.is_empty());
			}
		} else {
			let mut events_2 = node.node.get_and_clear_pending_msg_events();
			assert_eq!(events_2.len(), 1);
			check_added_monitors!(node, 1);
			payment_event = SendEvent::from_event(events_2.remove(0));
			assert_eq!(payment_event.msgs.len(), 1);
		}

		prev_node = node;
	}
}

pub fn pass_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&[&Node<'a, 'b, 'c>]], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: PaymentSecret) {
	let mut events = origin_node.node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), expected_route.len());
	for (path_idx, (ev, expected_path)) in events.drain(..).zip(expected_route.iter()).enumerate() {
		// Once we've gotten through all the HTLCs, the last one should result in a
		// PaymentReceived (but each previous one should not!).
		let expect_payment = path_idx == expected_route.len() - 1;
		pass_along_path(origin_node, expected_path, recv_value, our_payment_hash.clone(), Some(our_payment_secret), ev, expect_payment, None);
	}
}

pub fn send_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, route: Route, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) -> (PaymentPreimage, PaymentHash, PaymentSecret) {
	let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(expected_route.last().unwrap());
	send_along_route_with_secret(origin_node, route, &[expected_route], recv_value, our_payment_hash, our_payment_secret);
	(our_payment_preimage, our_payment_hash, our_payment_secret)
}

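// Claiming walks each path in reverse: the recipient's `update_fulfill_htlc` is passed back
// hop by hop (with a commitment_signed dance and a `PaymentForwarded` check at every
// intermediate node) until the origin sees `PaymentSent`.
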
pub fn claim_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_paths: &[&[&Node<'a, 'b, 'c>]], skip_last: bool, our_payment_preimage: PaymentPreimage) {
	for path in expected_paths.iter() {
		assert_eq!(path.last().unwrap().node.get_our_node_id(), expected_paths[0].last().unwrap().node.get_our_node_id());
	}
	assert!(expected_paths[0].last().unwrap().node.claim_funds(our_payment_preimage));
	check_added_monitors!(expected_paths[0].last().unwrap(), expected_paths.len());

	macro_rules! msgs_from_ev {
		($ev: expr) => {
			match $ev {
				&MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
					assert!(update_add_htlcs.is_empty());
					assert_eq!(update_fulfill_htlcs.len(), 1);
					assert!(update_fail_htlcs.is_empty());
					assert!(update_fail_malformed_htlcs.is_empty());
					assert!(update_fee.is_none());
					((update_fulfill_htlcs[0].clone(), commitment_signed.clone()), node_id.clone())
				},
				_ => panic!("Unexpected event"),
			}
		}
	}
	let mut per_path_msgs: Vec<((msgs::UpdateFulfillHTLC, msgs::CommitmentSigned), PublicKey)> = Vec::with_capacity(expected_paths.len());
	let events = expected_paths[0].last().unwrap().node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), expected_paths.len());
	for ev in events.iter() {
		per_path_msgs.push(msgs_from_ev!(ev));
	}

	for (expected_route, (path_msgs, next_hop)) in expected_paths.iter().zip(per_path_msgs.drain(..)) {
		let mut next_msgs = Some(path_msgs);
		let mut expected_next_node = next_hop;

		macro_rules! last_update_fulfill_dance {
			($node: expr, $prev_node: expr) => {
				{
					$node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0);
					check_added_monitors!($node, 0);
					assert!($node.node.get_and_clear_pending_msg_events().is_empty());
					commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, false);
				}
			}
		}
		macro_rules! mid_update_fulfill_dance {
			($node: expr, $prev_node: expr, $new_msgs: expr) => {
				{
					$node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0);
					let fee = $node.node.channel_state.lock().unwrap().by_id.get(&next_msgs.as_ref().unwrap().0.channel_id).unwrap().config.forwarding_fee_base_msat;
					expect_payment_forwarded!($node, Some(fee as u64), false);
					check_added_monitors!($node, 1);
					let new_next_msgs = if $new_msgs {
						let events = $node.node.get_and_clear_pending_msg_events();
						assert_eq!(events.len(), 1);
						let (res, nexthop) = msgs_from_ev!(&events[0]);
						expected_next_node = nexthop;
						Some(res)
					} else {
						assert!($node.node.get_and_clear_pending_msg_events().is_empty());
						None
					};
					commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, false);
					next_msgs = new_next_msgs;
				}
			}
		}

		let mut prev_node = expected_route.last().unwrap();
		for (idx, node) in expected_route.iter().rev().enumerate().skip(1) {
			assert_eq!(expected_next_node, node.node.get_our_node_id());
			let update_next_msgs = !skip_last || idx != expected_route.len() - 1;
			if next_msgs.is_some() {
				mid_update_fulfill_dance!(node, prev_node, update_next_msgs);
			} else {
				assert!(!update_next_msgs);
				assert!(node.node.get_and_clear_pending_msg_events().is_empty());
			}
			if !skip_last && idx == expected_route.len() - 1 {
				assert_eq!(expected_next_node, origin_node.node.get_our_node_id());
			}

			prev_node = node;
		}

		if !skip_last {
			last_update_fulfill_dance!(origin_node, expected_route.first().unwrap());
		}
	}
	if !skip_last {
		expect_payment_sent!(origin_node, our_payment_preimage);
	}
}

pub fn claim_payment<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], our_payment_preimage: PaymentPreimage) {
	claim_payment_along_route(origin_node, &[expected_route], false, our_payment_preimage);
}

pub const TEST_FINAL_CLTV: u32 = 70;

pub fn route_payment<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) -> (PaymentPreimage, PaymentHash, PaymentSecret) {
	let net_graph_msg_handler = &origin_node.net_graph_msg_handler;
	let route = get_route(&origin_node.node.get_our_node_id(), &net_graph_msg_handler.network_graph,
		&expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()),
		Some(&origin_node.node.list_usable_channels().iter().collect::<Vec<_>>()), &[],
		recv_value, TEST_FINAL_CLTV, origin_node.logger).unwrap();
	assert_eq!(route.paths.len(), 1);
	assert_eq!(route.paths[0].len(), expected_route.len());
	for (node, hop) in expected_route.iter().zip(route.paths[0].iter()) {
		assert_eq!(hop.pubkey, node.node.get_our_node_id());
	}

	send_along_route(origin_node, route, expected_route, recv_value)
}

pub fn route_over_limit<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) {
	let net_graph_msg_handler = &origin_node.net_graph_msg_handler;
	let route = get_route(&origin_node.node.get_our_node_id(), &net_graph_msg_handler.network_graph, &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), recv_value, TEST_FINAL_CLTV, origin_node.logger).unwrap();
	assert_eq!(route.paths.len(), 1);
	assert_eq!(route.paths[0].len(), expected_route.len());
	for (node, hop) in expected_route.iter().zip(route.paths[0].iter()) {
		assert_eq!(hop.pubkey, node.node.get_our_node_id());
	}

	let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(expected_route.last().unwrap());
	unwrap_send_err!(origin_node.node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err },
		assert!(err.contains("Cannot send value that would put us over the max HTLC value in flight our peer will accept")));
}

pub fn send_payment<'a, 'b, 'c>(origin: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) {
	let our_payment_preimage = route_payment(&origin, expected_route, recv_value).0;
	claim_payment(&origin, expected_route, our_payment_preimage);
}

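// End-to-end, the helpers above compose as follows in a typical functional test (a sketch;
// `nodes` comes from `create_network`):
//
//   send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
//
// which routes 1,000,000 msat from node 0 through node 1 to node 2 and claims it back to the
// origin, asserting every message and monitor update along the way.
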
pub fn fail_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_paths_slice: &[&[&Node<'a, 'b, 'c>]], skip_last: bool, our_payment_hash: PaymentHash) {
	let mut expected_paths: Vec<_> = expected_paths_slice.iter().collect();
	for path in expected_paths.iter() {
		assert_eq!(path.last().unwrap().node.get_our_node_id(), expected_paths[0].last().unwrap().node.get_our_node_id());
	}
	assert!(expected_paths[0].last().unwrap().node.fail_htlc_backwards(&our_payment_hash));
	expect_pending_htlcs_forwardable!(expected_paths[0].last().unwrap());
	check_added_monitors!(expected_paths[0].last().unwrap(), expected_paths.len());

	let mut per_path_msgs: Vec<((msgs::UpdateFailHTLC, msgs::CommitmentSigned), PublicKey)> = Vec::with_capacity(expected_paths.len());
	let events = expected_paths[0].last().unwrap().node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), expected_paths.len());
	for ev in events.iter() {
		let (update_fail, commitment_signed, node_id) = match ev {
			&MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
				assert!(update_add_htlcs.is_empty());
				assert!(update_fulfill_htlcs.is_empty());
				assert_eq!(update_fail_htlcs.len(), 1);
				assert!(update_fail_malformed_htlcs.is_empty());
				assert!(update_fee.is_none());
				(update_fail_htlcs[0].clone(), commitment_signed.clone(), node_id.clone())
			},
			_ => panic!("Unexpected event"),
		};
		per_path_msgs.push(((update_fail, commitment_signed), node_id));
	}
	per_path_msgs.sort_unstable_by(|(_, node_id_a), (_, node_id_b)| node_id_a.cmp(node_id_b));
	expected_paths.sort_unstable_by(|path_a, path_b| path_a[path_a.len() - 2].node.get_our_node_id().cmp(&path_b[path_b.len() - 2].node.get_our_node_id()));

	for (i, (expected_route, (path_msgs, next_hop))) in expected_paths.iter().zip(per_path_msgs.drain(..)).enumerate() {
		let mut next_msgs = Some(path_msgs);
		let mut expected_next_node = next_hop;
		let mut prev_node = expected_route.last().unwrap();

		for (idx, node) in expected_route.iter().rev().enumerate().skip(1) {
			assert_eq!(expected_next_node, node.node.get_our_node_id());
			let update_next_node = !skip_last || idx != expected_route.len() - 1;
			if next_msgs.is_some() {
				node.node.handle_update_fail_htlc(&prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0);
				commitment_signed_dance!(node, prev_node, next_msgs.as_ref().unwrap().1, update_next_node);
				if !update_next_node {
					expect_pending_htlcs_forwardable!(node);
				}
			}
			let events = node.node.get_and_clear_pending_msg_events();
			if update_next_node {
				assert_eq!(events.len(), 1);
				match events[0] {
					MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
						assert!(update_add_htlcs.is_empty());
						assert!(update_fulfill_htlcs.is_empty());
						assert_eq!(update_fail_htlcs.len(), 1);
						assert!(update_fail_malformed_htlcs.is_empty());
						assert!(update_fee.is_none());
						expected_next_node = node_id.clone();
						next_msgs = Some((update_fail_htlcs[0].clone(), commitment_signed.clone()));
					},
					_ => panic!("Unexpected event"),
				}
			} else {
				assert!(events.is_empty());
			}
			if !skip_last && idx == expected_route.len() - 1 {
				assert_eq!(expected_next_node, origin_node.node.get_our_node_id());
			}

			prev_node = node;
		}

		if !skip_last {
			let prev_node = expected_route.first().unwrap();
			origin_node.node.handle_update_fail_htlc(&prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0);
			check_added_monitors!(origin_node, 0);
			assert!(origin_node.node.get_and_clear_pending_msg_events().is_empty());
			commitment_signed_dance!(origin_node, prev_node, next_msgs.as_ref().unwrap().1, false);
			let events = origin_node.node.get_and_clear_pending_events();
			assert_eq!(events.len(), 1);
			match events[0] {
				Event::PaymentPathFailed { payment_hash, rejected_by_dest, all_paths_failed, ref path, .. } => {
					assert_eq!(payment_hash, our_payment_hash);
					assert!(rejected_by_dest);
					assert_eq!(all_paths_failed, i == expected_paths.len() - 1);
					for (idx, hop) in expected_route.iter().enumerate() {
						assert_eq!(hop.node.get_our_node_id(), path[idx].pubkey);
					}
				},
				_ => panic!("Unexpected event"),
			}
		}
	}
}

pub fn fail_payment<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path: &[&Node<'a, 'b, 'c>], our_payment_hash: PaymentHash) {
	fail_payment_along_route(origin_node, &[&expected_path[..]], false, our_payment_hash);
}

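// The four constructors below build the standard layered test harness. Each layer borrows from
// the previous one, which is why tests must keep all four bindings alive (a sketch of the usual
// call order):
//
//   let chanmon_cfgs = create_chanmon_cfgs(2);
//   let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
//   let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
//   let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
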
pub fn create_chanmon_cfgs(node_count: usize) -> Vec<TestChanMonCfg> {
	let mut chan_mon_cfgs = Vec::new();
	for i in 0..node_count {
		let tx_broadcaster = test_utils::TestBroadcaster {
			txn_broadcasted: Mutex::new(Vec::new()),
			blocks: Arc::new(Mutex::new(vec![(genesis_block(Network::Testnet).header, 0)])),
		};
		let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
		let chain_source = test_utils::TestChainSource::new(Network::Testnet);
		let logger = test_utils::TestLogger::with_id(format!("node {}", i));
		let persister = test_utils::TestPersister::new();
		let seed = [i as u8; 32];
		let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);

		chan_mon_cfgs.push(TestChanMonCfg{ tx_broadcaster, fee_estimator, chain_source, logger, persister, keys_manager });
	}

	chan_mon_cfgs
}

pub fn create_node_cfgs<'a>(node_count: usize, chanmon_cfgs: &'a Vec<TestChanMonCfg>) -> Vec<NodeCfg<'a>> {
	let mut nodes = Vec::new();

	for i in 0..node_count {
		let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[i].chain_source), &chanmon_cfgs[i].tx_broadcaster, &chanmon_cfgs[i].logger, &chanmon_cfgs[i].fee_estimator, &chanmon_cfgs[i].persister, &chanmon_cfgs[i].keys_manager);
		let seed = [i as u8; 32];
		nodes.push(NodeCfg {
			chain_source: &chanmon_cfgs[i].chain_source,
			logger: &chanmon_cfgs[i].logger,
			tx_broadcaster: &chanmon_cfgs[i].tx_broadcaster,
			fee_estimator: &chanmon_cfgs[i].fee_estimator,
			chain_monitor,
			keys_manager: &chanmon_cfgs[i].keys_manager,
			node_seed: seed,
			features: InitFeatures::known(),
		});
	}

	nodes
}

pub fn test_default_channel_config() -> UserConfig {
	let mut default_config = UserConfig::default();
	// Set cltv_expiry_delta slightly lower to keep the final CLTV values inside one byte in our
	// tests so that our script-length checks don't fail (see ACCEPTED_HTLC_SCRIPT_WEIGHT).
	default_config.channel_options.cltv_expiry_delta = 6*6;
	default_config.channel_options.announced_channel = true;
	default_config.peer_channel_config_limits.force_announced_channel_preference = false;
	// When most of our tests were written, the default HTLC minimum was fixed at 1000.
	// It now defaults to 1, so we simply set it to the expected value here.
	default_config.own_channel_config.our_htlc_minimum_msat = 1000;
	// When most of our tests were written, we didn't have the notion of a `max_dust_htlc_exposure_msat`.
	// It now defaults to 5_000_000 msat; to avoid interfering with tests we bump it to 50_000_000 msat.
	default_config.channel_options.max_dust_htlc_exposure_msat = 50_000_000;
	default_config
}

pub fn create_node_chanmgrs<'a, 'b>(node_count: usize, cfgs: &'a Vec<NodeCfg<'b>>, node_config: &[Option<UserConfig>]) -> Vec<ChannelManager<EnforcingSigner, &'a TestChainMonitor<'b>, &'b test_utils::TestBroadcaster, &'a test_utils::TestKeysInterface, &'b test_utils::TestFeeEstimator, &'b test_utils::TestLogger>> {
	let mut chanmgrs = Vec::new();
	for i in 0..node_count {
		let network = Network::Testnet;
		let params = ChainParameters {
			network,
			best_block: BestBlock::from_genesis(network),
		};
		let node = ChannelManager::new(cfgs[i].fee_estimator, &cfgs[i].chain_monitor, cfgs[i].tx_broadcaster, cfgs[i].logger, cfgs[i].keys_manager,
			if node_config[i].is_some() { node_config[i].clone().unwrap() } else { test_default_channel_config() }, params);
		chanmgrs.push(node);
	}

	chanmgrs
}

pub fn create_network<'a, 'b: 'a, 'c: 'b>(node_count: usize, cfgs: &'b Vec<NodeCfg<'c>>, chan_mgrs: &'a Vec<ChannelManager<EnforcingSigner, &'b TestChainMonitor<'c>, &'c test_utils::TestBroadcaster, &'b test_utils::TestKeysInterface, &'c test_utils::TestFeeEstimator, &'c test_utils::TestLogger>>) -> Vec<Node<'a, 'b, 'c>> {
	let mut nodes = Vec::new();
	let chan_count = Rc::new(RefCell::new(0));
	let payment_count = Rc::new(RefCell::new(0));
	let connect_style = Rc::new(RefCell::new(ConnectStyle::FullBlockViaListen));

	for i in 0..node_count {
		let network_graph = NetworkGraph::new(cfgs[i].chain_source.genesis_hash);
		let net_graph_msg_handler = NetGraphMsgHandler::new(network_graph, None, cfgs[i].logger);
		nodes.push(Node{ chain_source: cfgs[i].chain_source,
			tx_broadcaster: cfgs[i].tx_broadcaster, chain_monitor: &cfgs[i].chain_monitor,
			keys_manager: &cfgs[i].keys_manager, node: &chan_mgrs[i], net_graph_msg_handler,
			node_seed: cfgs[i].node_seed, network_chan_count: chan_count.clone(),
			network_payment_count: payment_count.clone(), logger: cfgs[i].logger,
			blocks: Arc::clone(&cfgs[i].tx_broadcaster.blocks),
			connect_style: Rc::clone(&connect_style),
		})
	}

	for i in 0..node_count {
		for j in (i+1)..node_count {
			nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &msgs::Init { features: cfgs[j].features.clone() });
			nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &msgs::Init { features: cfgs[i].features.clone() });
		}
	}

	nodes
}

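// Note that all nodes share a single `connect_style` `Rc<RefCell<_>>`, so a test can change how
// blocks are delivered to every node at once (see `ConnectStyle`), and `create_network`
// pre-connects every pair of peers with their advertised `InitFeatures`.
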
// Note that the following only works for CLTV values up to 128
pub const ACCEPTED_HTLC_SCRIPT_WEIGHT: usize = 137; // Here we have a diff due to HTLC CLTV expiry being < 2^15 in test
pub const OFFERED_HTLC_SCRIPT_WEIGHT: usize = 133;

#[derive(PartialEq)]
pub enum HTLCType { NONE, TIMEOUT, SUCCESS }
/// Tests that the given node has broadcast transactions for the given Channel
///
/// First checks that the latest holder commitment tx has been broadcast, unless an explicit
/// commitment_tx is provided, which may be used to test that a remote commitment tx was
/// broadcast and the revoked outputs were claimed.
///
/// Next tests that there is (or is not) a transaction that spends the commitment transaction
/// that appears to be the type of HTLC transaction specified in has_htlc_tx.
///
/// All broadcast transactions must be accounted for in one of the above three types or we'll
/// also fail.
pub fn test_txn_broadcast<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, chan: &(msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction), commitment_tx: Option<Transaction>, has_htlc_tx: HTLCType) -> Vec<Transaction> {
	let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
	assert!(node_txn.len() >= if commitment_tx.is_some() { 0 } else { 1 } + if has_htlc_tx == HTLCType::NONE { 0 } else { 1 });

	let mut res = Vec::with_capacity(2);
	node_txn.retain(|tx| {
		if tx.input.len() == 1 && tx.input[0].previous_output.txid == chan.3.txid() {
			check_spends!(tx, chan.3);
			if commitment_tx.is_none() {
				res.push(tx.clone());
			}
			false
		} else { true }
	});
	if let Some(explicit_tx) = commitment_tx {
		res.push(explicit_tx.clone());
	}

	assert_eq!(res.len(), 1);

	if has_htlc_tx != HTLCType::NONE {
		node_txn.retain(|tx| {
			if tx.input.len() == 1 && tx.input[0].previous_output.txid == res[0].txid() {
				check_spends!(tx, res[0]);
				if has_htlc_tx == HTLCType::TIMEOUT {
					assert!(tx.lock_time != 0);
				} else {
					assert!(tx.lock_time == 0);
				}
				res.push(tx.clone());
				false
			} else { true }
		});
		assert!(res.len() == 2 || res.len() == 3);
		if res.len() == 3 {
			assert_eq!(res[1], res[2]);
		}
	}

	assert!(node_txn.is_empty());
	res
}

/// Tests that the given node has broadcast a claim transaction against the provided revoked
/// HTLC transaction.
pub fn test_revoked_htlc_claim_txn_broadcast<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, revoked_tx: Transaction, commitment_revoked_tx: Transaction) {
	let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
	// We may issue multiple claiming transactions on revoked outputs due to block rescan
	// for revoked htlc outputs
	if node_txn.len() != 1 && node_txn.len() != 2 && node_txn.len() != 3 { assert!(false); }
	node_txn.retain(|tx| {
		if tx.input.len() == 1 && tx.input[0].previous_output.txid == revoked_tx.txid() {
			check_spends!(tx, revoked_tx);
			false
		} else { true }
	});
	node_txn.retain(|tx| {
		check_spends!(tx, commitment_revoked_tx);
		false
	});
	assert!(node_txn.is_empty());
}

pub fn check_preimage_claim<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, prev_txn: &Vec<Transaction>) -> Vec<Transaction> {
	let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();

	assert!(node_txn.len() >= 1);
	assert_eq!(node_txn[0].input.len(), 1);
	let mut found_prev = false;

	for tx in prev_txn {
		if node_txn[0].input[0].previous_output.txid == tx.txid() {
			check_spends!(node_txn[0], tx);
			assert!(node_txn[0].input[0].witness[2].len() > 106); // must spend an htlc output
			assert_eq!(tx.input.len(), 1); // must spend a commitment tx

			found_prev = true;
			break;
		}
	}
	assert!(found_prev);

	let mut res = Vec::new();
	mem::swap(&mut *node_txn, &mut res);
	res
}

pub fn handle_announce_close_broadcast_events<'a, 'b, 'c>(nodes: &Vec<Node<'a, 'b, 'c>>, a: usize, b: usize, needs_err_handle: bool, expected_error: &str) {
	let events_1 = nodes[a].node.get_and_clear_pending_msg_events();
	assert_eq!(events_1.len(), 2);
	let as_update = match events_1[0] {
		MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
			msg.clone()
		},
		_ => panic!("Unexpected event"),
	};
	match events_1[1] {
		MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
			assert_eq!(node_id, nodes[b].node.get_our_node_id());
			assert_eq!(msg.data, expected_error);
			if needs_err_handle {
				nodes[b].node.handle_error(&nodes[a].node.get_our_node_id(), msg);
			}
		},
		_ => panic!("Unexpected event"),
	}

	let events_2 = nodes[b].node.get_and_clear_pending_msg_events();
	assert_eq!(events_2.len(), if needs_err_handle { 1 } else { 2 });
	let bs_update = match events_2[0] {
		MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
			msg.clone()
		},
		_ => panic!("Unexpected event"),
	};
	if !needs_err_handle {
		match events_2[1] {
			MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
				assert_eq!(node_id, nodes[a].node.get_our_node_id());
				assert_eq!(msg.data, expected_error);
			},
			_ => panic!("Unexpected event"),
		}
	}

	for node in nodes {
		node.net_graph_msg_handler.handle_channel_update(&as_update).unwrap();
		node.net_graph_msg_handler.handle_channel_update(&bs_update).unwrap();
	}
}

pub fn get_announce_close_broadcast_events<'a, 'b, 'c>(nodes: &Vec<Node<'a, 'b, 'c>>, a: usize, b: usize) {
	handle_announce_close_broadcast_events(nodes, a, b, false, "Commitment or closing transaction was confirmed on chain.");
}

#[cfg(test)]
macro_rules! get_channel_value_stat {
	($node: expr, $channel_id: expr) => {{
		let chan_lock = $node.node.channel_state.lock().unwrap();
		let chan = chan_lock.by_id.get(&$channel_id).unwrap();
		chan.get_value_stat()
	}}
}

macro_rules! get_chan_reestablish_msgs {
	($src_node: expr, $dst_node: expr) => {
		{
			let mut res = Vec::with_capacity(1);
			for msg in $src_node.node.get_and_clear_pending_msg_events() {
				if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg {
					assert_eq!(*node_id, $dst_node.node.get_our_node_id());
					res.push(msg.clone());
				} else {
					panic!("Unexpected event")
				}
			}
			res
		}
	}
}

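// On reconnection a node may re-send, in order: `funding_locked`, `announcement_signatures`,
// then a `revoke_and_ack` and/or a `commitment_update` (in either order), and finally a
// `channel_update`. The macro below consumes whatever subset is present and returns
// `(funding_locked, revoke_and_ack, commitment_update, order)` for the caller to assert on.
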
macro_rules! handle_chan_reestablish_msgs {
	($src_node: expr, $dst_node: expr) => {
		{
			let msg_events = $src_node.node.get_and_clear_pending_msg_events();
			let mut idx = 0;
			let funding_locked = if let Some(&MessageSendEvent::SendFundingLocked { ref node_id, ref msg }) = msg_events.get(0) {
				idx += 1;
				assert_eq!(*node_id, $dst_node.node.get_our_node_id());
				Some(msg.clone())
			} else {
				None
			};

			if let Some(&MessageSendEvent::SendAnnouncementSignatures { ref node_id, msg: _ }) = msg_events.get(idx) {
				idx += 1;
				assert_eq!(*node_id, $dst_node.node.get_our_node_id());
			}

			let mut revoke_and_ack = None;
			let mut commitment_update = None;
			let order = if let Some(ev) = msg_events.get(idx) {
				match ev {
					&MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
						assert_eq!(*node_id, $dst_node.node.get_our_node_id());
						revoke_and_ack = Some(msg.clone());
						idx += 1;
						RAACommitmentOrder::RevokeAndACKFirst
					},
					&MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
						assert_eq!(*node_id, $dst_node.node.get_our_node_id());
						commitment_update = Some(updates.clone());
						idx += 1;
						RAACommitmentOrder::CommitmentFirst
					},
					&MessageSendEvent::SendChannelUpdate { .. } => RAACommitmentOrder::CommitmentFirst,
					_ => panic!("Unexpected event"),
				}
			} else {
				RAACommitmentOrder::CommitmentFirst
			};

			if let Some(ev) = msg_events.get(idx) {
				match ev {
					&MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
						assert_eq!(*node_id, $dst_node.node.get_our_node_id());
						assert!(revoke_and_ack.is_none());
						revoke_and_ack = Some(msg.clone());
						idx += 1;
					},
					&MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
						assert_eq!(*node_id, $dst_node.node.get_our_node_id());
						assert!(commitment_update.is_none());
						commitment_update = Some(updates.clone());
						idx += 1;
					},
					&MessageSendEvent::SendChannelUpdate { .. } => {},
					_ => panic!("Unexpected event"),
				}
			}

			if let Some(&MessageSendEvent::SendChannelUpdate { ref node_id, ref msg }) = msg_events.get(idx) {
				assert_eq!(*node_id, $dst_node.node.get_our_node_id());
				assert_eq!(msg.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected.
			}

			(funding_locked, revoke_and_ack, commitment_update, order)
		}
	}
}

/// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas
|
|
|
|
/// for claims/fails they are separated out.
|
2021-07-14 18:19:45 +00:00
|
|
|
pub fn reconnect_nodes<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, send_funding_locked: (bool, bool), pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_htlc_fails: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool)) {
|
2019-12-27 22:50:42 -05:00
|
|
|
node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
|
2019-01-23 16:46:44 -05:00
|
|
|
let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b);
|
2019-12-27 22:50:42 -05:00
|
|
|
node_b.node.peer_connected(&node_a.node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
|
2019-01-23 16:46:44 -05:00
|
|
|
let reestablish_2 = get_chan_reestablish_msgs!(node_b, node_a);
|
|
|
|
|
|
|
|
if send_funding_locked.0 {
|
|
|
|
// If a expects a funding_locked, it better not think it has received a revoke_and_ack
|
|
|
|
// from b
|
|
|
|
for reestablish in reestablish_1.iter() {
|
|
|
|
assert_eq!(reestablish.next_remote_commitment_number, 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if send_funding_locked.1 {
|
|
|
|
// If b expects a funding_locked, it better not think it has received a revoke_and_ack
|
|
|
|
// from a
|
|
|
|
for reestablish in reestablish_2.iter() {
|
|
|
|
assert_eq!(reestablish.next_remote_commitment_number, 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if send_funding_locked.0 || send_funding_locked.1 {
|
|
|
|
// If we expect any funding_locked's, both sides better have set
|
2020-09-06 19:51:21 -04:00
|
|
|
// next_holder_commitment_number to 1
|
2019-01-23 16:46:44 -05:00
|
|
|
for reestablish in reestablish_1.iter() {
|
|
|
|
assert_eq!(reestablish.next_local_commitment_number, 1);
|
|
|
|
}
|
|
|
|
for reestablish in reestablish_2.iter() {
|
|
|
|
assert_eq!(reestablish.next_local_commitment_number, 1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
let mut resp_1 = Vec::new();
|
|
|
|
for msg in reestablish_1 {
|
2019-11-05 18:51:05 -05:00
|
|
|
node_b.node.handle_channel_reestablish(&node_a.node.get_our_node_id(), &msg);
|
2019-01-23 16:46:44 -05:00
|
|
|
resp_1.push(handle_chan_reestablish_msgs!(node_b, node_a));
|
|
|
|
}
|
|
|
|
if pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
|
|
|
|
check_added_monitors!(node_b, 1);
|
|
|
|
} else {
|
|
|
|
check_added_monitors!(node_b, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
let mut resp_2 = Vec::new();
|
|
|
|
for msg in reestablish_2 {
|
2019-11-05 18:51:05 -05:00
|
|
|
node_a.node.handle_channel_reestablish(&node_b.node.get_our_node_id(), &msg);
|
2019-01-23 16:46:44 -05:00
|
|
|
resp_2.push(handle_chan_reestablish_msgs!(node_a, node_b));
|
|
|
|
}
|
|
|
|
if pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
|
|
|
|
check_added_monitors!(node_a, 1);
|
|
|
|
} else {
|
|
|
|
check_added_monitors!(node_a, 0);
|
|
|
|
}
|
|
|
|
|
2019-01-24 16:41:51 +02:00
|
|
|
	// We don't yet support both sides needing updates, as that would require a different
	// commitment dance:
	assert!((pending_htlc_adds.0 == 0 && pending_htlc_claims.0 == 0 && pending_htlc_fails.0 == 0 &&
			 pending_cell_htlc_claims.0 == 0 && pending_cell_htlc_fails.0 == 0) ||
			(pending_htlc_adds.1 == 0 && pending_htlc_claims.1 == 0 && pending_htlc_fails.1 == 0 &&
			 pending_cell_htlc_claims.1 == 0 && pending_cell_htlc_fails.1 == 0));

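	// Replay node_b's responses at node_a: funding_locked first, then any expected
	// revoke_and_ack, then the re-sent commitment update.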
	for chan_msgs in resp_1.drain(..) {
		if send_funding_locked.0 {
			node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &chan_msgs.0.unwrap());
			let announcement_event = node_a.node.get_and_clear_pending_msg_events();
			if !announcement_event.is_empty() {
				assert_eq!(announcement_event.len(), 1);
				if let MessageSendEvent::SendAnnouncementSignatures { .. } = announcement_event[0] {
					//TODO: Test announcement_sigs re-sending
				} else { panic!("Unexpected event!"); }
			}
		} else {
			assert!(chan_msgs.0.is_none());
		}
		if pending_raa.0 {
			assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst);
			node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &chan_msgs.1.unwrap());
			assert!(node_a.node.get_and_clear_pending_msg_events().is_empty());
			check_added_monitors!(node_a, 1);
		} else {
			assert!(chan_msgs.1.is_none());
		}
		if pending_htlc_adds.0 != 0 || pending_htlc_claims.0 != 0 || pending_htlc_fails.0 != 0 || pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
			let commitment_update = chan_msgs.2.unwrap();
			if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
				assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.0 as usize);
			} else {
				assert!(commitment_update.update_add_htlcs.is_empty());
			}
			assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0 + pending_cell_htlc_claims.0);
			assert_eq!(commitment_update.update_fail_htlcs.len(), pending_htlc_fails.0 + pending_cell_htlc_fails.0);
			assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
			for update_add in commitment_update.update_add_htlcs {
				node_a.node.handle_update_add_htlc(&node_b.node.get_our_node_id(), &update_add);
			}
			for update_fulfill in commitment_update.update_fulfill_htlcs {
				node_a.node.handle_update_fulfill_htlc(&node_b.node.get_our_node_id(), &update_fulfill);
			}
			for update_fail in commitment_update.update_fail_htlcs {
				node_a.node.handle_update_fail_htlc(&node_b.node.get_our_node_id(), &update_fail);
			}
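			// With real update_add_htlcs pending we can run the full commitment_signed dance;
			// with -1 (a bare response commitment_signed) we instead answer it with a lone
			// revoke_and_ack and expect no further messages.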
			if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
				commitment_signed_dance!(node_a, node_b, commitment_update.commitment_signed, false);
			} else {
				node_a.node.handle_commitment_signed(&node_b.node.get_our_node_id(), &commitment_update.commitment_signed);
				check_added_monitors!(node_a, 1);
				let as_revoke_and_ack = get_event_msg!(node_a, MessageSendEvent::SendRevokeAndACK, node_b.node.get_our_node_id());
				// No commitment_signed so get_event_msg's assert(len == 1) passes
				node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &as_revoke_and_ack);
				assert!(node_b.node.get_and_clear_pending_msg_events().is_empty());
				check_added_monitors!(node_b, 1);
			}
		} else {
			assert!(chan_msgs.2.is_none());
		}
	}

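	// Now replay node_a's responses at node_b, mirroring the handling above.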
	for chan_msgs in resp_2.drain(..) {
		if send_funding_locked.1 {
			node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &chan_msgs.0.unwrap());
			let announcement_event = node_b.node.get_and_clear_pending_msg_events();
			if !announcement_event.is_empty() {
				assert_eq!(announcement_event.len(), 1);
				if let MessageSendEvent::SendAnnouncementSignatures { .. } = announcement_event[0] {
					//TODO: Test announcement_sigs re-sending
				} else { panic!("Unexpected event!"); }
			}
		} else {
			assert!(chan_msgs.0.is_none());
		}
		if pending_raa.1 {
			assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst);
			node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &chan_msgs.1.unwrap());
			assert!(node_b.node.get_and_clear_pending_msg_events().is_empty());
			check_added_monitors!(node_b, 1);
		} else {
			assert!(chan_msgs.1.is_none());
		}
		if pending_htlc_adds.1 != 0 || pending_htlc_claims.1 != 0 || pending_htlc_fails.1 != 0 || pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
			let commitment_update = chan_msgs.2.unwrap();
			if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
				assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.1 as usize);
			}
			assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.1 + pending_cell_htlc_claims.1);
			assert_eq!(commitment_update.update_fail_htlcs.len(), pending_htlc_fails.1 + pending_cell_htlc_fails.1);
			assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
			for update_add in commitment_update.update_add_htlcs {
				node_b.node.handle_update_add_htlc(&node_a.node.get_our_node_id(), &update_add);
			}
			for update_fulfill in commitment_update.update_fulfill_htlcs {
				node_b.node.handle_update_fulfill_htlc(&node_a.node.get_our_node_id(), &update_fulfill);
			}
			for update_fail in commitment_update.update_fail_htlcs {
				node_b.node.handle_update_fail_htlc(&node_a.node.get_our_node_id(), &update_fail);
			}
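			// As on the other side: full dance for real adds, a lone revoke_and_ack for -1.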
			if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
				commitment_signed_dance!(node_b, node_a, commitment_update.commitment_signed, false);
			} else {
				node_b.node.handle_commitment_signed(&node_a.node.get_our_node_id(), &commitment_update.commitment_signed);
				check_added_monitors!(node_b, 1);
				let bs_revoke_and_ack = get_event_msg!(node_b, MessageSendEvent::SendRevokeAndACK, node_a.node.get_our_node_id());
				// No commitment_signed so get_event_msg's assert(len == 1) passes
				node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &bs_revoke_and_ack);
				assert!(node_a.node.get_and_clear_pending_msg_events().is_empty());
				check_added_monitors!(node_a, 1);
			}
		} else {
			assert!(chan_msgs.2.is_none());
		}
	}
}