// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! Test that monitor update failures don't get our channel state out of sync.
//! One of the biggest concerns with the monitor update failure handling code is that messages
//! resent after monitor updating is restored are delivered out-of-order, resulting in
//! commitment_signed messages having "invalid signatures".
//! To test this we stand up a network of three nodes and read bytes from the fuzz input to denote
//! actions such as sending payments, handling events, or changing monitor update return values on
//! a per-node basis. This should allow it to find any cases where the ordering of actions results
//! in us getting out of sync with ourselves, and, assuming at least one of our receive- or
//! send-side handling is correct, other peers. We consider it a failure if any action results in a
//! channel being force-closed.

use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::{Transaction, TxOut};
use bitcoin::blockdata::script::{Builder, Script};
use bitcoin::blockdata::opcodes;
use bitcoin::network::constants::Network;

use bitcoin::hashes::Hash as TraitImport;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hash_types::{BlockHash, WPubkeyHash};

use lightning::chain;
use lightning::chain::chainmonitor;
use lightning::chain::channelmonitor;
use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, MonitorEvent};
use lightning::chain::transaction::OutPoint;
use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
use lightning::chain::keysinterface::{KeysInterface, InMemoryChannelKeys};
use lightning::ln::channelmanager::{ChannelManager, PaymentHash, PaymentPreimage, PaymentSecret, PaymentSendFailure, ChannelManagerReadArgs};
use lightning::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, DecodeError, ErrorAction, UpdateAddHTLC, Init};
use lightning::util::enforcing_trait_impls::EnforcingChannelKeys;
use lightning::util::errors::APIError;
use lightning::util::events;
use lightning::util::logger::Logger;
use lightning::util::config::UserConfig;
use lightning::util::events::{EventsProvider, MessageSendEventsProvider};
use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use lightning::routing::router::{Route, RouteHop};

use utils::test_logger;
use utils::test_persister::TestPersister;

use bitcoin::secp256k1::key::{PublicKey,SecretKey};
use bitcoin::secp256k1::Secp256k1;

use std::mem;
use std::cmp::Ordering;
use std::collections::{HashSet, hash_map, HashMap};
use std::sync::{Arc,Mutex};
use std::sync::atomic;
use std::io::Cursor;

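// A fee estimator that always returns 253 sat per 1000 weight, i.e. just above the common
// 1 sat/vbyte relay floor, keeping fee calculations deterministic across fuzz runs.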
struct FuzzEstimator {}
impl FeeEstimator for FuzzEstimator {
	fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
		253
	}
}

pub struct TestBroadcaster {}
impl BroadcasterInterface for TestBroadcaster {
	fn broadcast_transaction(&self, _tx: &Transaction) { }
}

pub struct VecWriter(pub Vec<u8>);
impl Writer for VecWriter {
	fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
		self.0.extend_from_slice(buf);
		Ok(())
	}
	fn size_hint(&mut self, size: usize) {
		self.0.reserve_exact(size);
	}
}

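// Wraps the real chainmonitor::ChainMonitor but lets the fuzz input force monitor updates
// to return TemporaryFailure by setting `update_ret`.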
struct TestChainMonitor {
	pub logger: Arc<dyn Logger>,
	pub chain_monitor: Arc<chainmonitor::ChainMonitor<EnforcingChannelKeys, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
	pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
	// If we reload a node with an old copy of ChannelMonitors, the ChannelManager deserialization
	// logic will automatically force-close our channels for us (as we don't have an up-to-date
	// monitor implying we are not able to punish misbehaving counterparties). Because this test
	// "fails" if we ever force-close a channel, we avoid doing so, always saving the latest
	// fully-serialized monitor state here, as well as the corresponding update_id.
	pub latest_monitors: Mutex<HashMap<OutPoint, (u64, Vec<u8>)>>,
	pub should_update_manager: atomic::AtomicBool,
}
impl TestChainMonitor {
	pub fn new(broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>, persister: Arc<TestPersister>) -> Self {
		Self {
			chain_monitor: Arc::new(chainmonitor::ChainMonitor::new(None, broadcaster, logger.clone(), feeest, persister)),
			logger,
			update_ret: Mutex::new(Ok(())),
			latest_monitors: Mutex::new(HashMap::new()),
			should_update_manager: atomic::AtomicBool::new(false),
		}
	}
}
impl chain::Watch for TestChainMonitor {
	type Keys = EnforcingChannelKeys;

	fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingChannelKeys>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
		let mut ser = VecWriter(Vec::new());
		monitor.write(&mut ser).unwrap();
		if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) {
			panic!("Already had monitor pre-watch_channel");
		}
		self.should_update_manager.store(true, atomic::Ordering::Relaxed);
		assert!(self.chain_monitor.watch_channel(funding_txo, monitor).is_ok());
		self.update_ret.lock().unwrap().clone()
	}

	fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
		let mut map_lock = self.latest_monitors.lock().unwrap();
		let mut map_entry = match map_lock.entry(funding_txo) {
			hash_map::Entry::Occupied(entry) => entry,
			hash_map::Entry::Vacant(_) => panic!("Didn't have monitor on update call"),
		};
		let mut deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::
			read(&mut Cursor::new(&map_entry.get().1)).unwrap().1;
		deserialized_monitor.update_monitor(&update, &&TestBroadcaster{}, &&FuzzEstimator{}, &self.logger).unwrap();
		let mut ser = VecWriter(Vec::new());
		deserialized_monitor.write(&mut ser).unwrap();
		map_entry.insert((update.update_id, ser.0));
		self.should_update_manager.store(true, atomic::Ordering::Relaxed);
		self.update_ret.lock().unwrap().clone()
	}

	fn release_pending_monitor_events(&self) -> Vec<MonitorEvent> {
		return self.chain_monitor.release_pending_monitor_events();
	}
}

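// Key derivation is fully deterministic: every secret is a fixed byte pattern ending in a
// type-tag byte and the node_id, so fuzz runs are reproducible per node.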
struct KeyProvider {
	node_id: u8,
	rand_bytes_id: atomic::AtomicU8,
}
impl KeysInterface for KeyProvider {
	type ChanKeySigner = EnforcingChannelKeys;

	fn get_node_secret(&self) -> SecretKey {
		SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, self.node_id]).unwrap()
	}

	fn get_destination_script(&self) -> Script {
		let secp_ctx = Secp256k1::signing_only();
		let channel_monitor_claim_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, self.node_id]).unwrap();
		let our_channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
		Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&our_channel_monitor_claim_key_hash[..]).into_script()
	}

	fn get_shutdown_pubkey(&self) -> PublicKey {
		let secp_ctx = Secp256k1::signing_only();
		PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, self.node_id]).unwrap())
	}

	fn get_channel_keys(&self, _inbound: bool, channel_value_satoshis: u64) -> EnforcingChannelKeys {
		let secp_ctx = Secp256k1::signing_only();
		EnforcingChannelKeys::new(InMemoryChannelKeys::new(
			&secp_ctx,
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, self.node_id]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, self.node_id]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, self.node_id]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, self.node_id]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, self.node_id]).unwrap(),
			[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, self.node_id],
			channel_value_satoshis,
			(0, 0),
		))
	}

	fn get_secure_random_bytes(&self) -> [u8; 32] {
		let id = self.rand_bytes_id.fetch_add(1, atomic::Ordering::Relaxed);
		[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, id, 11, self.node_id]
	}

	fn read_chan_signer(&self, data: &[u8]) -> Result<EnforcingChannelKeys, DecodeError> {
		EnforcingChannelKeys::read(&mut std::io::Cursor::new(data))
	}
}

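// Checks that a failed send is one of the benign errors expected under monitor failures or
// peer disconnection; anything else is treated as a bug.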
#[inline]
fn check_api_err(api_err: APIError) {
	match api_err {
		APIError::APIMisuseError { .. } => panic!("We can't misuse the API"),
		APIError::FeeRateTooHigh { .. } => panic!("We can't send too much fee?"),
		APIError::RouteError { .. } => panic!("Our routes should work"),
		APIError::ChannelUnavailable { err } => {
			// Test the error against a list of errors we can hit, and reject
			// all others. If you hit this panic, the list of acceptable errors
			// is probably just stale and you should add new messages here.
			match err.as_str() {
				"Peer for first hop currently disconnected/pending monitor update!" => {},
				_ if err.starts_with("Cannot push more than their max accepted HTLCs ") => {},
				_ if err.starts_with("Cannot send value that would put us over the max HTLC value in flight our peer will accept ") => {},
				_ if err.starts_with("Cannot send value that would put our balance under counterparty-announced channel reserve value") => {},
				_ if err.starts_with("Cannot send value that would overdraw remaining funds.") => {},
				_ if err.starts_with("Cannot send value that would not leave enough to pay for fees.") => {},
				_ => panic!(err),
			}
		},
		APIError::MonitorUpdateFailed => {
			// We can (obviously) temp-fail a monitor update
		},
	}
}
#[inline]
fn check_payment_err(send_err: PaymentSendFailure) {
	match send_err {
		PaymentSendFailure::ParameterError(api_err) => check_api_err(api_err),
		PaymentSendFailure::PathParameterError(per_path_results) => {
			for res in per_path_results { if let Err(api_err) = res { check_api_err(api_err); } }
		},
		PaymentSendFailure::AllFailedRetrySafe(per_path_results) => {
			for api_err in per_path_results { check_api_err(api_err); }
		},
		PaymentSendFailure::PartialFailure(per_path_results) => {
			for res in per_path_results { if let Err(api_err) = res { check_api_err(api_err); } }
		},
	}
}

type ChanMan = ChannelManager<EnforcingChannelKeys, Arc<TestChainMonitor>, Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<FuzzEstimator>, Arc<dyn Logger>>;

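// Constructs a single-hop route by hand (the fuzzer does not use the router) and attempts
// to send `amt` msat along it, returning whether the send was accepted.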
#[inline]
fn send_payment(source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8) -> bool {
	let payment_hash = Sha256::hash(&[*payment_id; 1]);
	*payment_id = payment_id.wrapping_add(1);
	if let Err(err) = source.send_payment(&Route {
		paths: vec![vec![RouteHop {
			pubkey: dest.get_our_node_id(),
			node_features: NodeFeatures::empty(),
			short_channel_id: dest_chan_id,
			channel_features: ChannelFeatures::empty(),
			fee_msat: amt,
			cltv_expiry_delta: 200,
		}]],
	}, PaymentHash(payment_hash.into_inner()), &None) {
		check_payment_err(err);
		false
	} else { true }
}
#[inline]
fn send_hop_payment(source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8) -> bool {
	let payment_hash = Sha256::hash(&[*payment_id; 1]);
	*payment_id = payment_id.wrapping_add(1);
	if let Err(err) = source.send_payment(&Route {
		paths: vec![vec![RouteHop {
			pubkey: middle.get_our_node_id(),
			node_features: NodeFeatures::empty(),
			short_channel_id: middle_chan_id,
			channel_features: ChannelFeatures::empty(),
			fee_msat: 50000,
			cltv_expiry_delta: 100,
		},RouteHop {
			pubkey: dest.get_our_node_id(),
			node_features: NodeFeatures::empty(),
			short_channel_id: dest_chan_id,
			channel_features: ChannelFeatures::empty(),
			fee_msat: amt,
			cltv_expiry_delta: 200,
		}]],
	}, PaymentHash(payment_hash.into_inner()), &None) {
		check_payment_err(err);
		false
	} else { true }
}

#[inline]
pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
	let fee_est = Arc::new(FuzzEstimator{});
	let broadcast = Arc::new(TestBroadcaster{});

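	// Creates a fresh node: a ChannelManager wired to its own TestChainMonitor and
	// deterministic KeyProvider, with zero proportional fees so payment amounts stay exact.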
	macro_rules! make_node {
		($node_id: expr) => { {
			let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
			let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone(), Arc::new(TestPersister{})));

			let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU8::new(0) });
			let mut config = UserConfig::default();
			config.channel_options.fee_proportional_millionths = 0;
			config.channel_options.announced_channel = true;
			config.peer_channel_config_limits.min_dust_limit_satoshis = 0;
			(ChannelManager::new(Network::Bitcoin, fee_est.clone(), monitor.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, 0),
			monitor)
		} }
	}

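	// Simulates a restart: deserializes a node from its last ChannelManager serialization
	// plus the latest copies of its ChannelMonitors, exercising the reload/resync paths.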
	macro_rules! reload_node {
		($ser: expr, $node_id: expr, $old_monitors: expr) => { {
			let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
			let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone(), Arc::new(TestPersister{})));

			let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU8::new(0) });
			let mut config = UserConfig::default();
			config.channel_options.fee_proportional_millionths = 0;
			config.channel_options.announced_channel = true;
			config.peer_channel_config_limits.min_dust_limit_satoshis = 0;

			let mut monitors = HashMap::new();
			let mut old_monitors = $old_monitors.latest_monitors.lock().unwrap();
			for (outpoint, (update_id, monitor_ser)) in old_monitors.drain() {
				monitors.insert(outpoint, <(BlockHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut Cursor::new(&monitor_ser)).expect("Failed to read monitor").1);
				chain_monitor.latest_monitors.lock().unwrap().insert(outpoint, (update_id, monitor_ser));
			}
			let mut monitor_refs = HashMap::new();
			for (outpoint, monitor) in monitors.iter_mut() {
				monitor_refs.insert(*outpoint, monitor);
			}

			let read_args = ChannelManagerReadArgs {
				keys_manager,
				fee_estimator: fee_est.clone(),
				chain_monitor: chain_monitor.clone(),
				tx_broadcaster: broadcast.clone(),
				logger,
				default_config: config,
				channel_monitors: monitor_refs,
			};

			(<(BlockHash, ChanMan)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, chain_monitor)
		} }
	}

	let mut channel_txn = Vec::new();
	macro_rules! make_channel {
		($source: expr, $dest: expr, $chan_id: expr) => { {
			$source.create_channel($dest.get_our_node_id(), 100_000, 42, 0, None).unwrap();
			let open_channel = {
				let events = $source.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendOpenChannel { ref msg, .. } = events[0] {
					msg.clone()
				} else { panic!("Wrong event type"); }
			};

			$dest.handle_open_channel(&$source.get_our_node_id(), InitFeatures::known(), &open_channel);
			let accept_channel = {
				let events = $dest.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendAcceptChannel { ref msg, .. } = events[0] {
					msg.clone()
				} else { panic!("Wrong event type"); }
			};

			$source.handle_accept_channel(&$dest.get_our_node_id(), InitFeatures::known(), &accept_channel);
			let funding_output;
			{
				let events = $source.get_and_clear_pending_events();
				assert_eq!(events.len(), 1);
				if let events::Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, .. } = events[0] {
					let tx = Transaction { version: $chan_id, lock_time: 0, input: Vec::new(), output: vec![TxOut {
						value: *channel_value_satoshis, script_pubkey: output_script.clone(),
					}]};
					funding_output = OutPoint { txid: tx.txid(), index: 0 };
					$source.funding_transaction_generated(&temporary_channel_id, funding_output);
					channel_txn.push(tx);
				} else { panic!("Wrong event type"); }
			}

			let funding_created = {
				let events = $source.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendFundingCreated { ref msg, .. } = events[0] {
					msg.clone()
				} else { panic!("Wrong event type"); }
			};
			$dest.handle_funding_created(&$source.get_our_node_id(), &funding_created);

			let funding_signed = {
				let events = $dest.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendFundingSigned { ref msg, .. } = events[0] {
					msg.clone()
				} else { panic!("Wrong event type"); }
			};
			$source.handle_funding_signed(&$dest.get_our_node_id(), &funding_signed);

			{
				let events = $source.get_and_clear_pending_events();
				assert_eq!(events.len(), 1);
				if let events::Event::FundingBroadcastSafe { .. } = events[0] {
				} else { panic!("Wrong event type"); }
			}
			funding_output
		} }
	}

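	// Connects the channel funding transactions in block 1, then enough further empty blocks
	// for both channels to be considered confirmed.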
	macro_rules! confirm_txn {
		($node: expr) => { {
			let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
			let txdata: Vec<_> = channel_txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect();
			$node.block_connected(&header, &txdata, 1);
			for i in 2..100 {
				header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
				$node.block_connected(&header, &[], i);
			}
		} }
	}

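	// Delivers each funding_locked message to its intended peer, then checks that the only
	// remaining queued messages are (ignored) announcement_signatures.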
	macro_rules! lock_fundings {
		($nodes: expr) => { {
			let mut node_events = Vec::new();
			for node in $nodes.iter() {
				node_events.push(node.get_and_clear_pending_msg_events());
			}
			for (idx, node_event) in node_events.iter().enumerate() {
				for event in node_event {
					if let events::MessageSendEvent::SendFundingLocked { ref node_id, ref msg } = event {
						for node in $nodes.iter() {
							if node.get_our_node_id() == *node_id {
								node.handle_funding_locked(&$nodes[idx].get_our_node_id(), msg);
							}
						}
					} else { panic!("Wrong event type"); }
				}
			}

			for node in $nodes.iter() {
				let events = node.get_and_clear_pending_msg_events();
				for event in events {
					if let events::MessageSendEvent::SendAnnouncementSignatures { .. } = event {
					} else { panic!("Wrong event type"); }
				}
			}
		} }
	}

	// 3 nodes is enough to hit all the possible cases, notably unknown-source-unknown-dest
	// forwarding.
	let (node_a, mut monitor_a) = make_node!(0);
	let (node_b, mut monitor_b) = make_node!(1);
	let (node_c, mut monitor_c) = make_node!(2);

	let mut nodes = [node_a, node_b, node_c];

	let chan_1_funding = make_channel!(nodes[0], nodes[1], 0);
	let chan_2_funding = make_channel!(nodes[1], nodes[2], 1);

	for node in nodes.iter() {
		confirm_txn!(node);
	}

	lock_fundings!(nodes);

	let chan_a = nodes[0].list_usable_channels()[0].short_channel_id.unwrap();
	let chan_b = nodes[2].list_usable_channels()[0].short_channel_id.unwrap();

	let mut payment_id: u8 = 0;

	let mut chan_a_disconnected = false;
	let mut chan_b_disconnected = false;
	let mut ba_events = Vec::new();
	let mut bc_events = Vec::new();

	let mut node_a_ser = VecWriter(Vec::new());
	nodes[0].write(&mut node_a_ser).unwrap();
	let mut node_b_ser = VecWriter(Vec::new());
	nodes[1].write(&mut node_b_ser).unwrap();
	let mut node_c_ser = VecWriter(Vec::new());
	nodes[2].write(&mut node_c_ser).unwrap();

	macro_rules! test_return {
		() => { {
			assert_eq!(nodes[0].list_channels().len(), 1);
			assert_eq!(nodes[1].list_channels().len(), 2);
			assert_eq!(nodes[2].list_channels().len(), 1);
			return;
		} }
	}

	let mut read_pos = 0;
	macro_rules! get_slice {
		($len: expr) => {
			{
				let slice_len = $len as usize;
				if data.len() < read_pos + slice_len {
					test_return!();
				}
				read_pos += slice_len;
				&data[read_pos - slice_len..read_pos]
			}
		}
	}

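	// Main fuzz loop: each input byte selects one action (fail/restore monitor updates,
	// disconnect/reconnect peers, deliver queued messages, process events, reload a node
	// from serialized state, or send a payment).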
	loop {
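		// Sends a two-path payment with a payment_secret through node B; both paths are
		// identical, so the destination sees two HTLCs with the same payment hash.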
		macro_rules! send_payment_with_secret {
			($source: expr, $middle: expr, $dest: expr) => { {
				let payment_hash = Sha256::hash(&[payment_id; 1]);
				payment_id = payment_id.wrapping_add(1);
				let payment_secret = Sha256::hash(&[payment_id; 1]);
				payment_id = payment_id.wrapping_add(1);
				if let Err(err) = $source.send_payment(&Route {
					paths: vec![vec![RouteHop {
						pubkey: $middle.0.get_our_node_id(),
						node_features: NodeFeatures::empty(),
						short_channel_id: $middle.1,
						channel_features: ChannelFeatures::empty(),
						fee_msat: 50_000,
						cltv_expiry_delta: 100,
					},RouteHop {
						pubkey: $dest.0.get_our_node_id(),
						node_features: NodeFeatures::empty(),
						short_channel_id: $dest.1,
						channel_features: ChannelFeatures::empty(),
						fee_msat: 10_000_000,
						cltv_expiry_delta: 200,
					}],vec![RouteHop {
						pubkey: $middle.0.get_our_node_id(),
						node_features: NodeFeatures::empty(),
						short_channel_id: $middle.1,
						channel_features: ChannelFeatures::empty(),
						fee_msat: 50_000,
						cltv_expiry_delta: 100,
					},RouteHop {
						pubkey: $dest.0.get_our_node_id(),
						node_features: NodeFeatures::empty(),
						short_channel_id: $dest.1,
						channel_features: ChannelFeatures::empty(),
						fee_msat: 10_000_000,
						cltv_expiry_delta: 200,
					}]],
				}, PaymentHash(payment_hash.into_inner()), &Some(PaymentSecret(payment_secret.into_inner()))) {
					check_payment_err(err);
				}
			} }
		}

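		// Delivers every queued message from $node to its recipients, optionally corrupting
		// forwarded update_add_htlc messages; for node B this also includes messages
		// buffered across disconnects in ba_events/bc_events.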
		macro_rules! process_msg_events {
			($node: expr, $corrupt_forward: expr) => { {
				let events = if $node == 1 {
					let mut new_events = Vec::new();
					mem::swap(&mut new_events, &mut ba_events);
					new_events.extend_from_slice(&bc_events[..]);
					bc_events.clear();
					new_events
				} else { Vec::new() };
				let mut had_events = false;
				for event in events.iter().chain(nodes[$node].get_and_clear_pending_msg_events().iter()) {
					had_events = true;
					match event {
						events::MessageSendEvent::UpdateHTLCs { ref node_id, updates: CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
							for dest in nodes.iter() {
								if dest.get_our_node_id() == *node_id {
									assert!(update_fee.is_none());
									for update_add in update_add_htlcs {
										if !$corrupt_forward {
											dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &update_add);
										} else {
											// Corrupt the update_add_htlc message so that its HMAC
											// check will fail and we generate an
											// update_fail_malformed_htlc instead of an
											// update_fail_htlc as we do when we reject a payment.
											let mut msg_ser = update_add.encode();
											msg_ser[1000] ^= 0xff;
											let new_msg = UpdateAddHTLC::read(&mut Cursor::new(&msg_ser)).unwrap();
											dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &new_msg);
										}
									}
									for update_fulfill in update_fulfill_htlcs {
										dest.handle_update_fulfill_htlc(&nodes[$node].get_our_node_id(), &update_fulfill);
									}
									for update_fail in update_fail_htlcs {
										dest.handle_update_fail_htlc(&nodes[$node].get_our_node_id(), &update_fail);
									}
									for update_fail_malformed in update_fail_malformed_htlcs {
										dest.handle_update_fail_malformed_htlc(&nodes[$node].get_our_node_id(), &update_fail_malformed);
									}
									dest.handle_commitment_signed(&nodes[$node].get_our_node_id(), &commitment_signed);
								}
							}
						},
						events::MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
							for dest in nodes.iter() {
								if dest.get_our_node_id() == *node_id {
									dest.handle_revoke_and_ack(&nodes[$node].get_our_node_id(), msg);
								}
							}
						},
						events::MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
							for dest in nodes.iter() {
								if dest.get_our_node_id() == *node_id {
									dest.handle_channel_reestablish(&nodes[$node].get_our_node_id(), msg);
								}
							}
						},
						events::MessageSendEvent::SendFundingLocked { .. } => {
							// Can be generated as a reestablish response
						},
						events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {
							// Can be generated due to a payment forward being rejected due to a
							// channel having previously failed a monitor update
						},
						_ => panic!("Unhandled message event"),
					}
				}
				had_events
			} }
		}

		// Flushes the disconnected counterparty's queued messages, keeping node B's messages
		// destined for the still-connected peer buffered in ba_events/bc_events.
		macro_rules! drain_msg_events_on_disconnect {
			($counterparty_id: expr) => { {
				if $counterparty_id == 0 {
					for event in nodes[0].get_and_clear_pending_msg_events() {
						match event {
							events::MessageSendEvent::UpdateHTLCs { .. } => {},
							events::MessageSendEvent::SendRevokeAndACK { .. } => {},
							events::MessageSendEvent::SendChannelReestablish { .. } => {},
							events::MessageSendEvent::SendFundingLocked { .. } => {},
							events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
							events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => {},
							_ => panic!("Unhandled message event"),
						}
					}
					ba_events.clear();
				} else {
					for event in nodes[2].get_and_clear_pending_msg_events() {
						match event {
							events::MessageSendEvent::UpdateHTLCs { .. } => {},
							events::MessageSendEvent::SendRevokeAndACK { .. } => {},
							events::MessageSendEvent::SendChannelReestablish { .. } => {},
							events::MessageSendEvent::SendFundingLocked { .. } => {},
							events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
							events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => {},
							_ => panic!("Unhandled message event"),
						}
					}
					bc_events.clear();
				}
				let mut events = nodes[1].get_and_clear_pending_msg_events();
				let drop_node_id = if $counterparty_id == 0 { nodes[0].get_our_node_id() } else { nodes[2].get_our_node_id() };
				let msg_sink = if $counterparty_id == 0 { &mut bc_events } else { &mut ba_events };
				for event in events.drain(..) {
					let push = match event {
						events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
							if *node_id != drop_node_id { true } else { false }
						},
						events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => {
							if *node_id != drop_node_id { true } else { false }
						},
						events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => {
							if *node_id != drop_node_id { true } else { false }
						},
						events::MessageSendEvent::SendFundingLocked { .. } => false,
						events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => false,
						events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => false,
						_ => panic!("Unhandled message event"),
					};
					if push { msg_sink.push(event); }
				}
			} }
		}

		macro_rules! process_events {
			($node: expr, $fail: expr) => { {
				// In case we get 256 payments we may have a hash collision, resulting in the
				// second claim/fail call not finding the duplicate-hash HTLC, so we have to
				// deduplicate the calls here.
				let mut claim_set = HashSet::new();
				let mut events = nodes[$node].get_and_clear_pending_events();
				// Sort events so that PendingHTLCsForwardable get processed last. This avoids a
				// case where we first process a PendingHTLCsForwardable, then claim/fail on a
				// PaymentReceived, claiming/failing two HTLCs, but leaving a just-generated
				// PaymentReceived event for the second HTLC in our pending_events (and breaking
				// our claim_set deduplication).
				events.sort_by(|a, b| {
					if let events::Event::PaymentReceived { .. } = a {
						if let events::Event::PendingHTLCsForwardable { .. } = b {
							Ordering::Less
						} else { Ordering::Equal }
					} else if let events::Event::PendingHTLCsForwardable { .. } = a {
						if let events::Event::PaymentReceived { .. } = b {
							Ordering::Greater
						} else { Ordering::Equal }
					} else { Ordering::Equal }
				});
				let had_events = !events.is_empty();
				for event in events.drain(..) {
					match event {
						events::Event::PaymentReceived { payment_hash, payment_secret, amt } => {
							if claim_set.insert(payment_hash.0) {
								if $fail {
									assert!(nodes[$node].fail_htlc_backwards(&payment_hash, &payment_secret));
								} else {
									assert!(nodes[$node].claim_funds(PaymentPreimage(payment_hash.0), &payment_secret, amt));
								}
							}
						},
						events::Event::PaymentSent { .. } => {},
						events::Event::PaymentFailed { .. } => {},
						events::Event::PendingHTLCsForwardable { .. } => {
							nodes[$node].process_pending_htlc_forwards();
						},
						_ => panic!("Unhandled event"),
					}
				}
				had_events
			} }
		}

		match get_slice!(1)[0] {
			// In general, we keep related message groups close together in binary form, allowing
			// bit-twiddling mutations to have similar effects. This is probably overkill, but no
			// harm in doing so.

			0x00 => *monitor_a.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
			0x01 => *monitor_b.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
			0x02 => *monitor_c.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
			0x04 => *monitor_a.update_ret.lock().unwrap() = Ok(()),
			0x05 => *monitor_b.update_ret.lock().unwrap() = Ok(()),
			0x06 => *monitor_c.update_ret.lock().unwrap() = Ok(()),

			0x08 => {
				if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
					nodes[0].channel_monitor_updated(&chan_1_funding, *id);
				}
			},
			0x09 => {
				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) {
					nodes[1].channel_monitor_updated(&chan_1_funding, *id);
				}
			},
			0x0a => {
				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) {
					nodes[1].channel_monitor_updated(&chan_2_funding, *id);
				}
			},
			0x0b => {
				if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) {
					nodes[2].channel_monitor_updated(&chan_2_funding, *id);
				}
			},

			0x0c => {
				if !chan_a_disconnected {
					nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
					nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
					chan_a_disconnected = true;
					drain_msg_events_on_disconnect!(0);
				}
			},
			0x0d => {
				if !chan_b_disconnected {
					nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
					nodes[2].peer_disconnected(&nodes[1].get_our_node_id(), false);
					chan_b_disconnected = true;
					drain_msg_events_on_disconnect!(2);
				}
			},
			0x0e => {
				if chan_a_disconnected {
					nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::empty() });
					nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: InitFeatures::empty() });
					chan_a_disconnected = false;
				}
			},
			0x0f => {
				if chan_b_disconnected {
					nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: InitFeatures::empty() });
					nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::empty() });
					chan_b_disconnected = false;
				}
			},

			0x10 => { process_msg_events!(0, true); },
			0x11 => { process_msg_events!(0, false); },
			0x12 => { process_events!(0, true); },
			0x13 => { process_events!(0, false); },
			0x14 => { process_msg_events!(1, true); },
			0x15 => { process_msg_events!(1, false); },
			0x16 => { process_events!(1, true); },
			0x17 => { process_events!(1, false); },
			0x18 => { process_msg_events!(2, true); },
			0x19 => { process_msg_events!(2, false); },
			0x1a => { process_events!(2, true); },
			0x1b => { process_events!(2, false); },

			0x1c => {
				if !chan_a_disconnected {
					nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
					chan_a_disconnected = true;
					drain_msg_events_on_disconnect!(0);
				}
				let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a);
				nodes[0] = new_node_a;
				monitor_a = new_monitor_a;
			},
			0x1d => {
				if !chan_a_disconnected {
					nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
					chan_a_disconnected = true;
					nodes[0].get_and_clear_pending_msg_events();
					ba_events.clear();
				}
				if !chan_b_disconnected {
					nodes[2].peer_disconnected(&nodes[1].get_our_node_id(), false);
					chan_b_disconnected = true;
					nodes[2].get_and_clear_pending_msg_events();
					bc_events.clear();
				}
				let (new_node_b, new_monitor_b) = reload_node!(node_b_ser, 1, monitor_b);
				nodes[1] = new_node_b;
				monitor_b = new_monitor_b;
			},
			0x1e => {
				if !chan_b_disconnected {
					nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
					chan_b_disconnected = true;
					drain_msg_events_on_disconnect!(2);
				}
				let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c);
				nodes[2] = new_node_c;
				monitor_c = new_monitor_c;
			},

			// 1/10th the channel size:
			0x20 => { send_payment(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut payment_id); },
			0x21 => { send_payment(&nodes[1], &nodes[0], chan_a, 10_000_000, &mut payment_id); },
			0x22 => { send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut payment_id); },
			0x23 => { send_payment(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut payment_id); },
			0x24 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000_000, &mut payment_id); },
			0x25 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000_000, &mut payment_id); },

			0x26 => { send_payment_with_secret!(nodes[0], (&nodes[1], chan_a), (&nodes[2], chan_b)); },
			0x27 => { send_payment_with_secret!(nodes[2], (&nodes[1], chan_b), (&nodes[0], chan_a)); },

			0x28 => { send_payment(&nodes[0], &nodes[1], chan_a, 1_000_000, &mut payment_id); },
			0x29 => { send_payment(&nodes[1], &nodes[0], chan_a, 1_000_000, &mut payment_id); },
			0x2a => { send_payment(&nodes[1], &nodes[2], chan_b, 1_000_000, &mut payment_id); },
			0x2b => { send_payment(&nodes[2], &nodes[1], chan_b, 1_000_000, &mut payment_id); },
			0x2c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000_000, &mut payment_id); },
			0x2d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000_000, &mut payment_id); },

			0x30 => { send_payment(&nodes[0], &nodes[1], chan_a, 100_000, &mut payment_id); },
			0x31 => { send_payment(&nodes[1], &nodes[0], chan_a, 100_000, &mut payment_id); },
			0x32 => { send_payment(&nodes[1], &nodes[2], chan_b, 100_000, &mut payment_id); },
			0x33 => { send_payment(&nodes[2], &nodes[1], chan_b, 100_000, &mut payment_id); },
			0x34 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100_000, &mut payment_id); },
			0x35 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100_000, &mut payment_id); },

			0x38 => { send_payment(&nodes[0], &nodes[1], chan_a, 10_000, &mut payment_id); },
			0x39 => { send_payment(&nodes[1], &nodes[0], chan_a, 10_000, &mut payment_id); },
			0x3a => { send_payment(&nodes[1], &nodes[2], chan_b, 10_000, &mut payment_id); },
			0x3b => { send_payment(&nodes[2], &nodes[1], chan_b, 10_000, &mut payment_id); },
			0x3c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000, &mut payment_id); },
			0x3d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000, &mut payment_id); },

			0x40 => { send_payment(&nodes[0], &nodes[1], chan_a, 1_000, &mut payment_id); },
			0x41 => { send_payment(&nodes[1], &nodes[0], chan_a, 1_000, &mut payment_id); },
			0x42 => { send_payment(&nodes[1], &nodes[2], chan_b, 1_000, &mut payment_id); },
			0x43 => { send_payment(&nodes[2], &nodes[1], chan_b, 1_000, &mut payment_id); },
			0x44 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000, &mut payment_id); },
			0x45 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000, &mut payment_id); },

			0x48 => { send_payment(&nodes[0], &nodes[1], chan_a, 100, &mut payment_id); },
			0x49 => { send_payment(&nodes[1], &nodes[0], chan_a, 100, &mut payment_id); },
			0x4a => { send_payment(&nodes[1], &nodes[2], chan_b, 100, &mut payment_id); },
			0x4b => { send_payment(&nodes[2], &nodes[1], chan_b, 100, &mut payment_id); },
			0x4c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100, &mut payment_id); },
			0x4d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100, &mut payment_id); },

			0x50 => { send_payment(&nodes[0], &nodes[1], chan_a, 10, &mut payment_id); },
			0x51 => { send_payment(&nodes[1], &nodes[0], chan_a, 10, &mut payment_id); },
			0x52 => { send_payment(&nodes[1], &nodes[2], chan_b, 10, &mut payment_id); },
			0x53 => { send_payment(&nodes[2], &nodes[1], chan_b, 10, &mut payment_id); },
			0x54 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10, &mut payment_id); },
			0x55 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10, &mut payment_id); },

			0x58 => { send_payment(&nodes[0], &nodes[1], chan_a, 1, &mut payment_id); },
			0x59 => { send_payment(&nodes[1], &nodes[0], chan_a, 1, &mut payment_id); },
			0x5a => { send_payment(&nodes[1], &nodes[2], chan_b, 1, &mut payment_id); },
			0x5b => { send_payment(&nodes[2], &nodes[1], chan_b, 1, &mut payment_id); },
			0x5c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1, &mut payment_id); },
			0x5d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1, &mut payment_id); },

			0xff => {
				// Test that no channel is in a stuck state where neither party can send funds even
				// after we resolve all pending events.
				// First make sure there are no pending monitor updates, resetting the error state
				// and calling channel_monitor_updated for each monitor.
				*monitor_a.update_ret.lock().unwrap() = Ok(());
				*monitor_b.update_ret.lock().unwrap() = Ok(());
				*monitor_c.update_ret.lock().unwrap() = Ok(());

				if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
					nodes[0].channel_monitor_updated(&chan_1_funding, *id);
				}
				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) {
					nodes[1].channel_monitor_updated(&chan_1_funding, *id);
				}
				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) {
					nodes[1].channel_monitor_updated(&chan_2_funding, *id);
				}
				if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) {
					nodes[2].channel_monitor_updated(&chan_2_funding, *id);
				}

				// Next, make sure peers are all connected to each other
				if chan_a_disconnected {
					nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::empty() });
					nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: InitFeatures::empty() });
					chan_a_disconnected = false;
				}
				if chan_b_disconnected {
					nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: InitFeatures::empty() });
					nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::empty() });
					chan_b_disconnected = false;
				}

				for i in 0..std::usize::MAX {
					if i == 100 { panic!("It may take many iterations to settle the state, but it should not take forever"); }
					// Then, make sure any current forwards make their way to their destination
					if process_msg_events!(0, false) { continue; }
					if process_msg_events!(1, false) { continue; }
					if process_msg_events!(2, false) { continue; }
					// ...making sure any pending PendingHTLCsForwardable events are handled and
					// payments claimed.
					if process_events!(0, false) { continue; }
					if process_events!(1, false) { continue; }
					if process_events!(2, false) { continue; }
					break;
				}

				// Finally, make sure that at least one end of each channel can make a substantial payment.
				assert!(
					send_payment(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut payment_id) ||
					send_payment(&nodes[1], &nodes[0], chan_a, 10_000_000, &mut payment_id));
				assert!(
					send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut payment_id) ||
					send_payment(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut payment_id));
			},
			_ => test_return!(),
		}
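
		// Keep fresh serialized copies of each node so a later reload action (0x1c-0x1e)
		// restores state as of the end of this iteration.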
		node_a_ser.0.clear();
		nodes[0].write(&mut node_a_ser).unwrap();
		monitor_a.should_update_manager.store(false, atomic::Ordering::Relaxed);
		node_b_ser.0.clear();
		nodes[1].write(&mut node_b_ser).unwrap();
		monitor_b.should_update_manager.store(false, atomic::Ordering::Relaxed);
		node_c_ser.0.clear();
		nodes[2].write(&mut node_c_ser).unwrap();
		monitor_c.should_update_manager.store(false, atomic::Ordering::Relaxed);
	}
}

pub fn chanmon_consistency_test<Out: test_logger::Output>(data: &[u8], out: Out) {
	do_test(data, out);
}

#[no_mangle]
pub extern "C" fn chanmon_consistency_run(data: *const u8, datalen: usize) {
	do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull{});
}