// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! Test that monitor update failures don't get our channel state out of sync.
//! One of the biggest concerns with the monitor update failure handling code is that messages
//! resent after monitor updating is restored are delivered out-of-order, resulting in
//! commitment_signed messages having "invalid signatures".
//! To test this we stand up a network of three nodes and read bytes from the fuzz input to denote
//! actions such as sending payments, handling events, or changing monitor update return values on
//! a per-node basis. This should allow it to find any cases where the ordering of actions results
//! in us getting out of sync with ourselves, and, assuming at least one of our receive- or
//! send-side handling is correct, other peers. We consider it a failure if any action results in a
//! channel being force-closed.

use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::blockdata::transaction::{Transaction, TxOut};
use bitcoin::blockdata::script::{Builder, Script};
use bitcoin::blockdata::opcodes;
use bitcoin::network::constants::Network;

use bitcoin::hashes::Hash as TraitImport;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hash_types::{BlockHash, WPubkeyHash};

use lightning::chain;
use lightning::chain::{BestBlock, chainmonitor, channelmonitor, Confirm, Watch};
use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, MonitorEvent};
use lightning::chain::transaction::OutPoint;
use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
use lightning::chain::keysinterface::{KeysInterface, InMemorySigner};
use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
use lightning::ln::channelmanager::{ChainParameters, ChannelManager, PaymentSendFailure, ChannelManagerReadArgs};
use lightning::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, DecodeError, UpdateAddHTLC, Init};
use lightning::util::enforcing_trait_impls::{EnforcingSigner, INITIAL_REVOKED_COMMITMENT_NUMBER};
use lightning::util::errors::APIError;
use lightning::util::events;
use lightning::util::logger::Logger;
use lightning::util::config::UserConfig;
use lightning::util::events::MessageSendEventsProvider;
use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use lightning::routing::router::{Route, RouteHop};

use utils::test_logger;
use utils::test_persister::TestPersister;

use bitcoin::secp256k1::key::{PublicKey, SecretKey};
use bitcoin::secp256k1::recovery::RecoverableSignature;
use bitcoin::secp256k1::Secp256k1;

use std::mem;
use std::cmp::Ordering;
use std::collections::{HashSet, hash_map, HashMap};
use std::sync::{Arc, Mutex};
use std::sync::atomic;
use std::io::Cursor;
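
// A fee estimator which always returns the same feerate (253 sat per 1000 weight, i.e. just above
// 1 sat/vbyte) so that fee estimation never influences the fuzzer's control flow.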
struct FuzzEstimator {}
impl FeeEstimator for FuzzEstimator {
	fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
		253
	}
}

pub struct TestBroadcaster {}
impl BroadcasterInterface for TestBroadcaster {
	fn broadcast_transaction(&self, _tx: &Transaction) { }
}
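
// A minimal in-memory `Writer` used to capture serialized `ChannelManager`s and
// `ChannelMonitor`s so that nodes can later be reloaded from those bytes.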
pub struct VecWriter(pub Vec<u8>);
impl Writer for VecWriter {
	fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
		self.0.extend_from_slice(buf);
		Ok(())
	}
	fn size_hint(&mut self, size: usize) {
		self.0.reserve_exact(size);
	}
}
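
// Wraps a real ChainMonitor but lets the fuzz input dictate the value returned from
// watch_channel/update_channel (via `update_ret`), so monitor updates can be failed on demand.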
struct TestChainMonitor {
	pub logger: Arc<dyn Logger>,
	pub keys: Arc<KeyProvider>,
	pub chain_monitor: Arc<chainmonitor::ChainMonitor<EnforcingSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
	pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
	// If we reload a node with an old copy of ChannelMonitors, the ChannelManager deserialization
	// logic will automatically force-close our channels for us (as we don't have an up-to-date
	// monitor implying we are not able to punish misbehaving counterparties). Because this test
	// "fails" if we ever force-close a channel, we avoid doing so, always saving the latest
	// fully-serialized monitor state here, as well as the corresponding update_id.
	pub latest_monitors: Mutex<HashMap<OutPoint, (u64, Vec<u8>)>>,
	pub should_update_manager: atomic::AtomicBool,
}
impl TestChainMonitor {
	pub fn new(broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>, persister: Arc<TestPersister>, keys: Arc<KeyProvider>) -> Self {
		Self {
			chain_monitor: Arc::new(chainmonitor::ChainMonitor::new(None, broadcaster, logger.clone(), feeest, persister)),
			logger,
			keys,
			update_ret: Mutex::new(Ok(())),
			latest_monitors: Mutex::new(HashMap::new()),
			should_update_manager: atomic::AtomicBool::new(false),
		}
	}
}
impl chain::Watch<EnforcingSigner> for TestChainMonitor {
	fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
		let mut ser = VecWriter(Vec::new());
		monitor.write(&mut ser).unwrap();
		if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) {
			panic!("Already had monitor pre-watch_channel");
		}
		self.should_update_manager.store(true, atomic::Ordering::Relaxed);
		assert!(self.chain_monitor.watch_channel(funding_txo, monitor).is_ok());
		self.update_ret.lock().unwrap().clone()
	}

	fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
		let mut map_lock = self.latest_monitors.lock().unwrap();
		let mut map_entry = match map_lock.entry(funding_txo) {
			hash_map::Entry::Occupied(entry) => entry,
			hash_map::Entry::Vacant(_) => panic!("Didn't have monitor on update call"),
		};
		let deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::
			read(&mut Cursor::new(&map_entry.get().1), &*self.keys).unwrap().1;
		deserialized_monitor.update_monitor(&update, &&TestBroadcaster{}, &&FuzzEstimator{}, &self.logger).unwrap();
		let mut ser = VecWriter(Vec::new());
		deserialized_monitor.write(&mut ser).unwrap();
		map_entry.insert((update.update_id, ser.0));
		self.should_update_manager.store(true, atomic::Ordering::Relaxed);
		assert!(self.chain_monitor.update_channel(funding_txo, update).is_ok());
		self.update_ret.lock().unwrap().clone()
	}

	fn release_pending_monitor_events(&self) -> Vec<MonitorEvent> {
		return self.chain_monitor.release_pending_monitor_events();
	}
}
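
// Provides deterministic keys derived from the node_id byte (and a per-node counter), so runs on
// the same fuzz input are reproducible.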
struct KeyProvider {
	node_id: u8,
	rand_bytes_id: atomic::AtomicU32,
	revoked_commitments: Mutex<HashMap<[u8; 32], Arc<Mutex<u64>>>>,
}
impl KeysInterface for KeyProvider {
	type Signer = EnforcingSigner;

	fn get_node_secret(&self) -> SecretKey {
		SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, self.node_id]).unwrap()
	}

	fn get_destination_script(&self) -> Script {
		let secp_ctx = Secp256k1::signing_only();
		let channel_monitor_claim_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, self.node_id]).unwrap();
		let our_channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
		Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&our_channel_monitor_claim_key_hash[..]).into_script()
	}

	fn get_shutdown_pubkey(&self) -> PublicKey {
		let secp_ctx = Secp256k1::signing_only();
		PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, self.node_id]).unwrap())
	}

	fn get_channel_signer(&self, _inbound: bool, channel_value_satoshis: u64) -> EnforcingSigner {
		let secp_ctx = Secp256k1::signing_only();
		let id = self.rand_bytes_id.fetch_add(1, atomic::Ordering::Relaxed);
		let keys = InMemorySigner::new(
			&secp_ctx,
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, self.node_id]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, self.node_id]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, self.node_id]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, self.node_id]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, self.node_id]).unwrap(),
			[id as u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, self.node_id],
			channel_value_satoshis,
			[0; 32],
		);
		let revoked_commitment = self.make_revoked_commitment_cell(keys.commitment_seed);
		EnforcingSigner::new_with_revoked(keys, revoked_commitment, false)
	}

	fn get_secure_random_bytes(&self) -> [u8; 32] {
		let id = self.rand_bytes_id.fetch_add(1, atomic::Ordering::Relaxed);
		let mut res = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, self.node_id];
		res[30-4..30].copy_from_slice(&id.to_le_bytes());
		res
	}

	fn read_chan_signer(&self, buffer: &[u8]) -> Result<Self::Signer, DecodeError> {
		let mut reader = std::io::Cursor::new(buffer);
		let inner: InMemorySigner = Readable::read(&mut reader)?;
		let revoked_commitment = self.make_revoked_commitment_cell(inner.commitment_seed);
		let last_commitment_number = Readable::read(&mut reader)?;
		Ok(EnforcingSigner {
			inner,
			last_commitment_number: Arc::new(Mutex::new(last_commitment_number)),
			revoked_commitment,
			disable_revocation_policy_check: false,
		})
	}

	fn sign_invoice(&self, _invoice_preimage: Vec<u8>) -> Result<RecoverableSignature, ()> {
		unreachable!()
	}
}

impl KeyProvider {
	fn make_revoked_commitment_cell(&self, commitment_seed: [u8; 32]) -> Arc<Mutex<u64>> {
		let mut revoked_commitments = self.revoked_commitments.lock().unwrap();
		if !revoked_commitments.contains_key(&commitment_seed) {
			revoked_commitments.insert(commitment_seed, Arc::new(Mutex::new(INITIAL_REVOKED_COMMITMENT_NUMBER)));
		}
		let cell = revoked_commitments.get(&commitment_seed).unwrap();
		Arc::clone(cell)
	}
}
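
// Panic on any send-side error the fuzzer should never be able to trigger; only the "expected"
// failure modes listed below (and temporary monitor-update failures) are tolerated.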
#[inline]
fn check_api_err(api_err: APIError) {
	match api_err {
		APIError::APIMisuseError { .. } => panic!("We can't misuse the API"),
		APIError::FeeRateTooHigh { .. } => panic!("We can't send too much fee?"),
		APIError::RouteError { .. } => panic!("Our routes should work"),
		APIError::ChannelUnavailable { err } => {
			// Test the error against a list of errors we can hit, and reject
			// all others. If you hit this panic, the list of acceptable errors
			// is probably just stale and you should add new messages here.
			match err.as_str() {
				"Peer for first hop currently disconnected/pending monitor update!" => {},
				_ if err.starts_with("Cannot push more than their max accepted HTLCs") => {},
				_ if err.starts_with("Cannot send value that would put us over the max HTLC value in flight our peer will accept") => {},
				_ if err.starts_with("Cannot send value that would put our balance under counterparty-announced channel reserve value") => {},
				_ if err.starts_with("Cannot send value that would overdraw remaining funds.") => {},
				_ if err.starts_with("Cannot send value that would not leave enough to pay for fees.") => {},
				_ => panic!("{}", err),
			}
		},
		APIError::MonitorUpdateFailed => {
			// We can (obviously) temp-fail a monitor update
		},
	}
}
#[inline]
fn check_payment_err(send_err: PaymentSendFailure) {
	match send_err {
		PaymentSendFailure::ParameterError(api_err) => check_api_err(api_err),
		PaymentSendFailure::PathParameterError(per_path_results) => {
			for res in per_path_results { if let Err(api_err) = res { check_api_err(api_err); } }
		},
		PaymentSendFailure::AllFailedRetrySafe(per_path_results) => {
			for api_err in per_path_results { check_api_err(api_err); }
		},
		PaymentSendFailure::PartialFailure(per_path_results) => {
			for res in per_path_results { if let Err(api_err) = res { check_api_err(api_err); } }
		},
	}
}
type ChanMan = ChannelManager<EnforcingSigner, Arc<TestChainMonitor>, Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<FuzzEstimator>, Arc<dyn Logger>>;
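
// Find a payment hash for which `dest` can register an inbound payment, bumping `payment_id`
// until registration succeeds or 256 attempts have been made.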
#[ inline ]
fn get_payment_secret_hash ( dest : & ChanMan , payment_id : & mut u8 ) -> Option < ( PaymentSecret , PaymentHash ) > {
let mut payment_hash ;
for _ in 0 .. 256 {
payment_hash = PaymentHash ( Sha256 ::hash ( & [ * payment_id ; 1 ] ) . into_inner ( ) ) ;
2021-05-14 10:06:17 -04:00
if let Ok ( payment_secret ) = dest . create_inbound_payment_for_hash ( payment_hash , None , 3600 , 0 ) {
2021-04-23 19:04:02 +00:00
return Some ( ( payment_secret , payment_hash ) ) ;
}
* payment_id = payment_id . wrapping_add ( 1 ) ;
}
None
}
2020-11-21 12:09:40 -05:00
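
// Send a one- or two-hop payment along a hard-coded route, returning false if the send call
// failed in one of the ways the fuzzer considers acceptable (see check_payment_err).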
#[inline]
fn send_payment(source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8) -> bool {
	let (payment_secret, payment_hash) =
		if let Some((secret, hash)) = get_payment_secret_hash(dest, payment_id) { (secret, hash) } else { return true; };
	if let Err(err) = source.send_payment(&Route {
		paths: vec![vec![RouteHop {
			pubkey: dest.get_our_node_id(),
			node_features: NodeFeatures::known(),
			short_channel_id: dest_chan_id,
			channel_features: ChannelFeatures::known(),
			fee_msat: amt,
			cltv_expiry_delta: 200,
		}]],
	}, payment_hash, &Some(payment_secret)) {
		check_payment_err(err);
		false
	} else { true }
}
#[inline]
fn send_hop_payment(source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8) -> bool {
	let (payment_secret, payment_hash) =
		if let Some((secret, hash)) = get_payment_secret_hash(dest, payment_id) { (secret, hash) } else { return true; };
	if let Err(err) = source.send_payment(&Route {
		paths: vec![vec![RouteHop {
			pubkey: middle.get_our_node_id(),
			node_features: NodeFeatures::known(),
			short_channel_id: middle_chan_id,
			channel_features: ChannelFeatures::known(),
			fee_msat: 50000,
			cltv_expiry_delta: 100,
		}, RouteHop {
			pubkey: dest.get_our_node_id(),
			node_features: NodeFeatures::known(),
			short_channel_id: dest_chan_id,
			channel_features: ChannelFeatures::known(),
			fee_msat: amt,
			cltv_expiry_delta: 200,
		}]],
	}, payment_hash, &Some(payment_secret)) {
		check_payment_err(err);
		false
	} else { true }
}
#[inline]
pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
	let fee_est = Arc::new(FuzzEstimator{});
	let broadcast = Arc::new(TestBroadcaster{});

	macro_rules! make_node {
		($node_id: expr) => { {
			let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
			let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU32::new(0), revoked_commitments: Mutex::new(HashMap::new()) });
			let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone(), Arc::new(TestPersister{}), Arc::clone(&keys_manager)));

			let mut config = UserConfig::default();
			config.channel_options.forwarding_fee_proportional_millionths = 0;
			config.channel_options.announced_channel = true;
			let network = Network::Bitcoin;
			let params = ChainParameters {
				network,
				best_block: BestBlock::from_genesis(network),
			};
			(ChannelManager::new(fee_est.clone(), monitor.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, params),
			monitor, keys_manager)
		} }
	}
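
	// Rebuild a node from its last serialized ChannelManager plus the latest copies of its
	// ChannelMonitors, mirroring a restart from persisted state.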
	macro_rules! reload_node {
		($ser: expr, $node_id: expr, $old_monitors: expr, $keys_manager: expr) => { {
			let keys_manager = Arc::clone(&$keys_manager);
			let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
			let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone(), Arc::new(TestPersister{}), Arc::clone(&$keys_manager)));

			let mut config = UserConfig::default();
			config.channel_options.forwarding_fee_proportional_millionths = 0;
			config.channel_options.announced_channel = true;

			let mut monitors = HashMap::new();
			let mut old_monitors = $old_monitors.latest_monitors.lock().unwrap();
			for (outpoint, (update_id, monitor_ser)) in old_monitors.drain() {
				monitors.insert(outpoint, <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut Cursor::new(&monitor_ser), &*$keys_manager).expect("Failed to read monitor").1);
				chain_monitor.latest_monitors.lock().unwrap().insert(outpoint, (update_id, monitor_ser));
			}
			let mut monitor_refs = HashMap::new();
			for (outpoint, monitor) in monitors.iter_mut() {
				monitor_refs.insert(*outpoint, monitor);
			}

			let read_args = ChannelManagerReadArgs {
				keys_manager,
				fee_estimator: fee_est.clone(),
				chain_monitor: chain_monitor.clone(),
				tx_broadcaster: broadcast.clone(),
				logger,
				default_config: config,
				channel_monitors: monitor_refs,
			};

			let res = (<(BlockHash, ChanMan)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, chain_monitor.clone());
			for (funding_txo, mon) in monitors.drain() {
				assert!(chain_monitor.chain_monitor.watch_channel(funding_txo, mon).is_ok());
			}
			res
		} }
	}
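
	// Open a channel between $source and $dest, driving the open_channel/accept_channel/
	// funding_created/funding_signed handshake by hand and recording the funding transaction.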
	let mut channel_txn = Vec::new();
	macro_rules! make_channel {
		($source: expr, $dest: expr, $chan_id: expr) => { {
			$source.create_channel($dest.get_our_node_id(), 100_000, 42, 0, None).unwrap();
			let open_channel = {
				let events = $source.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendOpenChannel { ref msg, .. } = events[0] {
					msg.clone()
				} else { panic!("Wrong event type"); }
			};

			$dest.handle_open_channel(&$source.get_our_node_id(), InitFeatures::known(), &open_channel);
			let accept_channel = {
				let events = $dest.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendAcceptChannel { ref msg, .. } = events[0] {
					msg.clone()
				} else { panic!("Wrong event type"); }
			};

			$source.handle_accept_channel(&$dest.get_our_node_id(), InitFeatures::known(), &accept_channel);
			let funding_output;
			{
				let events = $source.get_and_clear_pending_events();
				assert_eq!(events.len(), 1);
				if let events::Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, .. } = events[0] {
					let tx = Transaction { version: $chan_id, lock_time: 0, input: Vec::new(), output: vec![TxOut {
						value: *channel_value_satoshis, script_pubkey: output_script.clone(),
					}] };
					funding_output = OutPoint { txid: tx.txid(), index: 0 };
					$source.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
					channel_txn.push(tx);
				} else { panic!("Wrong event type"); }
			}

			let funding_created = {
				let events = $source.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendFundingCreated { ref msg, .. } = events[0] {
					msg.clone()
				} else { panic!("Wrong event type"); }
			};
			$dest.handle_funding_created(&$source.get_our_node_id(), &funding_created);

			let funding_signed = {
				let events = $dest.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendFundingSigned { ref msg, .. } = events[0] {
					msg.clone()
				} else { panic!("Wrong event type"); }
			};
			$source.handle_funding_signed(&$dest.get_our_node_id(), &funding_signed);

			funding_output
		} }
	}
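
	// Confirm all recorded funding transactions at height 1, then advance the best block to
	// height 99 so the channels have enough confirmations to be used.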
	macro_rules! confirm_txn {
		($node: expr) => { {
			let chain_hash = genesis_block(Network::Bitcoin).block_hash();
			let mut header = BlockHeader { version: 0x20000000, prev_blockhash: chain_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
			let txdata: Vec<_> = channel_txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect();
			$node.transactions_confirmed(&header, &txdata, 1);
			for _ in 2..100 {
				header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
			}
			$node.best_block_updated(&header, 99);
		} }
	}

	macro_rules! lock_fundings {
		($nodes: expr) => { {
			let mut node_events = Vec::new();
			for node in $nodes.iter() {
				node_events.push(node.get_and_clear_pending_msg_events());
			}
			for (idx, node_event) in node_events.iter().enumerate() {
				for event in node_event {
					if let events::MessageSendEvent::SendFundingLocked { ref node_id, ref msg } = event {
						for node in $nodes.iter() {
							if node.get_our_node_id() == *node_id {
								node.handle_funding_locked(&$nodes[idx].get_our_node_id(), msg);
							}
						}
					} else { panic!("Wrong event type"); }
				}
			}

			for node in $nodes.iter() {
				let events = node.get_and_clear_pending_msg_events();
				for event in events {
					if let events::MessageSendEvent::SendAnnouncementSignatures { .. } = event {
					} else { panic!("Wrong event type"); }
				}
			}
		} }
	}
	// 3 nodes is enough to hit all the possible cases, notably unknown-source-unknown-dest
	// forwarding.
	let (node_a, mut monitor_a, keys_manager_a) = make_node!(0);
	let (node_b, mut monitor_b, keys_manager_b) = make_node!(1);
	let (node_c, mut monitor_c, keys_manager_c) = make_node!(2);

	let mut nodes = [node_a, node_b, node_c];

	let chan_1_funding = make_channel!(nodes[0], nodes[1], 0);
	let chan_2_funding = make_channel!(nodes[1], nodes[2], 1);

	for node in nodes.iter() {
		confirm_txn!(node);
	}

	lock_fundings!(nodes);

	let chan_a = nodes[0].list_usable_channels()[0].short_channel_id.unwrap();
	let chan_b = nodes[2].list_usable_channels()[0].short_channel_id.unwrap();

	let mut payment_id: u8 = 0;

	let mut chan_a_disconnected = false;
	let mut chan_b_disconnected = false;
	let mut ab_events = Vec::new();
	let mut ba_events = Vec::new();
	let mut bc_events = Vec::new();
	let mut cb_events = Vec::new();

	let mut node_a_ser = VecWriter(Vec::new());
	nodes[0].write(&mut node_a_ser).unwrap();
	let mut node_b_ser = VecWriter(Vec::new());
	nodes[1].write(&mut node_b_ser).unwrap();
	let mut node_c_ser = VecWriter(Vec::new());
	nodes[2].write(&mut node_c_ser).unwrap();
	macro_rules! test_return {
		() => { {
			assert_eq!(nodes[0].list_channels().len(), 1);
			assert_eq!(nodes[1].list_channels().len(), 2);
			assert_eq!(nodes[2].list_channels().len(), 1);
			return;
		} }
	}

	let mut read_pos = 0;
	macro_rules! get_slice {
		($len: expr) => {
			{
				let slice_len = $len as usize;
				if data.len() < read_pos + slice_len {
					test_return!();
				}
				read_pos += slice_len;
				&data[read_pos - slice_len..read_pos]
			}
		}
	}
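
	// Main fuzz loop: each iteration reads one command byte from the fuzz input and dispatches it
	// to an action (fail/restore monitor updates, (dis)connect peers, deliver messages, process
	// events, reload nodes, or send payments of various sizes).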
	loop {
		// Push any events from Node B onto ba_events and bc_events
		macro_rules! push_excess_b_events {
			($excess_events: expr, $expect_drop_node: expr) => { {
				let a_id = nodes[0].get_our_node_id();
				let expect_drop_node: Option<usize> = $expect_drop_node;
				let expect_drop_id = if let Some(id) = expect_drop_node { Some(nodes[id].get_our_node_id()) } else { None };
				for event in $excess_events {
					let push_a = match event {
						events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
							if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
							*node_id == a_id
						},
						events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => {
							if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
							*node_id == a_id
						},
						events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => {
							if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
							*node_id == a_id
						},
						events::MessageSendEvent::SendFundingLocked { .. } => continue,
						events::MessageSendEvent::SendAnnouncementSignatures { .. } => continue,
						events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => continue,
						events::MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => {
							assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
							if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
							*node_id == a_id
						},
						_ => panic!("Unhandled message event {:?}", event),
					};
					if push_a { ba_events.push(event); } else { bc_events.push(event); }
				}
			} }
		}

		// While delivering messages, we select across three possible message selection processes
		// to ensure we get as much coverage as possible. See the individual enum variants for more
		// details.
		#[derive(PartialEq)]
		enum ProcessMessages {
			/// Deliver all available messages, including fetching any new messages from
			/// `get_and_clear_pending_msg_events()` (which may have side effects).
			AllMessages,
			/// Call `get_and_clear_pending_msg_events()` first, and then deliver up to one
			/// message (which may already be queued).
			OneMessage,
			/// Deliver up to one already-queued message. This avoids any potential side-effects
			/// of `get_and_clear_pending_msg_events()` (eg freeing the HTLC holding cell), which
			/// provides potentially more coverage.
			OnePendingMessage,
		}
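
		// Deliver queued (and optionally freshly-fetched) message events from node $node to their
		// destinations, optionally corrupting update_add_htlc messages, and return whether any
		// message was handled.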
		macro_rules! process_msg_events {
			($node: expr, $corrupt_forward: expr, $limit_events: expr) => { {
				let mut events = if $node == 1 {
					let mut new_events = Vec::new();
					mem::swap(&mut new_events, &mut ba_events);
					new_events.extend_from_slice(&bc_events[..]);
					bc_events.clear();
					new_events
				} else if $node == 0 {
					let mut new_events = Vec::new();
					mem::swap(&mut new_events, &mut ab_events);
					new_events
				} else {
					let mut new_events = Vec::new();
					mem::swap(&mut new_events, &mut cb_events);
					new_events
				};
				let mut new_events = Vec::new();
				if $limit_events != ProcessMessages::OnePendingMessage {
					new_events = nodes[$node].get_and_clear_pending_msg_events();
				}
				let mut had_events = false;
				let mut events_iter = events.drain(..).chain(new_events.drain(..));
				let mut extra_ev = None;
				for event in &mut events_iter {
					had_events = true;
					match event {
						events::MessageSendEvent::UpdateHTLCs { node_id, updates: CommitmentUpdate { update_add_htlcs, update_fail_htlcs, update_fulfill_htlcs, update_fail_malformed_htlcs, update_fee, commitment_signed } } => {
							for dest in nodes.iter() {
								if dest.get_our_node_id() == node_id {
									assert!(update_fee.is_none());
									for update_add in update_add_htlcs.iter() {
										if !$corrupt_forward {
											dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), update_add);
										} else {
											// Corrupt the update_add_htlc message so that its HMAC
											// check will fail and we generate a
											// update_fail_malformed_htlc instead of an
											// update_fail_htlc as we do when we reject a payment.
											let mut msg_ser = update_add.encode();
											msg_ser[1000] ^= 0xff;
											let new_msg = UpdateAddHTLC::read(&mut Cursor::new(&msg_ser)).unwrap();
											dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &new_msg);
										}
									}
									for update_fulfill in update_fulfill_htlcs.iter() {
										dest.handle_update_fulfill_htlc(&nodes[$node].get_our_node_id(), update_fulfill);
									}
									for update_fail in update_fail_htlcs.iter() {
										dest.handle_update_fail_htlc(&nodes[$node].get_our_node_id(), update_fail);
									}
									for update_fail_malformed in update_fail_malformed_htlcs.iter() {
										dest.handle_update_fail_malformed_htlc(&nodes[$node].get_our_node_id(), update_fail_malformed);
									}
									let processed_change = !update_add_htlcs.is_empty() || !update_fulfill_htlcs.is_empty() ||
										!update_fail_htlcs.is_empty() || !update_fail_malformed_htlcs.is_empty();
									if $limit_events != ProcessMessages::AllMessages && processed_change {
										// If we only want to process some messages, don't deliver the CS until later.
										extra_ev = Some(events::MessageSendEvent::UpdateHTLCs { node_id, updates: CommitmentUpdate {
											update_add_htlcs: Vec::new(),
											update_fail_htlcs: Vec::new(),
											update_fulfill_htlcs: Vec::new(),
											update_fail_malformed_htlcs: Vec::new(),
											update_fee: None,
											commitment_signed
										} });
										break;
									}
									dest.handle_commitment_signed(&nodes[$node].get_our_node_id(), &commitment_signed);
									break;
								}
							}
						},
						events::MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
							for dest in nodes.iter() {
								if dest.get_our_node_id() == *node_id {
									dest.handle_revoke_and_ack(&nodes[$node].get_our_node_id(), msg);
								}
							}
						},
						events::MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
							for dest in nodes.iter() {
								if dest.get_our_node_id() == *node_id {
									dest.handle_channel_reestablish(&nodes[$node].get_our_node_id(), msg);
								}
							}
						},
						events::MessageSendEvent::SendFundingLocked { .. } => {
							// Can be generated as a reestablish response
						},
						events::MessageSendEvent::SendAnnouncementSignatures { .. } => {
							// Can be generated as a reestablish response
						},
						events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {
							// Can be generated due to a payment forward being rejected due to a
							// channel having previously failed a monitor update
						},
						events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
							// When we reconnect we will resend a channel_update to make sure our
							// counterparty has the latest parameters for receiving payments
							// through us. We do, however, check that the message does not include
							// the "disabled" bit, as we should never ever have a channel which is
							// disabled when we send such an update (or it may indicate channel
							// force-close which we should detect as an error).
							assert_eq!(msg.contents.flags & 2, 0);
						},
						_ => panic!("Unhandled message event {:?}", event),
					}
					if $limit_events != ProcessMessages::AllMessages {
						break;
					}
				}
				if $node == 1 {
					push_excess_b_events!(extra_ev.into_iter().chain(events_iter), None);
				} else if $node == 0 {
					if let Some(ev) = extra_ev { ab_events.push(ev); }
					for event in events_iter { ab_events.push(event); }
				} else {
					if let Some(ev) = extra_ev { cb_events.push(ev); }
					for event in events_iter { cb_events.push(event); }
				}
				had_events
			} }
		}
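
		// After a disconnect, clear out any message events queued for or by the disconnected
		// peer, asserting that none of them carry a channel_update with the disable bit set.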
		macro_rules! drain_msg_events_on_disconnect {
			($counterparty_id: expr) => { {
				if $counterparty_id == 0 {
					for event in nodes[0].get_and_clear_pending_msg_events() {
						match event {
							events::MessageSendEvent::UpdateHTLCs { .. } => {},
							events::MessageSendEvent::SendRevokeAndACK { .. } => {},
							events::MessageSendEvent::SendChannelReestablish { .. } => {},
							events::MessageSendEvent::SendFundingLocked { .. } => {},
							events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
							events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
							events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
								assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
							},
							_ => panic!("Unhandled message event"),
						}
					}
					push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(0));
					ab_events.clear();
					ba_events.clear();
				} else {
					for event in nodes[2].get_and_clear_pending_msg_events() {
						match event {
							events::MessageSendEvent::UpdateHTLCs { .. } => {},
							events::MessageSendEvent::SendRevokeAndACK { .. } => {},
							events::MessageSendEvent::SendChannelReestablish { .. } => {},
							events::MessageSendEvent::SendFundingLocked { .. } => {},
							events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
							events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
							events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
								assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
							},
							_ => panic!("Unhandled message event"),
						}
					}
					push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(2));
					bc_events.clear();
					cb_events.clear();
				}
			} }
		}
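
		// Process the pending Events on node $node, claiming or failing received payments
		// depending on $fail and running any pending HTLC forwards.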
		macro_rules! process_events {
			($node: expr, $fail: expr) => { {
				// In case we get 256 payments we may have a hash collision, resulting in the
				// second claim/fail call not finding the duplicate-hash HTLC, so we have to
				// deduplicate the calls here.
				let mut claim_set = HashSet::new();
				let mut events = nodes[$node].get_and_clear_pending_events();
				// Sort events so that PendingHTLCsForwardable get processed last. This avoids a
				// case where we first process a PendingHTLCsForwardable, then claim/fail on a
				// PaymentReceived, claiming/failing two HTLCs, but leaving a just-generated
				// PaymentReceived event for the second HTLC in our pending_events (and breaking
				// our claim_set deduplication).
				events.sort_by(|a, b| {
					if let events::Event::PaymentReceived { .. } = a {
						if let events::Event::PendingHTLCsForwardable { .. } = b {
							Ordering::Less
						} else { Ordering::Equal }
					} else if let events::Event::PendingHTLCsForwardable { .. } = a {
						if let events::Event::PaymentReceived { .. } = b {
							Ordering::Greater
						} else { Ordering::Equal }
					} else { Ordering::Equal }
				});
				let had_events = !events.is_empty();
				for event in events.drain(..) {
					match event {
						events::Event::PaymentReceived { payment_hash, .. } => {
							if claim_set.insert(payment_hash.0) {
								if $fail {
									assert!(nodes[$node].fail_htlc_backwards(&payment_hash));
								} else {
									assert!(nodes[$node].claim_funds(PaymentPreimage(payment_hash.0)));
								}
							}
						},
						events::Event::PaymentSent { .. } => {},
						events::Event::PaymentFailed { .. } => {},
						events::Event::PendingHTLCsForwardable { .. } => {
							nodes[$node].process_pending_htlc_forwards();
						},
						_ => panic!("Unhandled event"),
					}
				}
				had_events
			} }
		}
		match get_slice!(1)[0] {
			// In general, we keep related message groups close together in binary form, allowing
			// bit-twiddling mutations to have similar effects. This is probably overkill, but no
			// harm in doing so.

			0x00 => *monitor_a.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
			0x01 => *monitor_b.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
			0x02 => *monitor_c.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
			0x04 => *monitor_a.update_ret.lock().unwrap() = Ok(()),
			0x05 => *monitor_b.update_ret.lock().unwrap() = Ok(()),
			0x06 => *monitor_c.update_ret.lock().unwrap() = Ok(()),

			0x08 => {
				if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
					nodes[0].channel_monitor_updated(&chan_1_funding, *id);
				}
			},
			0x09 => {
				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) {
					nodes[1].channel_monitor_updated(&chan_1_funding, *id);
				}
			},
			0x0a => {
				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) {
					nodes[1].channel_monitor_updated(&chan_2_funding, *id);
				}
			},
			0x0b => {
				if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) {
					nodes[2].channel_monitor_updated(&chan_2_funding, *id);
				}
			},

			0x0c => {
				if !chan_a_disconnected {
					nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
					nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
					chan_a_disconnected = true;
					drain_msg_events_on_disconnect!(0);
				}
			},
			0x0d => {
				if !chan_b_disconnected {
					nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
					nodes[2].peer_disconnected(&nodes[1].get_our_node_id(), false);
					chan_b_disconnected = true;
					drain_msg_events_on_disconnect!(2);
				}
			},
			0x0e => {
				if chan_a_disconnected {
					nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known() });
					nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: InitFeatures::known() });
					chan_a_disconnected = false;
				}
			},
			0x0f => {
				if chan_b_disconnected {
					nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: InitFeatures::known() });
					nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known() });
					chan_b_disconnected = false;
				}
			},

			0x10 => { process_msg_events!(0, true, ProcessMessages::AllMessages); },
			0x11 => { process_msg_events!(0, false, ProcessMessages::AllMessages); },
			0x12 => { process_msg_events!(0, true, ProcessMessages::OneMessage); },
			0x13 => { process_msg_events!(0, false, ProcessMessages::OneMessage); },
			0x14 => { process_msg_events!(0, true, ProcessMessages::OnePendingMessage); },
			0x15 => { process_msg_events!(0, false, ProcessMessages::OnePendingMessage); },
			0x16 => { process_events!(0, true); },
			0x17 => { process_events!(0, false); },
			0x18 => { process_msg_events!(1, true, ProcessMessages::AllMessages); },
			0x19 => { process_msg_events!(1, false, ProcessMessages::AllMessages); },
			0x1a => { process_msg_events!(1, true, ProcessMessages::OneMessage); },
			0x1b => { process_msg_events!(1, false, ProcessMessages::OneMessage); },
			0x1c => { process_msg_events!(1, true, ProcessMessages::OnePendingMessage); },
			0x1d => { process_msg_events!(1, false, ProcessMessages::OnePendingMessage); },
			0x1e => { process_events!(1, true); },
			0x1f => { process_events!(1, false); },
			0x20 => { process_msg_events!(2, true, ProcessMessages::AllMessages); },
			0x21 => { process_msg_events!(2, false, ProcessMessages::AllMessages); },
			0x22 => { process_msg_events!(2, true, ProcessMessages::OneMessage); },
			0x23 => { process_msg_events!(2, false, ProcessMessages::OneMessage); },
			0x24 => { process_msg_events!(2, true, ProcessMessages::OnePendingMessage); },
			0x25 => { process_msg_events!(2, false, ProcessMessages::OnePendingMessage); },
			0x26 => { process_events!(2, true); },
			0x27 => { process_events!(2, false); },
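
			// 0x2c/0x2d/0x2e: reload nodes A/B/C respectively from their last serialized state,
			// disconnecting the relevant peers first so the reloaded node can reestablish cleanly.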
			0x2c => {
				if !chan_a_disconnected {
					nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
					chan_a_disconnected = true;
					drain_msg_events_on_disconnect!(0);
				}
				if monitor_a.should_update_manager.load(atomic::Ordering::Relaxed) {
					node_a_ser.0.clear();
					nodes[0].write(&mut node_a_ser).unwrap();
				}
				let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a, keys_manager_a);
				nodes[0] = new_node_a;
				monitor_a = new_monitor_a;
			},
			0x2d => {
				if !chan_a_disconnected {
					nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
					chan_a_disconnected = true;
					nodes[0].get_and_clear_pending_msg_events();
					ab_events.clear();
					ba_events.clear();
				}
				if !chan_b_disconnected {
					nodes[2].peer_disconnected(&nodes[1].get_our_node_id(), false);
					chan_b_disconnected = true;
					nodes[2].get_and_clear_pending_msg_events();
					bc_events.clear();
					cb_events.clear();
				}
				let (new_node_b, new_monitor_b) = reload_node!(node_b_ser, 1, monitor_b, keys_manager_b);
				nodes[1] = new_node_b;
				monitor_b = new_monitor_b;
			},
			0x2e => {
				if !chan_b_disconnected {
					nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
					chan_b_disconnected = true;
					drain_msg_events_on_disconnect!(2);
				}
				if monitor_c.should_update_manager.load(atomic::Ordering::Relaxed) {
					node_c_ser.0.clear();
					nodes[2].write(&mut node_c_ser).unwrap();
				}
				let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c, keys_manager_c);
				nodes[2] = new_node_c;
				monitor_c = new_monitor_c;
			},
// 1/10th the channel size:
2021-04-21 17:03:57 +00:00
			0x30 => { send_payment(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut payment_id); },
			0x31 => { send_payment(&nodes[1], &nodes[0], chan_a, 10_000_000, &mut payment_id); },
			0x32 => { send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut payment_id); },
			0x33 => { send_payment(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut payment_id); },
			0x34 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000_000, &mut payment_id); },
			0x35 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000_000, &mut payment_id); },
			0x38 => { send_payment(&nodes[0], &nodes[1], chan_a, 1_000_000, &mut payment_id); },
			0x39 => { send_payment(&nodes[1], &nodes[0], chan_a, 1_000_000, &mut payment_id); },
			0x3a => { send_payment(&nodes[1], &nodes[2], chan_b, 1_000_000, &mut payment_id); },
			0x3b => { send_payment(&nodes[2], &nodes[1], chan_b, 1_000_000, &mut payment_id); },
			0x3c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000_000, &mut payment_id); },
			0x3d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000_000, &mut payment_id); },
			0x40 => { send_payment(&nodes[0], &nodes[1], chan_a, 100_000, &mut payment_id); },
			0x41 => { send_payment(&nodes[1], &nodes[0], chan_a, 100_000, &mut payment_id); },
			0x42 => { send_payment(&nodes[1], &nodes[2], chan_b, 100_000, &mut payment_id); },
			0x43 => { send_payment(&nodes[2], &nodes[1], chan_b, 100_000, &mut payment_id); },
			0x44 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100_000, &mut payment_id); },
			0x45 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100_000, &mut payment_id); },
			0x48 => { send_payment(&nodes[0], &nodes[1], chan_a, 10_000, &mut payment_id); },
			0x49 => { send_payment(&nodes[1], &nodes[0], chan_a, 10_000, &mut payment_id); },
			0x4a => { send_payment(&nodes[1], &nodes[2], chan_b, 10_000, &mut payment_id); },
			0x4b => { send_payment(&nodes[2], &nodes[1], chan_b, 10_000, &mut payment_id); },
			0x4c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000, &mut payment_id); },
			0x4d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000, &mut payment_id); },
			0x50 => { send_payment(&nodes[0], &nodes[1], chan_a, 1_000, &mut payment_id); },
			0x51 => { send_payment(&nodes[1], &nodes[0], chan_a, 1_000, &mut payment_id); },
			0x52 => { send_payment(&nodes[1], &nodes[2], chan_b, 1_000, &mut payment_id); },
			0x53 => { send_payment(&nodes[2], &nodes[1], chan_b, 1_000, &mut payment_id); },
			0x54 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000, &mut payment_id); },
			0x55 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000, &mut payment_id); },
			0x58 => { send_payment(&nodes[0], &nodes[1], chan_a, 100, &mut payment_id); },
			0x59 => { send_payment(&nodes[1], &nodes[0], chan_a, 100, &mut payment_id); },
			0x5a => { send_payment(&nodes[1], &nodes[2], chan_b, 100, &mut payment_id); },
			0x5b => { send_payment(&nodes[2], &nodes[1], chan_b, 100, &mut payment_id); },
			0x5c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100, &mut payment_id); },
			0x5d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100, &mut payment_id); },
			0x60 => { send_payment(&nodes[0], &nodes[1], chan_a, 10, &mut payment_id); },
			0x61 => { send_payment(&nodes[1], &nodes[0], chan_a, 10, &mut payment_id); },
			0x62 => { send_payment(&nodes[1], &nodes[2], chan_b, 10, &mut payment_id); },
			0x63 => { send_payment(&nodes[2], &nodes[1], chan_b, 10, &mut payment_id); },
			0x64 => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10, &mut payment_id); },
			0x65 => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10, &mut payment_id); },
			0x68 => { send_payment(&nodes[0], &nodes[1], chan_a, 1, &mut payment_id); },
			0x69 => { send_payment(&nodes[1], &nodes[0], chan_a, 1, &mut payment_id); },
			0x6a => { send_payment(&nodes[1], &nodes[2], chan_b, 1, &mut payment_id); },
			0x6b => { send_payment(&nodes[2], &nodes[1], chan_b, 1, &mut payment_id); },
			0x6c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1, &mut payment_id); },
			0x6d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1, &mut payment_id); },
			0xff => {
				// Test that no channel is in a stuck state where neither party can send funds even
				// after we resolve all pending events.
				// First make sure there are no pending monitor updates, resetting the error state
				// and calling channel_monitor_updated for each monitor.
				*monitor_a.update_ret.lock().unwrap() = Ok(());
				*monitor_b.update_ret.lock().unwrap() = Ok(());
				*monitor_c.update_ret.lock().unwrap() = Ok(());
				if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
					nodes[0].channel_monitor_updated(&chan_1_funding, *id);
				}
				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) {
					nodes[1].channel_monitor_updated(&chan_1_funding, *id);
				}
				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) {
					nodes[1].channel_monitor_updated(&chan_2_funding, *id);
				}
				if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) {
					nodes[2].channel_monitor_updated(&chan_2_funding, *id);
				}
				// Next, make sure peers are all connected to each other.
				if chan_a_disconnected {
					nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known() });
					nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: InitFeatures::known() });
					chan_a_disconnected = false;
				}
				if chan_b_disconnected {
					nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: InitFeatures::known() });
					nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known() });
					chan_b_disconnected = false;
				}
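				// Drain message and non-message events on all three nodes until everything goes
				// quiet; if that takes more than 100 iterations, something is wedged.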
				for i in 0..std::usize::MAX {
					if i == 100 { panic!("It may take many iterations to settle the state, but it should not take forever"); }
					// Then, make sure any current forwards make their way to their destination.
					if process_msg_events!(0, false, ProcessMessages::AllMessages) { continue; }
					if process_msg_events!(1, false, ProcessMessages::AllMessages) { continue; }
					if process_msg_events!(2, false, ProcessMessages::AllMessages) { continue; }
					// ...making sure any pending PendingHTLCsForwardable events are handled and
					// payments claimed.
					if process_events!(0, false) { continue; }
					if process_events!(1, false) { continue; }
					if process_events!(2, false) { continue; }
					break;
				}
				// Finally, make sure that at least one end of each channel can make a substantial payment.
				assert!(
					send_payment(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut payment_id) ||
					send_payment(&nodes[1], &nodes[0], chan_a, 10_000_000, &mut payment_id));
				assert!(
					send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut payment_id) ||
					send_payment(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut payment_id));
			},
			_ => test_return!(),
		}
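		// After every action, re-serialize each node's ChannelManager so a later reload_node!
		// restarts it from the state as of this point, then clear the should_update_manager flag.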
		node_a_ser.0.clear();
		nodes[0].write(&mut node_a_ser).unwrap();
		monitor_a.should_update_manager.store(false, atomic::Ordering::Relaxed);
		node_b_ser.0.clear();
		nodes[1].write(&mut node_b_ser).unwrap();
		monitor_b.should_update_manager.store(false, atomic::Ordering::Relaxed);
		node_c_ser.0.clear();
		nodes[2].write(&mut node_c_ser).unwrap();
		monitor_c.should_update_manager.store(false, atomic::Ordering::Relaxed);
	}
}
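
// Public entry points. `chanmon_consistency_test` runs the consistency test over a raw input
// slice, logging through `out`; a minimal way to drive it by hand (e.g. when replaying an
// interesting input; `input_bytes` is just a placeholder name) is:
//
//     chanmon_consistency_test(&input_bytes[..], test_logger::DevNull {});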
pub fn chanmon_consistency_test<Out: test_logger::Output>(data: &[u8], out: Out) {
	do_test(data, out);
}
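
// C-callable variant (hence #[no_mangle] and extern "C") for harnesses that hand us a raw
// pointer and length; the caller must guarantee `data` points to `datalen` readable bytes.
// Log output is discarded via `test_logger::DevNull`.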
#[no_mangle]
pub extern "C" fn chanmon_consistency_run(data: *const u8, datalen: usize) {
	do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull {});
}