//! Test that monitor update failures don't get our channel state out of sync.
//! One of the biggest concerns with the monitor update failure handling code is that messages
//! resent after monitor updating is restored are delivered out-of-order, resulting in
//! commitment_signed messages having "invalid signatures".
//! To test this we stand up a network of three nodes and read bytes from the fuzz input to denote
//! actions such as sending payments, handling events, or changing monitor update return values on
//! a per-node basis. This should allow it to find any cases where the ordering of actions results
//! in us getting out of sync with ourselves, and, assuming at least one of our receive- or
//! send-side handling is correct, other peers. We consider it a failure if any action results in a
//! channel being force-closed.
use bitcoin::BitcoinHash;
use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::{Transaction, TxOut};
use bitcoin::blockdata::script::{Builder, Script};
use bitcoin::blockdata::opcodes;
use bitcoin::network::constants::Network;

use bitcoin_hashes::Hash as TraitImport;
use bitcoin_hashes::hash160::Hash as Hash160;
use bitcoin_hashes::sha256::Hash as Sha256;
use bitcoin_hashes::sha256d::Hash as Sha256d;

use lightning::chain::chaininterface;
use lightning::chain::transaction::OutPoint;
use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, ChainListener, FeeEstimator, ChainWatchInterfaceUtil};
use lightning::chain::keysinterface::{KeysInterface, InMemoryChannelKeys};
use lightning::ln::channelmonitor;
use lightning::ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, HTLCUpdate};
use lightning::ln::channelmanager::{ChannelManager, PaymentHash, PaymentPreimage, ChannelManagerReadArgs};
use lightning::ln::router::{Route, RouteHop};
use lightning::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, ErrorAction, UpdateAddHTLC, Init};
use lightning::util::enforcing_trait_impls::EnforcingChannelKeys;
use lightning::util::events;
use lightning::util::logger::Logger;
use lightning::util::config::UserConfig;
use lightning::util::events::{EventsProvider, MessageSendEventsProvider};
use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer};

use utils::test_logger;

use secp256k1::key::{PublicKey, SecretKey};
use secp256k1::Secp256k1;

use std::mem;
use std::cmp::Ordering;
use std::collections::{HashSet, hash_map, HashMap};
use std::sync::{Arc, Mutex};
use std::sync::atomic;
use std::io::Cursor;
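
// Always returns 253 sat-per-1000-weight (just above the 1 sat/vbyte floor), presumably to keep
// feerates, and thus commitment transactions, deterministic across fuzz runs.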
struct FuzzEstimator {}
impl FeeEstimator for FuzzEstimator {
	fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u64 {
		253
	}
}

pub struct TestBroadcaster {}
impl BroadcasterInterface for TestBroadcaster {
	fn broadcast_transaction(&self, _tx: &Transaction) { }
}
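
// A simple in-memory Writer sink: the fuzzer serializes ChannelMonitors and ChannelManagers into
// it so that nodes can later be reloaded from the captured bytes.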
pub struct VecWriter(pub Vec<u8>);
impl Writer for VecWriter {
	fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
		self.0.extend_from_slice(buf);
		Ok(())
	}
	fn size_hint(&mut self, size: usize) {
		self.0.reserve_exact(size);
	}
}
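
// Set while test_restore_channel_monitor runs so that add_update_monitor can distinguish
// restore-driven updates from regular ones; the static mut relies on the fuzz target running
// single-threaded.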
static mut IN_RESTORE: bool = false;
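
// Wraps a SimpleManyChannelMonitor, letting the fuzz input dictate the add_update_monitor return
// value and tracking the latest serialized copy of each monitor (plus whether each monitor's last
// update succeeded) for later use by reload_node!.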
pub struct TestChannelMonitor {
	pub simple_monitor: Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys>>,
	pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
	pub latest_good_update: Mutex<HashMap<OutPoint, Vec<u8>>>,
	pub latest_update_good: Mutex<HashMap<OutPoint, bool>>,
	pub latest_updates_good_at_last_ser: Mutex<HashMap<OutPoint, bool>>,
	pub should_update_manager: atomic::AtomicBool,
}
impl TestChannelMonitor {
	pub fn new(chain_monitor: Arc<dyn chaininterface::ChainWatchInterface>, broadcaster: Arc<dyn chaininterface::BroadcasterInterface>, logger: Arc<dyn Logger>, feeest: Arc<dyn chaininterface::FeeEstimator>) -> Self {
		Self {
			simple_monitor: Arc::new(channelmonitor::SimpleManyChannelMonitor::new(chain_monitor, broadcaster, logger, feeest)),
			update_ret: Mutex::new(Ok(())),
			latest_good_update: Mutex::new(HashMap::new()),
			latest_update_good: Mutex::new(HashMap::new()),
			latest_updates_good_at_last_ser: Mutex::new(HashMap::new()),
			should_update_manager: atomic::AtomicBool::new(false),
		}
	}
}
impl channelmonitor::ManyChannelMonitor<EnforcingChannelKeys> for TestChannelMonitor {
	fn add_update_monitor(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingChannelKeys>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
		let ret = self.update_ret.lock().unwrap().clone();
		if let Ok(()) = ret {
			let mut ser = VecWriter(Vec::new());
			monitor.write_for_disk(&mut ser).unwrap();
			self.latest_good_update.lock().unwrap().insert(funding_txo, ser.0);
			match self.latest_update_good.lock().unwrap().entry(funding_txo) {
				hash_map::Entry::Vacant(e) => { e.insert(true); },
				hash_map::Entry::Occupied(mut e) => {
					if !e.get() && unsafe { IN_RESTORE } {
						// Technically we can't consider an update to be "good" unless we're doing
						// it in response to a test_restore_channel_monitor as the channel may
						// still be waiting on such a call, so only set us to good if we're in the
						// middle of a restore call.
						e.insert(true);
					}
				},
			}
			self.should_update_manager.store(true, atomic::Ordering::Relaxed);
		} else {
			self.latest_update_good.lock().unwrap().insert(funding_txo, false);
		}
		assert!(self.simple_monitor.add_update_monitor(funding_txo, monitor).is_ok());
		ret
	}

	fn fetch_pending_htlc_updated(&self) -> Vec<HTLCUpdate> {
		return self.simple_monitor.fetch_pending_htlc_updated();
	}
}
struct KeyProvider {
	node_id: u8,
	session_id: atomic::AtomicU8,
	channel_id: atomic::AtomicU8,
}
impl KeysInterface for KeyProvider {
	type ChanKeySigner = EnforcingChannelKeys;

	fn get_node_secret(&self) -> SecretKey {
		SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, self.node_id]).unwrap()
	}

	fn get_destination_script(&self) -> Script {
		let secp_ctx = Secp256k1::signing_only();
		let channel_monitor_claim_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, self.node_id]).unwrap();
		let our_channel_monitor_claim_key_hash = Hash160::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
		Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&our_channel_monitor_claim_key_hash[..]).into_script()
	}

	fn get_shutdown_pubkey(&self) -> PublicKey {
		let secp_ctx = Secp256k1::signing_only();
		PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, self.node_id]).unwrap())
	}

	fn get_channel_keys(&self, _inbound: bool, channel_value_satoshis: u64) -> EnforcingChannelKeys {
		let secp_ctx = Secp256k1::signing_only();
		EnforcingChannelKeys::new(InMemoryChannelKeys::new(
			&secp_ctx,
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, self.node_id]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, self.node_id]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, self.node_id]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, self.node_id]).unwrap(),
			SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, self.node_id]).unwrap(),
			[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, self.node_id],
			channel_value_satoshis,
		))
	}

	fn get_onion_rand(&self) -> (SecretKey, [u8; 32]) {
		let id = self.session_id.fetch_add(1, atomic::Ordering::Relaxed);
		(SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, id, 10, self.node_id]).unwrap(),
		[0; 32])
	}

	fn get_channel_id(&self) -> [u8; 32] {
		let id = self.channel_id.fetch_add(1, atomic::Ordering::Relaxed);
		[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, id, 11, self.node_id]
	}
}
#[inline]
pub fn do_test(data: &[u8]) {
	let fee_est = Arc::new(FuzzEstimator {});
	let broadcast = Arc::new(TestBroadcaster {});

	macro_rules! make_node {
		($node_id: expr) => { {
			let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string()));
			let watch = Arc::new(ChainWatchInterfaceUtil::new(Network::Bitcoin, Arc::clone(&logger)));
			let monitor = Arc::new(TestChannelMonitor::new(watch.clone(), broadcast.clone(), logger.clone(), fee_est.clone()));

			let keys_manager = Arc::new(KeyProvider { node_id: $node_id, session_id: atomic::AtomicU8::new(0), channel_id: atomic::AtomicU8::new(0) });
			let mut config = UserConfig::default();
			config.channel_options.fee_proportional_millionths = 0;
			config.channel_options.announced_channel = true;
			config.peer_channel_config_limits.min_dust_limit_satoshis = 0;
			(Arc::new(ChannelManager::new(Network::Bitcoin, fee_est.clone(), monitor.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, 0).unwrap()),
			monitor)
		} }
	}
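
	// Simulates a node restart: rebuilds the node from its last serialized ChannelManager plus
	// the latest good copy of each ChannelMonitor. If the most recent update to any monitor had
	// failed when we last serialized, the reloaded node may legitimately force-close on its
	// counterparty, so in that case we end the test early instead.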
	macro_rules! reload_node {
		($ser: expr, $node_id: expr, $old_monitors: expr) => { {
			let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string()));
			let watch = Arc::new(ChainWatchInterfaceUtil::new(Network::Bitcoin, Arc::clone(&logger)));
			let monitor = Arc::new(TestChannelMonitor::new(watch.clone(), broadcast.clone(), logger.clone(), fee_est.clone()));

			let keys_manager = Arc::new(KeyProvider { node_id: $node_id, session_id: atomic::AtomicU8::new(0), channel_id: atomic::AtomicU8::new(0) });
			let mut config = UserConfig::default();
			config.channel_options.fee_proportional_millionths = 0;
			config.channel_options.announced_channel = true;
			config.peer_channel_config_limits.min_dust_limit_satoshis = 0;

			let mut monitors = HashMap::new();
			let mut old_monitors = $old_monitors.latest_good_update.lock().unwrap();
			for (outpoint, monitor_ser) in old_monitors.drain() {
				monitors.insert(outpoint, <(Sha256d, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut Cursor::new(&monitor_ser), Arc::clone(&logger)).expect("Failed to read monitor").1);
				monitor.latest_good_update.lock().unwrap().insert(outpoint, monitor_ser);
			}
			let mut monitor_refs = HashMap::new();
			for (outpoint, monitor) in monitors.iter_mut() {
				monitor_refs.insert(*outpoint, monitor);
			}

			let read_args = ChannelManagerReadArgs {
				keys_manager,
				fee_estimator: fee_est.clone(),
				monitor: monitor.clone(),
				tx_broadcaster: broadcast.clone(),
				logger,
				default_config: config,
				channel_monitors: &mut monitor_refs,
			};

			let res = (<(Sha256d, ChannelManager<EnforcingChannelKeys, Arc<TestChannelMonitor>>)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, monitor);
			for (_, was_good) in $old_monitors.latest_updates_good_at_last_ser.lock().unwrap().iter() {
				if !was_good {
					// If the last time we updated a monitor we didn't successfully update (and we
					// have since updated our serialized copy of the ChannelManager) we may
					// force-close the channel on our counterparty because we know we're missing
					// something. Thus, we just return here since we can't continue to test.
					return;
				}
			}
			res
		} }
	}

	let mut channel_txn = Vec::new();
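	// Drives the full channel-open handshake between $source and $dest (open_channel through
	// funding_signed), stashing the funding transaction in channel_txn for later confirmation.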
	macro_rules! make_channel {
		($source: expr, $dest: expr, $chan_id: expr) => { {
			$source.create_channel($dest.get_our_node_id(), 10000000, 42, 0).unwrap();
			let open_channel = {
				let events = $source.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendOpenChannel { ref msg, .. } = events[0] {
					msg.clone()
				} else { panic!("Wrong event type"); }
			};

			$dest.handle_open_channel(&$source.get_our_node_id(), InitFeatures::supported(), &open_channel);
			let accept_channel = {
				let events = $dest.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendAcceptChannel { ref msg, .. } = events[0] {
					msg.clone()
				} else { panic!("Wrong event type"); }
			};

			$source.handle_accept_channel(&$dest.get_our_node_id(), InitFeatures::supported(), &accept_channel);
			{
				let events = $source.get_and_clear_pending_events();
				assert_eq!(events.len(), 1);
				if let events::Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, .. } = events[0] {
					let tx = Transaction { version: $chan_id, lock_time: 0, input: Vec::new(), output: vec![TxOut {
						value: *channel_value_satoshis, script_pubkey: output_script.clone(),
					}] };
					let funding_output = OutPoint::new(tx.txid(), 0);
					$source.funding_transaction_generated(&temporary_channel_id, funding_output);
					channel_txn.push(tx);
				} else { panic!("Wrong event type"); }
			}

			let funding_created = {
				let events = $source.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendFundingCreated { ref msg, .. } = events[0] {
					msg.clone()
				} else { panic!("Wrong event type"); }
			};
			$dest.handle_funding_created(&$source.get_our_node_id(), &funding_created);

			let funding_signed = {
				let events = $dest.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				if let events::MessageSendEvent::SendFundingSigned { ref msg, .. } = events[0] {
					msg.clone()
				} else { panic!("Wrong event type"); }
			};
			$source.handle_funding_signed(&$dest.get_our_node_id(), &funding_signed);

			{
				let events = $source.get_and_clear_pending_events();
				assert_eq!(events.len(), 1);
				if let events::Event::FundingBroadcastSafe { .. } = events[0] {
				} else { panic!("Wrong event type"); }
			}
		} }
	}
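
	// Connects the funding transactions in block 1 and then 98 empty blocks on top, so that every
	// channel is deeply confirmed by the time we start sending payments.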
	macro_rules! confirm_txn {
		($node: expr) => { {
			let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
			let mut txn = Vec::with_capacity(channel_txn.len());
			let mut posn = Vec::with_capacity(channel_txn.len());
			for i in 0..channel_txn.len() {
				txn.push(&channel_txn[i]);
				posn.push(i as u32 + 1);
			}
			$node.block_connected(&header, 1, &txn, &posn);
			for i in 2..100 {
				header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
				$node.block_connected(&header, i, &Vec::new(), &[0; 0]);
			}
		} }
	}
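
	// Delivers each node's funding_locked to its counterparty and swallows the resulting
	// announcement_signatures, leaving every channel usable.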
	macro_rules! lock_fundings {
		($nodes: expr) => { {
			let mut node_events = Vec::new();
			for node in $nodes.iter() {
				node_events.push(node.get_and_clear_pending_msg_events());
			}
			for (idx, node_event) in node_events.iter().enumerate() {
				for event in node_event {
					if let events::MessageSendEvent::SendFundingLocked { ref node_id, ref msg } = event {
						for node in $nodes.iter() {
							if node.get_our_node_id() == *node_id {
								node.handle_funding_locked(&$nodes[idx].get_our_node_id(), msg);
							}
						}
					} else { panic!("Wrong event type"); }
				}
			}

			for node in $nodes.iter() {
				let events = node.get_and_clear_pending_msg_events();
				for event in events {
					if let events::MessageSendEvent::SendAnnouncementSignatures { .. } = event {
					} else { panic!("Wrong event type"); }
				}
			}
		} }
	}
	// 3 nodes is enough to hit all the possible cases, notably unknown-source-unknown-dest
	// forwarding.
	let (mut node_a, mut monitor_a) = make_node!(0);
	let (mut node_b, mut monitor_b) = make_node!(1);
	let (mut node_c, mut monitor_c) = make_node!(2);

	let mut nodes = [node_a, node_b, node_c];

	make_channel!(nodes[0], nodes[1], 0);
	make_channel!(nodes[1], nodes[2], 1);

	for node in nodes.iter() {
		confirm_txn!(node);
	}

	lock_fundings!(nodes);

	let chan_a = nodes[0].list_usable_channels()[0].short_channel_id.unwrap();
	let chan_b = nodes[2].list_usable_channels()[0].short_channel_id.unwrap();

	let mut payment_id = 0;

	let mut chan_a_disconnected = false;
	let mut chan_b_disconnected = false;
	let mut ba_events = Vec::new();
	let mut bc_events = Vec::new();

	let mut node_a_ser = VecWriter(Vec::new());
	nodes[0].write(&mut node_a_ser).unwrap();
	let mut node_b_ser = VecWriter(Vec::new());
	nodes[1].write(&mut node_b_ser).unwrap();
	let mut node_c_ser = VecWriter(Vec::new());
	nodes[2].write(&mut node_c_ser).unwrap();
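
	// Success criterion: when we run out of fuzz input, every channel must still be open. A
	// force-close anywhere would change these channel counts and fail the assertions.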
	macro_rules! test_return {
		() => { {
			assert_eq!(nodes[0].list_channels().len(), 1);
			assert_eq!(nodes[1].list_channels().len(), 2);
			assert_eq!(nodes[2].list_channels().len(), 1);
			return;
		} }
	}

	let mut read_pos = 0;
	macro_rules! get_slice {
		($len: expr) => {
			{
				let slice_len = $len as usize;
				if data.len() < read_pos + slice_len {
					test_return!();
				}
				read_pos += slice_len;
				&data[read_pos - slice_len..read_pos]
			}
		}
	}
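
	// Main fuzz loop: each iteration consumes one byte of input and applies the corresponding
	// action; get_slice! ends the test (via test_return!) once the input is exhausted.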
	loop {
		macro_rules! send_payment {
			($source: expr, $dest: expr) => { {
				let payment_hash = Sha256::hash(&[payment_id; 1]);
				payment_id = payment_id.wrapping_add(1);
				if let Err(_) = $source.send_payment(Route {
					hops: vec![RouteHop {
						pubkey: $dest.0.get_our_node_id(),
						node_features: NodeFeatures::empty(),
						short_channel_id: $dest.1,
						channel_features: ChannelFeatures::empty(),
						fee_msat: 5000000,
						cltv_expiry_delta: 200,
					}],
				}, PaymentHash(payment_hash.into_inner())) {
					// Probably ran out of funds
					test_return!();
				}
			} };
			($source: expr, $middle: expr, $dest: expr) => { {
				let payment_hash = Sha256::hash(&[payment_id; 1]);
				payment_id = payment_id.wrapping_add(1);
				if let Err(_) = $source.send_payment(Route {
					hops: vec![RouteHop {
						pubkey: $middle.0.get_our_node_id(),
						node_features: NodeFeatures::empty(),
						short_channel_id: $middle.1,
						channel_features: ChannelFeatures::empty(),
						fee_msat: 50000,
						cltv_expiry_delta: 100,
					}, RouteHop {
						pubkey: $dest.0.get_our_node_id(),
						node_features: NodeFeatures::empty(),
						short_channel_id: $dest.1,
						channel_features: ChannelFeatures::empty(),
						fee_msat: 5000000,
						cltv_expiry_delta: 200,
					}],
				}, PaymentHash(payment_hash.into_inner())) {
					// Probably ran out of funds
					test_return!();
				}
			} }
		}
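
		// Delivers every queued wire message from $node to its peers. With $corrupt_forward set,
		// a byte of each forwarded update_add_htlc is flipped so the destination generates
		// update_fail_malformed_htlc instead of update_fail_htlc.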
		macro_rules! process_msg_events {
			($node: expr, $corrupt_forward: expr) => { {
				let events = if $node == 1 {
					let mut new_events = Vec::new();
					mem::swap(&mut new_events, &mut ba_events);
					new_events.extend_from_slice(&bc_events[..]);
					bc_events.clear();
					new_events
				} else { Vec::new() };
				for event in events.iter().chain(nodes[$node].get_and_clear_pending_msg_events().iter()) {
					match event {
						events::MessageSendEvent::UpdateHTLCs { ref node_id, updates: CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
							for dest in nodes.iter() {
								if dest.get_our_node_id() == *node_id {
									assert!(update_fee.is_none());
									for update_add in update_add_htlcs {
										if !$corrupt_forward {
											dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &update_add);
										} else {
											// Corrupt the update_add_htlc message so that its HMAC
											// check will fail and we generate a
											// update_fail_malformed_htlc instead of an
											// update_fail_htlc as we do when we reject a payment.
											let mut msg_ser = update_add.encode();
											msg_ser[1000] ^= 0xff;
											let new_msg = UpdateAddHTLC::read(&mut Cursor::new(&msg_ser)).unwrap();
											dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &new_msg);
										}
									}
									for update_fulfill in update_fulfill_htlcs {
										dest.handle_update_fulfill_htlc(&nodes[$node].get_our_node_id(), &update_fulfill);
									}
									for update_fail in update_fail_htlcs {
										dest.handle_update_fail_htlc(&nodes[$node].get_our_node_id(), &update_fail);
									}
									for update_fail_malformed in update_fail_malformed_htlcs {
										dest.handle_update_fail_malformed_htlc(&nodes[$node].get_our_node_id(), &update_fail_malformed);
									}
									dest.handle_commitment_signed(&nodes[$node].get_our_node_id(), &commitment_signed);
								}
							}
						},
						events::MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
							for dest in nodes.iter() {
								if dest.get_our_node_id() == *node_id {
									dest.handle_revoke_and_ack(&nodes[$node].get_our_node_id(), msg);
								}
							}
						},
						events::MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
							for dest in nodes.iter() {
								if dest.get_our_node_id() == *node_id {
									dest.handle_channel_reestablish(&nodes[$node].get_our_node_id(), msg);
								}
							}
						},
						events::MessageSendEvent::SendFundingLocked { .. } => {
							// Can be generated as a reestablish response
						},
						events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {
							// Can be generated due to a payment forward being rejected due to a
							// channel having previously failed a monitor update
						},
						events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => {
							// Can be generated at any processing step to send back an error, disconnect
							// peer or just ignore
						},
						_ => panic!("Unhandled message event"),
					}
				}
			} }
		}
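
		// On a disconnect between node B (node 1) and node $counterparty_id, discards messages
		// aimed at the now-disconnected peer and buffers node B's messages for the still-connected
		// peer in ba_events/bc_events, from which process_msg_events!(1) later drains them.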
		macro_rules! drain_msg_events_on_disconnect {
			($counterparty_id: expr) => { {
				if $counterparty_id == 0 {
					for event in nodes[0].get_and_clear_pending_msg_events() {
						match event {
							events::MessageSendEvent::UpdateHTLCs { .. } => {},
							events::MessageSendEvent::SendRevokeAndACK { .. } => {},
							events::MessageSendEvent::SendChannelReestablish { .. } => {},
							events::MessageSendEvent::SendFundingLocked { .. } => {},
							events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
							events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => {},
							_ => panic!("Unhandled message event"),
						}
					}
					ba_events.clear();
				} else {
					for event in nodes[2].get_and_clear_pending_msg_events() {
						match event {
							events::MessageSendEvent::UpdateHTLCs { .. } => {},
							events::MessageSendEvent::SendRevokeAndACK { .. } => {},
							events::MessageSendEvent::SendChannelReestablish { .. } => {},
							events::MessageSendEvent::SendFundingLocked { .. } => {},
							events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
							events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => {},
							_ => panic!("Unhandled message event"),
						}
					}
					bc_events.clear();
				}

				let mut events = nodes[1].get_and_clear_pending_msg_events();
				let drop_node_id = if $counterparty_id == 0 { nodes[0].get_our_node_id() } else { nodes[2].get_our_node_id() };
				let msg_sink = if $counterparty_id == 0 { &mut bc_events } else { &mut ba_events };
				for event in events.drain(..) {
					let push = match event {
						events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
							if *node_id != drop_node_id { true } else { false }
						},
						events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => {
							if *node_id != drop_node_id { true } else { false }
						},
						events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => {
							if *node_id != drop_node_id { true } else { false }
						},
						events::MessageSendEvent::SendFundingLocked { .. } => false,
						events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => false,
						events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => false,
						_ => panic!("Unhandled message event"),
					};
					if push { msg_sink.push(event); }
				}
			} }
		}
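
		// Handles $node's non-message events: claims (or, with $fail set, fails back) each
		// received payment and runs any pending HTLC forwards.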
		macro_rules! process_events {
			($node: expr, $fail: expr) => { {
				// In case we get 256 payments we may have a hash collision, resulting in the
				// second claim/fail call not finding the duplicate-hash HTLC, so we have to
				// deduplicate the calls here.
				let mut claim_set = HashSet::new();
				let mut events = nodes[$node].get_and_clear_pending_events();
				// Sort events so that PendingHTLCsForwardable get processed last. This avoids a
				// case where we first process a PendingHTLCsForwardable, then claim/fail on a
				// PaymentReceived, claiming/failing two HTLCs, but leaving a just-generated
				// PaymentReceived event for the second HTLC in our pending_events (and breaking
				// our claim_set deduplication).
				events.sort_by(|a, b| {
					if let events::Event::PaymentReceived { .. } = a {
						if let events::Event::PendingHTLCsForwardable { .. } = b {
							Ordering::Less
						} else { Ordering::Equal }
					} else if let events::Event::PendingHTLCsForwardable { .. } = a {
						if let events::Event::PaymentReceived { .. } = b {
							Ordering::Greater
						} else { Ordering::Equal }
					} else { Ordering::Equal }
				});
				for event in events.drain(..) {
					match event {
						events::Event::PaymentReceived { payment_hash, .. } => {
							if claim_set.insert(payment_hash.0) {
								if $fail {
									assert!(nodes[$node].fail_htlc_backwards(&payment_hash));
								} else {
									assert!(nodes[$node].claim_funds(PaymentPreimage(payment_hash.0), 5_000_000));
								}
							}
						},
						events::Event::PaymentSent { .. } => {},
						events::Event::PaymentFailed { .. } => {},
						events::Event::PendingHTLCsForwardable { .. } => {
							nodes[$node].process_pending_htlc_forwards();
						},
						_ => panic!("Unhandled event"),
					}
				}
			} }
		}
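
		// One byte of fuzz input selects the next action: 0x00-0x05 set per-node monitor update
		// results, 0x06-0x08 restore monitors, 0x09-0x0e send payments, 0x0f-0x12
		// disconnect/reconnect peers, 0x13-0x1e process message/other events, and 0x1f-0x21
		// reload nodes from their serialized state.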
		match get_slice!(1)[0] {
			0x00 => *monitor_a.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
			0x01 => *monitor_b.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
			0x02 => *monitor_c.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
			0x03 => *monitor_a.update_ret.lock().unwrap() = Ok(()),
			0x04 => *monitor_b.update_ret.lock().unwrap() = Ok(()),
			0x05 => *monitor_c.update_ret.lock().unwrap() = Ok(()),
			0x06 => { unsafe { IN_RESTORE = true }; nodes[0].test_restore_channel_monitor(); unsafe { IN_RESTORE = false }; },
			0x07 => { unsafe { IN_RESTORE = true }; nodes[1].test_restore_channel_monitor(); unsafe { IN_RESTORE = false }; },
			0x08 => { unsafe { IN_RESTORE = true }; nodes[2].test_restore_channel_monitor(); unsafe { IN_RESTORE = false }; },
			0x09 => send_payment!(nodes[0], (&nodes[1], chan_a)),
			0x0a => send_payment!(nodes[1], (&nodes[0], chan_a)),
			0x0b => send_payment!(nodes[1], (&nodes[2], chan_b)),
			0x0c => send_payment!(nodes[2], (&nodes[1], chan_b)),
			0x0d => send_payment!(nodes[0], (&nodes[1], chan_a), (&nodes[2], chan_b)),
			0x0e => send_payment!(nodes[2], (&nodes[1], chan_b), (&nodes[0], chan_a)),
			0x0f => {
				if !chan_a_disconnected {
					nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
					nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
					chan_a_disconnected = true;
					drain_msg_events_on_disconnect!(0);
				}
			},
			0x10 => {
				if !chan_b_disconnected {
					nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
					nodes[2].peer_disconnected(&nodes[1].get_our_node_id(), false);
					chan_b_disconnected = true;
					drain_msg_events_on_disconnect!(2);
				}
			},
			0x11 => {
				if chan_a_disconnected {
					nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::empty() });
					nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: InitFeatures::empty() });
					chan_a_disconnected = false;
				}
			},
			0x12 => {
				if chan_b_disconnected {
					nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: InitFeatures::empty() });
					nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::empty() });
					chan_b_disconnected = false;
				}
			},
			0x13 => process_msg_events!(0, true),
			0x14 => process_msg_events!(0, false),
			0x15 => process_events!(0, true),
			0x16 => process_events!(0, false),
			0x17 => process_msg_events!(1, true),
			0x18 => process_msg_events!(1, false),
			0x19 => process_events!(1, true),
			0x1a => process_events!(1, false),
			0x1b => process_msg_events!(2, true),
			0x1c => process_msg_events!(2, false),
			0x1d => process_events!(2, true),
			0x1e => process_events!(2, false),
			0x1f => {
				if !chan_a_disconnected {
					nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
					chan_a_disconnected = true;
					drain_msg_events_on_disconnect!(0);
				}
				let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a);
				node_a = Arc::new(new_node_a);
				nodes[0] = node_a.clone();
				monitor_a = new_monitor_a;
			},
			0x20 => {
				if !chan_a_disconnected {
					nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
					chan_a_disconnected = true;
					nodes[0].get_and_clear_pending_msg_events();
					ba_events.clear();
				}
				if !chan_b_disconnected {
					nodes[2].peer_disconnected(&nodes[1].get_our_node_id(), false);
					chan_b_disconnected = true;
					nodes[2].get_and_clear_pending_msg_events();
					bc_events.clear();
				}
				let (new_node_b, new_monitor_b) = reload_node!(node_b_ser, 1, monitor_b);
				node_b = Arc::new(new_node_b);
				nodes[1] = node_b.clone();
				monitor_b = new_monitor_b;
			},
			0x21 => {
				if !chan_b_disconnected {
					nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
					chan_b_disconnected = true;
					drain_msg_events_on_disconnect!(2);
				}
				let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c);
				node_c = Arc::new(new_node_c);
				nodes[2] = node_c.clone();
				monitor_c = new_monitor_c;
			},
			_ => test_return!(),
		}
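
		// After each action, re-serialize any node whose monitor set changed and snapshot which
		// monitors were fully up-to-date at serialization time, so reload_node! restores a
		// consistent view.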
		if monitor_a.should_update_manager.load(atomic::Ordering::Relaxed) {
			node_a_ser.0.clear();
			nodes[0].write(&mut node_a_ser).unwrap();
			monitor_a.should_update_manager.store(false, atomic::Ordering::Relaxed);
			*monitor_a.latest_updates_good_at_last_ser.lock().unwrap() = monitor_a.latest_update_good.lock().unwrap().clone();
		}
		if monitor_b.should_update_manager.load(atomic::Ordering::Relaxed) {
			node_b_ser.0.clear();
			nodes[1].write(&mut node_b_ser).unwrap();
			monitor_b.should_update_manager.store(false, atomic::Ordering::Relaxed);
			*monitor_b.latest_updates_good_at_last_ser.lock().unwrap() = monitor_b.latest_update_good.lock().unwrap().clone();
		}
		if monitor_c.should_update_manager.load(atomic::Ordering::Relaxed) {
			node_c_ser.0.clear();
			nodes[2].write(&mut node_c_ser).unwrap();
			monitor_c.should_update_manager.store(false, atomic::Ordering::Relaxed);
			*monitor_c.latest_updates_good_at_last_ser.lock().unwrap() = monitor_c.latest_update_good.lock().unwrap().clone();
		}
	}
}
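
// C-ABI entry point so external fuzz drivers can feed raw input buffers into do_test.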
#[no_mangle]
pub extern "C" fn chanmon_consistency_run(data: *const u8, datalen: usize) {
	do_test(unsafe { std::slice::from_raw_parts(data, datalen) });
}