2020-08-10 15:00:09 -04:00
// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
2019-01-07 17:17:36 -05:00
//! Test that monitor update failures don't get our channel state out of sync.
//! One of the biggest concerns with the monitor update failure handling code is that messages
//! resent after monitor updating is restored are delivered out-of-order, resulting in
//! commitment_signed messages having "invalid signatures".
//! To test this we stand up a network of three nodes and read bytes from the fuzz input to denote
//! actions such as sending payments, handling events, or changing monitor update return values on
//! a per-node basis. This should allow it to find any cases where the ordering of actions results
//! in us getting out of sync with ourselves, and, assuming at least one of our receive- or
//! send-side handling is correct, other peers. We consider it a failure if any action results in a
//! channel being force-closed.
2021-03-03 11:24:55 -08:00
use bitcoin ::blockdata ::constants ::genesis_block ;
2019-01-07 17:17:36 -05:00
use bitcoin ::blockdata ::transaction ::{ Transaction , TxOut } ;
use bitcoin ::blockdata ::script ::{ Builder , Script } ;
use bitcoin ::blockdata ::opcodes ;
2022-08-09 17:39:51 +02:00
use bitcoin ::blockdata ::locktime ::PackedLockTime ;
2019-01-07 17:17:36 -05:00
use bitcoin ::network ::constants ::Network ;
2020-04-27 16:41:54 +02:00
use bitcoin ::hashes ::Hash as TraitImport ;
use bitcoin ::hashes ::sha256 ::Hash as Sha256 ;
2023-01-18 13:03:06 -08:00
use bitcoin ::hashes ::sha256d ::Hash as Sha256dHash ;
2020-04-27 18:13:27 +02:00
use bitcoin ::hash_types ::{ BlockHash , WPubkeyHash } ;
2019-01-07 17:17:36 -05:00
2020-07-20 17:03:52 -07:00
use lightning ::chain ;
2022-07-18 01:32:27 +00:00
use lightning ::chain ::{ BestBlock , ChannelMonitorUpdateStatus , chainmonitor , channelmonitor , Confirm , Watch } ;
2021-10-05 17:59:13 +00:00
use lightning ::chain ::channelmonitor ::{ ChannelMonitor , MonitorEvent } ;
2019-01-07 17:17:36 -05:00
use lightning ::chain ::transaction ::OutPoint ;
2020-07-29 13:02:29 -07:00
use lightning ::chain ::chaininterface ::{ BroadcasterInterface , ConfirmationTarget , FeeEstimator } ;
2023-04-28 14:11:37 -05:00
use lightning ::sign ::{ KeyMaterial , InMemorySigner , Recipient , EntropySource , NodeSigner , SignerProvider } ;
2023-03-07 13:57:01 -08:00
use lightning ::events ;
use lightning ::events ::MessageSendEventsProvider ;
2021-04-28 17:28:10 -04:00
use lightning ::ln ::{ PaymentHash , PaymentPreimage , PaymentSecret } ;
2023-03-22 21:48:22 +00:00
use lightning ::ln ::channelmanager ::{ ChainParameters , ChannelDetails , ChannelManager , PaymentSendFailure , ChannelManagerReadArgs , PaymentId , RecipientOnionFields } ;
2021-06-30 03:09:04 +00:00
use lightning ::ln ::channel ::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE ;
2022-10-28 11:31:24 -04:00
use lightning ::ln ::msgs ::{ self , CommitmentUpdate , ChannelMessageHandler , DecodeError , UpdateAddHTLC , Init } ;
2021-07-26 12:31:24 -04:00
use lightning ::ln ::script ::ShutdownScript ;
2023-04-27 13:31:04 -07:00
use lightning ::ln ::functional_test_utils ::* ;
2023-02-27 12:10:32 -06:00
use lightning ::offers ::invoice ::UnsignedBolt12Invoice ;
use lightning ::offers ::invoice_request ::UnsignedInvoiceRequest ;
2023-08-28 09:37:33 -07:00
use lightning ::util ::test_channel_signer ::{ TestChannelSigner , EnforcementState } ;
2020-11-17 15:22:59 -05:00
use lightning ::util ::errors ::APIError ;
2019-01-07 17:17:36 -05:00
use lightning ::util ::logger ::Logger ;
use lightning ::util ::config ::UserConfig ;
2019-07-22 17:28:49 -04:00
use lightning ::util ::ser ::{ Readable , ReadableArgs , Writeable , Writer } ;
2023-04-09 13:50:44 -04:00
use lightning ::routing ::router ::{ InFlightHtlcs , Path , Route , RouteHop , RouteParameters , Router } ;
2020-05-02 09:37:38 -04:00
2022-10-13 02:35:48 -04:00
use crate ::utils ::test_logger ::{ self , Output } ;
use crate ::utils ::test_persister ::TestPersister ;
2019-01-07 17:17:36 -05:00
2023-01-18 13:03:06 -08:00
use bitcoin ::secp256k1 ::{ Message , PublicKey , SecretKey , Scalar , Secp256k1 } ;
2022-07-11 16:27:10 -04:00
use bitcoin ::secp256k1 ::ecdh ::SharedSecret ;
2023-01-18 13:03:06 -08:00
use bitcoin ::secp256k1 ::ecdsa ::{ RecoverableSignature , Signature } ;
2023-02-27 12:10:32 -06:00
use bitcoin ::secp256k1 ::schnorr ;
2019-01-07 17:17:36 -05:00
2019-07-23 15:39:11 -04:00
use std ::mem ;
2021-06-30 03:09:04 +00:00
use std ::cmp ::{ self , Ordering } ;
2023-01-06 19:53:47 +00:00
use hashbrown ::{ HashSet , hash_map , HashMap } ;
2019-01-07 17:17:36 -05:00
use std ::sync ::{ Arc , Mutex } ;
2019-07-18 22:17:36 -04:00
use std ::sync ::atomic ;
2019-01-07 17:17:36 -05:00
use std ::io ::Cursor ;
2022-01-21 11:33:39 +01:00
use bitcoin ::bech32 ::u5 ;
2019-01-07 17:17:36 -05:00
2021-06-30 03:09:04 +00:00
/// Upper bound (sat per 1000-weight) on any feerate this fuzz target will return or accept.
const MAX_FEE: u32 = 10_000;

/// Fee estimator whose non-anchor channel feerate is driven by the fuzz input via `ret_val`.
struct FuzzEstimator {
	ret_val: atomic::AtomicU32,
}
2019-01-07 17:17:36 -05:00
impl FeeEstimator for FuzzEstimator {
2021-06-30 03:09:04 +00:00
fn get_est_sat_per_1000_weight ( & self , conf_target : ConfirmationTarget ) -> u32 {
// We force-close channels if our counterparty sends us a feerate which is a small multiple
// of our HighPriority fee estimate or smaller than our Background fee estimate. Thus, we
// always return a HighPriority feerate here which is >= the maximum Normal feerate and a
// Background feerate which is <= the minimum Normal feerate.
match conf_target {
2023-10-12 15:43:30 -05:00
ConfirmationTarget ::MaxAllowedNonAnchorChannelRemoteFee = > MAX_FEE * 10 ,
ConfirmationTarget ::OnChainSweep = > MAX_FEE ,
ConfirmationTarget ::ChannelCloseMinimum | ConfirmationTarget ::AnchorChannelFee | ConfirmationTarget ::MinAllowedAnchorChannelRemoteFee | ConfirmationTarget ::MinAllowedNonAnchorChannelRemoteFee = > 253 ,
ConfirmationTarget ::NonAnchorChannelFee = > cmp ::min ( self . ret_val . load ( atomic ::Ordering ::Acquire ) , MAX_FEE ) ,
2021-06-30 03:09:04 +00:00
}
2019-01-07 17:17:36 -05:00
}
}
2022-10-28 11:31:24 -04:00
struct FuzzRouter { }
impl Router for FuzzRouter {
fn find_route (
& self , _payer : & PublicKey , _params : & RouteParameters , _first_hops : Option < & [ & ChannelDetails ] > ,
2023-07-18 19:41:07 +00:00
_inflight_htlcs : InFlightHtlcs
2022-10-28 11:31:24 -04:00
) -> Result < Route , msgs ::LightningError > {
Err ( msgs ::LightningError {
err : String ::from ( " Not implemented " ) ,
action : msgs ::ErrorAction ::IgnoreError
} )
}
}
2019-01-07 17:17:36 -05:00
pub struct TestBroadcaster { }
impl BroadcasterInterface for TestBroadcaster {
2023-05-05 08:29:52 -05:00
fn broadcast_transactions ( & self , _txs : & [ & Transaction ] ) { }
2019-01-07 17:17:36 -05:00
}
2019-07-22 17:28:49 -04:00
/// A trivial `Writer` that accumulates all written bytes into an in-memory `Vec<u8>`.
pub struct VecWriter(pub Vec<u8>);

impl Writer for VecWriter {
	fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
		// Appending to a Vec cannot fail, so this is infallible in practice.
		self.0.extend_from_slice(buf);
		Ok(())
	}
}
2020-07-20 22:12:14 -07:00
struct TestChainMonitor {
2020-02-11 18:34:29 -05:00
pub logger : Arc < dyn Logger > ,
2021-02-09 15:22:44 -05:00
pub keys : Arc < KeyProvider > ,
2021-10-09 00:23:44 +00:00
pub persister : Arc < TestPersister > ,
2023-08-28 09:37:33 -07:00
pub chain_monitor : Arc < chainmonitor ::ChainMonitor < TestChannelSigner , Arc < dyn chain ::Filter > , Arc < TestBroadcaster > , Arc < FuzzEstimator > , Arc < dyn Logger > , Arc < TestPersister > > > ,
2020-02-11 18:34:29 -05:00
// If we reload a node with an old copy of ChannelMonitors, the ChannelManager deserialization
// logic will automatically force-close our channels for us (as we don't have an up-to-date
// monitor implying we are not able to punish misbehaving counterparties). Because this test
// "fails" if we ever force-close a channel, we avoid doing so, always saving the latest
// fully-serialized monitor state here, as well as the corresponding update_id.
pub latest_monitors : Mutex < HashMap < OutPoint , ( u64 , Vec < u8 > ) > > ,
2019-01-07 17:17:36 -05:00
}
2020-07-20 22:12:14 -07:00
impl TestChainMonitor {
2021-02-09 15:22:44 -05:00
pub fn new ( broadcaster : Arc < TestBroadcaster > , logger : Arc < dyn Logger > , feeest : Arc < FuzzEstimator > , persister : Arc < TestPersister > , keys : Arc < KeyProvider > ) -> Self {
2019-01-07 17:17:36 -05:00
Self {
2021-10-09 00:23:44 +00:00
chain_monitor : Arc ::new ( chainmonitor ::ChainMonitor ::new ( None , broadcaster , logger . clone ( ) , feeest , Arc ::clone ( & persister ) ) ) ,
2020-02-11 18:34:29 -05:00
logger ,
2021-02-09 15:22:44 -05:00
keys ,
2021-10-09 00:23:44 +00:00
persister ,
2020-02-11 18:34:29 -05:00
latest_monitors : Mutex ::new ( HashMap ::new ( ) ) ,
2019-01-07 17:17:36 -05:00
}
}
}
2023-08-28 09:37:33 -07:00
impl chain ::Watch < TestChannelSigner > for TestChainMonitor {
Drop the `ChannelMonitorUpdateStatus::PermanentFailure` variant
When a `ChannelMonitorUpdate` fails to apply, it generally means
we cannot reach our storage backend. This, in general, is a
critical issue, but is often only a transient issue.
Sadly, users see the failure variant and return it on any I/O
error, resulting in channel force-closures due to transient issues.
Users don't generally expect force-closes in most cases, and
luckily with async `ChannelMonitorUpdate`s supported we don't take
any risk by "delaying" the `ChannelMonitorUpdate` indefinitely.
Thus, here we drop the `PermanentFailure` variant entirely, making
all failures instead be "the update is in progress, but won't ever
complete", which is equivalent if we do not close the channel
automatically.
2023-09-10 17:14:32 +00:00
fn watch_channel ( & self , funding_txo : OutPoint , monitor : channelmonitor ::ChannelMonitor < TestChannelSigner > ) -> Result < chain ::ChannelMonitorUpdateStatus , ( ) > {
2020-02-11 18:34:29 -05:00
let mut ser = VecWriter ( Vec ::new ( ) ) ;
2020-11-25 15:03:19 -05:00
monitor . write ( & mut ser ) . unwrap ( ) ;
2020-02-11 18:34:29 -05:00
if let Some ( _ ) = self . latest_monitors . lock ( ) . unwrap ( ) . insert ( funding_txo , ( monitor . get_latest_update_id ( ) , ser . 0 ) ) {
2020-07-20 17:03:52 -07:00
panic! ( " Already had monitor pre-watch_channel " ) ;
2019-07-22 17:28:49 -04:00
}
2021-10-09 00:23:44 +00:00
self . chain_monitor . watch_channel ( funding_txo , monitor )
2019-01-07 17:17:36 -05:00
}
2022-11-12 18:26:38 +00:00
fn update_channel ( & self , funding_txo : OutPoint , update : & channelmonitor ::ChannelMonitorUpdate ) -> chain ::ChannelMonitorUpdateStatus {
2020-02-11 18:34:29 -05:00
let mut map_lock = self . latest_monitors . lock ( ) . unwrap ( ) ;
let mut map_entry = match map_lock . entry ( funding_txo ) {
hash_map ::Entry ::Occupied ( entry ) = > entry ,
hash_map ::Entry ::Vacant ( _ ) = > panic! ( " Didn't have monitor on update call " ) ,
} ;
2023-08-28 09:37:33 -07:00
let deserialized_monitor = < ( BlockHash , channelmonitor ::ChannelMonitor < TestChannelSigner > ) > ::
2022-12-20 14:46:08 -08:00
read ( & mut Cursor ::new ( & map_entry . get ( ) . 1 ) , ( & * self . keys , & * self . keys ) ) . unwrap ( ) . 1 ;
2023-09-27 23:02:50 +00:00
deserialized_monitor . update_monitor ( update , & & TestBroadcaster { } , & & FuzzEstimator { ret_val : atomic ::AtomicU32 ::new ( 253 ) } , & self . logger ) . unwrap ( ) ;
2020-02-11 18:34:29 -05:00
let mut ser = VecWriter ( Vec ::new ( ) ) ;
2020-11-25 15:03:19 -05:00
deserialized_monitor . write ( & mut ser ) . unwrap ( ) ;
2020-02-11 18:34:29 -05:00
map_entry . insert ( ( update . update_id , ser . 0 ) ) ;
2021-10-09 00:23:44 +00:00
self . chain_monitor . update_channel ( funding_txo , update )
2020-02-05 19:39:31 -05:00
}
2022-07-25 11:28:51 -07:00
fn release_pending_monitor_events ( & self ) -> Vec < ( OutPoint , Vec < MonitorEvent > , Option < PublicKey > ) > {
2020-07-20 22:12:14 -07:00
return self . chain_monitor . release_pending_monitor_events ( ) ;
2019-01-07 17:17:36 -05:00
}
}
struct KeyProvider {
2023-01-18 13:03:06 -08:00
node_secret : SecretKey ,
2021-05-20 16:38:18 +00:00
rand_bytes_id : atomic ::AtomicU32 ,
2021-08-09 10:56:15 +02:00
enforcement_states : Mutex < HashMap < [ u8 ; 32 ] , Arc < Mutex < EnforcementState > > > > ,
2019-01-07 17:17:36 -05:00
}
2019-11-26 16:46:33 -05:00
2022-12-08 15:40:54 -08:00
impl EntropySource for KeyProvider {
	fn get_secure_random_bytes(&self) -> [u8; 32] {
		// "Randomness" is a counter plus fixed tag bytes — fully deterministic per node.
		let id = self.rand_bytes_id.fetch_add(1, atomic::Ordering::Relaxed);
		let mut res = [0u8; 32];
		res[30] = 11;
		res[31] = self.node_secret[31];
		res[26..30].copy_from_slice(&id.to_le_bytes());
		res
	}
}
impl NodeSigner for KeyProvider {
fn get_node_id ( & self , recipient : Recipient ) -> Result < PublicKey , ( ) > {
2023-01-18 13:43:32 -08:00
let node_secret = match recipient {
Recipient ::Node = > Ok ( & self . node_secret ) ,
Recipient ::PhantomNode = > Err ( ( ) )
} ? ;
Ok ( PublicKey ::from_secret_key ( & Secp256k1 ::signing_only ( ) , node_secret ) )
2022-12-08 15:40:54 -08:00
}
2022-08-10 18:04:59 +02:00
fn ecdh ( & self , recipient : Recipient , other_key : & PublicKey , tweak : Option < & Scalar > ) -> Result < SharedSecret , ( ) > {
2023-01-18 13:43:32 -08:00
let mut node_secret = match recipient {
Recipient ::Node = > Ok ( self . node_secret . clone ( ) ) ,
Recipient ::PhantomNode = > Err ( ( ) )
} ? ;
2022-07-11 16:27:10 -04:00
if let Some ( tweak ) = tweak {
2023-01-18 13:43:32 -08:00
node_secret = node_secret . mul_tweak ( tweak ) . map_err ( | _ | ( ) ) ? ;
2022-07-11 16:27:10 -04:00
}
Ok ( SharedSecret ::new ( other_key , & node_secret ) )
}
2021-11-29 12:50:47 -05:00
fn get_inbound_payment_key_material ( & self ) -> KeyMaterial {
2023-01-18 13:03:06 -08:00
KeyMaterial ( [ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , self . node_secret [ 31 ] ] )
2021-11-29 12:50:47 -05:00
}
2022-12-08 15:40:54 -08:00
fn sign_invoice ( & self , _hrp_bytes : & [ u8 ] , _invoice_data : & [ u5 ] , _recipient : Recipient ) -> Result < RecoverableSignature , ( ) > {
unreachable! ( )
2019-01-07 17:17:36 -05:00
}
2023-01-18 13:03:06 -08:00
2023-02-27 12:10:32 -06:00
fn sign_bolt12_invoice_request (
& self , _invoice_request : & UnsignedInvoiceRequest
) -> Result < schnorr ::Signature , ( ) > {
unreachable! ( )
}
fn sign_bolt12_invoice (
& self , _invoice : & UnsignedBolt12Invoice ,
) -> Result < schnorr ::Signature , ( ) > {
unreachable! ( )
}
2023-01-18 13:03:06 -08:00
fn sign_gossip_message ( & self , msg : lightning ::ln ::msgs ::UnsignedGossipMessage ) -> Result < Signature , ( ) > {
let msg_hash = Message ::from_slice ( & Sha256dHash ::hash ( & msg . encode ( ) [ .. ] ) [ .. ] ) . map_err ( | _ | ( ) ) ? ;
let secp_ctx = Secp256k1 ::signing_only ( ) ;
Ok ( secp_ctx . sign_ecdsa ( & msg_hash , & self . node_secret ) )
}
2022-12-08 15:40:54 -08:00
}
2019-01-07 17:17:36 -05:00
2022-12-08 15:40:54 -08:00
impl SignerProvider for KeyProvider {
2023-08-28 09:37:33 -07:00
type Signer = TestChannelSigner ;
2019-01-07 17:17:36 -05:00
2022-11-21 12:45:30 -08:00
fn generate_channel_keys_id ( & self , _inbound : bool , _channel_value_satoshis : u64 , _user_channel_id : u128 ) -> [ u8 ; 32 ] {
let id = self . rand_bytes_id . fetch_add ( 1 , atomic ::Ordering ::Relaxed ) as u8 ;
[ id ; 32 ]
}
fn derive_channel_signer ( & self , channel_value_satoshis : u64 , channel_keys_id : [ u8 ; 32 ] ) -> Self ::Signer {
2020-02-04 09:15:59 -08:00
let secp_ctx = Secp256k1 ::signing_only ( ) ;
2022-11-21 12:45:30 -08:00
let id = channel_keys_id [ 0 ] ;
2021-02-16 16:30:08 -05:00
let keys = InMemorySigner ::new (
2020-02-04 09:15:59 -08:00
& secp_ctx ,
2023-01-18 13:03:06 -08:00
SecretKey ::from_slice ( & [ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 4 , self . node_secret [ 31 ] ] ) . unwrap ( ) ,
SecretKey ::from_slice ( & [ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 5 , self . node_secret [ 31 ] ] ) . unwrap ( ) ,
SecretKey ::from_slice ( & [ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 6 , self . node_secret [ 31 ] ] ) . unwrap ( ) ,
SecretKey ::from_slice ( & [ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 7 , self . node_secret [ 31 ] ] ) . unwrap ( ) ,
SecretKey ::from_slice ( & [ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 8 , self . node_secret [ 31 ] ] ) . unwrap ( ) ,
[ id , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 9 , self . node_secret [ 31 ] ] ,
2020-01-23 13:33:31 -08:00
channel_value_satoshis ,
2022-11-21 12:45:30 -08:00
channel_keys_id ,
2023-04-19 14:58:57 -07:00
channel_keys_id ,
2020-12-05 18:56:27 +01:00
) ;
2021-08-09 10:56:15 +02:00
let revoked_commitment = self . make_enforcement_state_cell ( keys . commitment_seed ) ;
2023-08-28 09:37:33 -07:00
TestChannelSigner ::new_with_revoked ( keys , revoked_commitment , false )
2019-01-07 17:17:36 -05:00
}
2021-02-16 16:30:08 -05:00
fn read_chan_signer ( & self , buffer : & [ u8 ] ) -> Result < Self ::Signer , DecodeError > {
2020-12-05 18:56:27 +01:00
let mut reader = std ::io ::Cursor ::new ( buffer ) ;
2023-04-19 14:58:57 -07:00
let inner : InMemorySigner = ReadableArgs ::read ( & mut reader , self ) ? ;
2021-08-09 10:56:15 +02:00
let state = self . make_enforcement_state_cell ( inner . commitment_seed ) ;
2020-12-05 18:56:27 +01:00
2023-08-28 09:37:33 -07:00
Ok ( TestChannelSigner {
2020-12-05 18:56:27 +01:00
inner ,
2021-08-09 10:56:15 +02:00
state ,
2021-01-13 17:36:07 -08:00
disable_revocation_policy_check : false ,
2023-09-06 11:38:34 -07:00
available : Arc ::new ( Mutex ::new ( true ) ) ,
2020-12-05 18:56:27 +01:00
} )
}
2021-04-29 12:19:05 -04:00
2023-04-22 00:48:28 -05:00
fn get_destination_script ( & self ) -> Result < Script , ( ) > {
2022-12-08 15:40:54 -08:00
let secp_ctx = Secp256k1 ::signing_only ( ) ;
2023-01-18 13:03:06 -08:00
let channel_monitor_claim_key = SecretKey ::from_slice ( & [ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 2 , self . node_secret [ 31 ] ] ) . unwrap ( ) ;
2022-12-08 15:40:54 -08:00
let our_channel_monitor_claim_key_hash = WPubkeyHash ::hash ( & PublicKey ::from_secret_key ( & secp_ctx , & channel_monitor_claim_key ) . serialize ( ) ) ;
2023-04-22 00:48:28 -05:00
Ok ( Builder ::new ( ) . push_opcode ( opcodes ::all ::OP_PUSHBYTES_0 ) . push_slice ( & our_channel_monitor_claim_key_hash [ .. ] ) . into_script ( ) )
2022-12-08 15:40:54 -08:00
}
2023-04-22 00:48:28 -05:00
fn get_shutdown_scriptpubkey ( & self ) -> Result < ShutdownScript , ( ) > {
2022-12-08 15:40:54 -08:00
let secp_ctx = Secp256k1 ::signing_only ( ) ;
2023-01-18 13:03:06 -08:00
let secret_key = SecretKey ::from_slice ( & [ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 3 , self . node_secret [ 31 ] ] ) . unwrap ( ) ;
2022-12-08 15:40:54 -08:00
let pubkey_hash = WPubkeyHash ::hash ( & PublicKey ::from_secret_key ( & secp_ctx , & secret_key ) . serialize ( ) ) ;
2023-04-22 00:48:28 -05:00
Ok ( ShutdownScript ::new_p2wpkh ( & pubkey_hash ) )
2021-04-29 12:19:05 -04:00
}
2020-12-05 18:56:27 +01:00
}
impl KeyProvider {
2021-08-09 10:56:15 +02:00
fn make_enforcement_state_cell ( & self , commitment_seed : [ u8 ; 32 ] ) -> Arc < Mutex < EnforcementState > > {
let mut revoked_commitments = self . enforcement_states . lock ( ) . unwrap ( ) ;
2020-12-05 18:56:27 +01:00
if ! revoked_commitments . contains_key ( & commitment_seed ) {
2021-08-09 10:56:15 +02:00
revoked_commitments . insert ( commitment_seed , Arc ::new ( Mutex ::new ( EnforcementState ::new ( ) ) ) ) ;
2020-12-05 18:56:27 +01:00
}
let cell = revoked_commitments . get ( & commitment_seed ) . unwrap ( ) ;
Arc ::clone ( cell )
2020-11-25 12:23:47 -05:00
}
2019-01-07 17:17:36 -05:00
}
2020-11-17 15:22:59 -05:00
#[ inline ]
2023-05-17 00:56:22 +00:00
fn check_api_err ( api_err : APIError , sendable_bounds_violated : bool ) {
2020-11-17 15:22:59 -05:00
match api_err {
APIError ::APIMisuseError { .. } = > panic! ( " We can't misuse the API " ) ,
APIError ::FeeRateTooHigh { .. } = > panic! ( " We can't send too much fee? " ) ,
2022-12-01 01:08:55 -05:00
APIError ::InvalidRoute { .. } = > panic! ( " Our routes should work " ) ,
2020-11-17 15:22:59 -05:00
APIError ::ChannelUnavailable { err } = > {
// Test the error against a list of errors we can hit, and reject
// all others. If you hit this panic, the list of acceptable errors
// is probably just stale and you should add new messages here.
match err . as_str ( ) {
2023-02-01 20:54:10 +00:00
" Peer for first hop currently disconnected " = > { } ,
2023-05-17 01:33:42 +00:00
_ if err . starts_with ( " Cannot send less than our next-HTLC minimum - " ) = > { } ,
_ if err . starts_with ( " Cannot send more than our next-HTLC maximum - " ) = > { } ,
2021-03-30 23:21:00 -04:00
_ = > panic! ( " {} " , err ) ,
2020-11-17 15:22:59 -05:00
}
2023-05-17 00:56:22 +00:00
assert! ( sendable_bounds_violated ) ;
2020-11-17 15:22:59 -05:00
} ,
2022-09-29 20:26:48 +00:00
APIError ::MonitorUpdateInProgress = > {
2020-11-17 15:22:59 -05:00
// We can (obviously) temp-fail a monitor update
} ,
2021-08-01 22:31:07 -05:00
APIError ::IncompatibleShutdownScript { .. } = > panic! ( " Cannot send an incompatible shutdown script " ) ,
2020-11-17 15:22:59 -05:00
}
}
#[ inline ]
2023-05-17 00:56:22 +00:00
fn check_payment_err ( send_err : PaymentSendFailure , sendable_bounds_violated : bool ) {
2020-11-17 15:22:59 -05:00
match send_err {
2023-05-17 00:56:22 +00:00
PaymentSendFailure ::ParameterError ( api_err ) = > check_api_err ( api_err , sendable_bounds_violated ) ,
2020-11-17 15:22:59 -05:00
PaymentSendFailure ::PathParameterError ( per_path_results ) = > {
2023-05-17 00:56:22 +00:00
for res in per_path_results { if let Err ( api_err ) = res { check_api_err ( api_err , sendable_bounds_violated ) ; } }
2020-11-17 15:22:59 -05:00
} ,
2022-11-02 23:16:25 +00:00
PaymentSendFailure ::AllFailedResendSafe ( per_path_results ) = > {
2023-05-17 00:56:22 +00:00
for api_err in per_path_results { check_api_err ( api_err , sendable_bounds_violated ) ; }
2020-11-17 15:22:59 -05:00
} ,
2021-10-26 21:39:31 +00:00
PaymentSendFailure ::PartialFailure { results , .. } = > {
2023-05-17 00:56:22 +00:00
for res in results { if let Err ( api_err ) = res { check_api_err ( api_err , sendable_bounds_violated ) ; } }
2020-11-17 15:22:59 -05:00
} ,
2022-11-02 23:25:34 +00:00
PaymentSendFailure ::DuplicatePayment = > panic! ( ) ,
2020-11-17 15:22:59 -05:00
}
}
2022-12-20 14:46:08 -08:00
type ChanMan < ' a > = ChannelManager < Arc < TestChainMonitor > , Arc < TestBroadcaster > , Arc < KeyProvider > , Arc < KeyProvider > , Arc < KeyProvider > , Arc < FuzzEstimator > , & ' a FuzzRouter , Arc < dyn Logger > > ;
2020-11-21 12:09:40 -05:00
2021-04-23 19:04:02 +00:00
#[inline]
/// Registers an inbound payment on `dest` and returns its (secret, hash) pair.
/// Tries up to 256 consecutive ids — `create_inbound_payment_for_hash` can reject a hash that
/// was already used — and returns `None` if all fail.
fn get_payment_secret_hash(dest: &ChanMan, payment_id: &mut u8) -> Option<(PaymentSecret, PaymentHash)> {
	for _ in 0..256 {
		let payment_hash = PaymentHash(Sha256::hash(&[*payment_id; 1]).into_inner());
		if let Ok(payment_secret) = dest.create_inbound_payment_for_hash(payment_hash, None, 3600, None) {
			return Some((payment_secret, payment_hash));
		}
		*payment_id = payment_id.wrapping_add(1);
	}
	None
}
2020-11-21 12:09:40 -05:00
#[ inline ]
2022-10-06 21:31:02 +00:00
fn send_payment ( source : & ChanMan , dest : & ChanMan , dest_chan_id : u64 , amt : u64 , payment_id : & mut u8 , payment_idx : & mut u64 ) -> bool {
2021-04-23 19:04:02 +00:00
let ( payment_secret , payment_hash ) =
if let Some ( ( secret , hash ) ) = get_payment_secret_hash ( dest , payment_id ) { ( secret , hash ) } else { return true ; } ;
2022-10-06 21:31:02 +00:00
let mut payment_id = [ 0 ; 32 ] ;
payment_id [ 0 .. 8 ] . copy_from_slice ( & payment_idx . to_ne_bytes ( ) ) ;
* payment_idx + = 1 ;
2023-05-17 00:56:22 +00:00
let ( min_value_sendable , max_value_sendable ) = source . list_usable_channels ( )
. iter ( ) . find ( | chan | chan . short_channel_id = = Some ( dest_chan_id ) )
. map ( | chan |
( chan . next_outbound_htlc_minimum_msat , chan . next_outbound_htlc_limit_msat ) )
. unwrap_or ( ( 0 , 0 ) ) ;
2023-03-22 21:48:22 +00:00
if let Err ( err ) = source . send_payment_with_route ( & Route {
2023-04-09 13:50:44 -04:00
paths : vec ! [ Path { hops : vec ! [ RouteHop {
2020-11-21 12:09:40 -05:00
pubkey : dest . get_our_node_id ( ) ,
2023-01-11 10:21:29 -08:00
node_features : dest . node_features ( ) ,
2020-11-21 12:09:40 -05:00
short_channel_id : dest_chan_id ,
2023-01-11 10:21:29 -08:00
channel_features : dest . channel_features ( ) ,
2020-11-21 12:09:40 -05:00
fee_msat : amt ,
cltv_expiry_delta : 200 ,
2023-09-12 15:51:37 +02:00
maybe_announced_channel : true ,
2023-04-18 12:06:35 -04:00
} ] , blinded_tail : None } ] ,
2023-08-31 15:10:09 +02:00
route_params : None ,
2023-03-22 21:48:22 +00:00
} , payment_hash , RecipientOnionFields ::secret_only ( payment_secret ) , PaymentId ( payment_id ) ) {
2023-05-17 00:56:22 +00:00
check_payment_err ( err , amt > max_value_sendable | | amt < min_value_sendable ) ;
2020-11-21 12:09:40 -05:00
false
2023-05-17 00:56:22 +00:00
} else {
// Note that while the max is a strict upper-bound, we can occasionally send substantially
// below the minimum, with some gap which is unusable immediately below the minimum. Thus,
// we don't check against min_value_sendable here.
assert! ( amt < = max_value_sendable ) ;
true
}
2020-11-21 12:09:40 -05:00
}
#[ inline ]
2022-10-06 21:31:02 +00:00
fn send_hop_payment ( source : & ChanMan , middle : & ChanMan , middle_chan_id : u64 , dest : & ChanMan , dest_chan_id : u64 , amt : u64 , payment_id : & mut u8 , payment_idx : & mut u64 ) -> bool {
2021-04-23 19:04:02 +00:00
let ( payment_secret , payment_hash ) =
if let Some ( ( secret , hash ) ) = get_payment_secret_hash ( dest , payment_id ) { ( secret , hash ) } else { return true ; } ;
2022-10-06 21:31:02 +00:00
let mut payment_id = [ 0 ; 32 ] ;
payment_id [ 0 .. 8 ] . copy_from_slice ( & payment_idx . to_ne_bytes ( ) ) ;
* payment_idx + = 1 ;
2023-05-17 00:56:22 +00:00
let ( min_value_sendable , max_value_sendable ) = source . list_usable_channels ( )
. iter ( ) . find ( | chan | chan . short_channel_id = = Some ( middle_chan_id ) )
. map ( | chan |
( chan . next_outbound_htlc_minimum_msat , chan . next_outbound_htlc_limit_msat ) )
. unwrap_or ( ( 0 , 0 ) ) ;
let first_hop_fee = 50_000 ;
2023-03-22 21:48:22 +00:00
if let Err ( err ) = source . send_payment_with_route ( & Route {
2023-04-09 13:50:44 -04:00
paths : vec ! [ Path { hops : vec ! [ RouteHop {
2020-11-21 12:09:40 -05:00
pubkey : middle . get_our_node_id ( ) ,
2023-01-11 10:21:29 -08:00
node_features : middle . node_features ( ) ,
2020-11-21 12:09:40 -05:00
short_channel_id : middle_chan_id ,
2023-01-11 10:21:29 -08:00
channel_features : middle . channel_features ( ) ,
2023-05-17 00:56:22 +00:00
fee_msat : first_hop_fee ,
2020-11-21 12:09:40 -05:00
cltv_expiry_delta : 100 ,
2023-09-12 15:51:37 +02:00
maybe_announced_channel : true ,
2023-08-31 15:10:09 +02:00
} , RouteHop {
2020-11-21 12:09:40 -05:00
pubkey : dest . get_our_node_id ( ) ,
2023-01-11 10:21:29 -08:00
node_features : dest . node_features ( ) ,
2020-11-21 12:09:40 -05:00
short_channel_id : dest_chan_id ,
2023-01-11 10:21:29 -08:00
channel_features : dest . channel_features ( ) ,
2020-11-21 12:09:40 -05:00
fee_msat : amt ,
cltv_expiry_delta : 200 ,
2023-09-12 15:51:37 +02:00
maybe_announced_channel : true ,
2023-04-18 12:06:35 -04:00
} ] , blinded_tail : None } ] ,
2023-08-31 15:10:09 +02:00
route_params : None ,
2023-03-22 21:48:22 +00:00
} , payment_hash , RecipientOnionFields ::secret_only ( payment_secret ) , PaymentId ( payment_id ) ) {
2023-05-17 00:56:22 +00:00
let sent_amt = amt + first_hop_fee ;
check_payment_err ( err , sent_amt < min_value_sendable | | sent_amt > max_value_sendable ) ;
2020-11-21 12:09:40 -05:00
false
2023-05-17 00:56:22 +00:00
} else {
// Note that while the max is a strict upper-bound, we can occasionally send substantially
// below the minimum, with some gap which is unusable immediately below the minimum. Thus,
// we don't check against min_value_sendable here.
assert! ( amt + first_hop_fee < = max_value_sendable ) ;
true
}
2020-11-21 12:09:40 -05:00
}
2019-01-07 17:17:36 -05:00
#[ inline ]
2023-10-19 09:22:50 -07:00
pub fn do_test < Out : Output > ( data : & [ u8 ] , underlying_out : Out , anchors : bool ) {
2021-08-21 18:05:51 -04:00
let out = SearchingOutput ::new ( underlying_out ) ;
2019-01-07 17:17:36 -05:00
let broadcast = Arc ::new ( TestBroadcaster { } ) ;
2022-10-28 11:31:24 -04:00
let router = FuzzRouter { } ;
2019-01-07 17:17:36 -05:00
macro_rules ! make_node {
2021-06-30 03:09:04 +00:00
( $node_id : expr , $fee_estimator : expr ) = > { {
2020-02-20 20:11:40 -05:00
let logger : Arc < dyn Logger > = Arc ::new ( test_logger ::TestLogger ::new ( $node_id . to_string ( ) , out . clone ( ) ) ) ;
2023-01-18 13:03:06 -08:00
let node_secret = SecretKey ::from_slice ( & [ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , $node_id ] ) . unwrap ( ) ;
let keys_manager = Arc ::new ( KeyProvider { node_secret , rand_bytes_id : atomic ::AtomicU32 ::new ( 0 ) , enforcement_states : Mutex ::new ( HashMap ::new ( ) ) } ) ;
2021-10-09 00:23:44 +00:00
let monitor = Arc ::new ( TestChainMonitor ::new ( broadcast . clone ( ) , logger . clone ( ) , $fee_estimator . clone ( ) ,
2022-07-18 01:32:27 +00:00
Arc ::new ( TestPersister {
update_ret : Mutex ::new ( ChannelMonitorUpdateStatus ::Completed )
} ) , Arc ::clone ( & keys_manager ) ) ) ;
2021-02-09 15:22:44 -05:00
2019-10-18 14:19:49 +01:00
let mut config = UserConfig ::default ( ) ;
2022-06-13 12:53:56 -07:00
config . channel_config . forwarding_fee_proportional_millionths = 0 ;
config . channel_handshake_config . announced_channel = true ;
2023-10-19 09:22:50 -07:00
if anchors {
config . channel_handshake_config . negotiate_anchors_zero_fee_htlc_tx = true ;
config . manually_accept_inbound_channels = true ;
}
2021-03-03 11:24:55 -08:00
let network = Network ::Bitcoin ;
2023-06-22 15:19:15 -07:00
let best_block_timestamp = genesis_block ( network ) . header . time ;
2021-03-03 11:24:55 -08:00
let params = ChainParameters {
network ,
2023-02-15 06:09:00 +00:00
best_block : BestBlock ::from_network ( network ) ,
2021-03-03 11:24:55 -08:00
} ;
2023-06-22 15:19:15 -07:00
( ChannelManager ::new ( $fee_estimator . clone ( ) , monitor . clone ( ) , broadcast . clone ( ) , & router , Arc ::clone ( & logger ) , keys_manager . clone ( ) , keys_manager . clone ( ) , keys_manager . clone ( ) , config , params , best_block_timestamp ) ,
2020-12-05 18:56:27 +01:00
monitor , keys_manager )
2019-01-07 17:17:36 -05:00
} }
}
// Deserialize a previously-serialized node (manager + monitors) as if it had restarted,
// re-creating its chain monitor and re-registering every persisted ChannelMonitor.
// `$ser` is the VecWriter the old ChannelManager was written into; `$old_monitors` is the
// old node's TestChainMonitor whose latest serialized monitors are drained and re-read.
macro_rules! reload_node {
    ($ser: expr, $node_id: expr, $old_monitors: expr, $keys_manager: expr, $fee_estimator: expr) => { {
        let keys_manager = Arc::clone(&$keys_manager);
        let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
        let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(),
            Arc::new(TestPersister {
                update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed)
            }), Arc::clone(&$keys_manager)));

        // Reconstruct the same UserConfig the node was originally created with.
        let mut config = UserConfig::default();
        config.channel_config.forwarding_fee_proportional_millionths = 0;
        config.channel_handshake_config.announced_channel = true;
        if anchors {
            config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
            config.manually_accept_inbound_channels = true;
        }

        // Re-read each serialized monitor, keeping both a deserialized copy (for
        // watch_channel below) and the raw bytes in the new chain monitor's cache.
        let mut monitors = HashMap::new();
        let mut old_monitors = $old_monitors.latest_monitors.lock().unwrap();
        for (outpoint, (update_id, monitor_ser)) in old_monitors.drain() {
            monitors.insert(outpoint, <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(&mut Cursor::new(&monitor_ser), (&*$keys_manager, &*$keys_manager)).expect("Failed to read monitor").1);
            chain_monitor.latest_monitors.lock().unwrap().insert(outpoint, (update_id, monitor_ser));
        }
        let mut monitor_refs = HashMap::new();
        for (outpoint, monitor) in monitors.iter_mut() {
            monitor_refs.insert(*outpoint, monitor);
        }

        let read_args = ChannelManagerReadArgs {
            entropy_source: keys_manager.clone(),
            node_signer: keys_manager.clone(),
            signer_provider: keys_manager.clone(),
            fee_estimator: $fee_estimator.clone(),
            chain_monitor: chain_monitor.clone(),
            tx_broadcaster: broadcast.clone(),
            router: &router,
            logger,
            default_config: config,
            channel_monitors: monitor_refs,
        };

        let res = (<(BlockHash, ChanMan)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, chain_monitor.clone());
        // Hand every monitor back to the new chain monitor; all watches must complete
        // synchronously in this fuzz harness.
        for (funding_txo, mon) in monitors.drain() {
            assert_eq!(chain_monitor.chain_monitor.watch_channel(funding_txo, mon),
                Ok(ChannelMonitorUpdateStatus::Completed));
        }
        res
    } }
}
2019-01-07 17:17:36 -05:00
let mut channel_txn = Vec ::new ( ) ;
macro_rules ! make_channel {
2023-10-19 09:22:50 -07:00
( $source : expr , $dest : expr , $dest_keys_manager : expr , $chan_id : expr ) = > { {
2023-06-01 10:23:55 +02:00
$source . peer_connected ( & $dest . get_our_node_id ( ) , & Init {
features : $dest . init_features ( ) , networks : None , remote_network_address : None
} , true ) . unwrap ( ) ;
$dest . peer_connected ( & $source . get_our_node_id ( ) , & Init {
features : $source . init_features ( ) , networks : None , remote_network_address : None
} , false ) . unwrap ( ) ;
2021-07-31 09:32:27 -05:00
2020-11-17 15:24:20 -05:00
$source . create_channel ( $dest . get_our_node_id ( ) , 100_000 , 42 , 0 , None ) . unwrap ( ) ;
2019-01-07 17:17:36 -05:00
let open_channel = {
let events = $source . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
if let events ::MessageSendEvent ::SendOpenChannel { ref msg , .. } = events [ 0 ] {
msg . clone ( )
} else { panic! ( " Wrong event type " ) ; }
} ;
2023-01-16 20:34:59 +00:00
$dest . handle_open_channel ( & $source . get_our_node_id ( ) , & open_channel ) ;
2019-01-07 17:17:36 -05:00
let accept_channel = {
2023-10-19 09:22:50 -07:00
if anchors {
let events = $dest . get_and_clear_pending_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
if let events ::Event ::OpenChannelRequest {
ref temporary_channel_id , ref counterparty_node_id , ..
} = events [ 0 ] {
let mut random_bytes = [ 0 u8 ; 16 ] ;
random_bytes . copy_from_slice ( & $dest_keys_manager . get_secure_random_bytes ( ) [ .. 16 ] ) ;
let user_channel_id = u128 ::from_be_bytes ( random_bytes ) ;
$dest . accept_inbound_channel (
temporary_channel_id ,
counterparty_node_id ,
user_channel_id ,
) . unwrap ( ) ;
} else { panic! ( " Wrong event type " ) ; }
}
2019-01-07 17:17:36 -05:00
let events = $dest . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
if let events ::MessageSendEvent ::SendAcceptChannel { ref msg , .. } = events [ 0 ] {
msg . clone ( )
} else { panic! ( " Wrong event type " ) ; }
} ;
2023-01-16 20:34:59 +00:00
$source . handle_accept_channel ( & $dest . get_our_node_id ( ) , & accept_channel ) ;
2020-02-11 18:34:29 -05:00
let funding_output ;
2019-01-07 17:17:36 -05:00
{
let events = $source . get_and_clear_pending_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
if let events ::Event ::FundingGenerationReady { ref temporary_channel_id , ref channel_value_satoshis , ref output_script , .. } = events [ 0 ] {
2022-08-09 17:39:51 +02:00
let tx = Transaction { version : $chan_id , lock_time : PackedLockTime ::ZERO , input : Vec ::new ( ) , output : vec ! [ TxOut {
2019-01-07 17:17:36 -05:00
value : * channel_value_satoshis , script_pubkey : output_script . clone ( ) ,
} ] } ;
2020-05-12 13:17:49 -04:00
funding_output = OutPoint { txid : tx . txid ( ) , index : 0 } ;
2022-05-12 23:59:41 +02:00
$source . funding_transaction_generated ( & temporary_channel_id , & $dest . get_our_node_id ( ) , tx . clone ( ) ) . unwrap ( ) ;
2019-01-07 17:17:36 -05:00
channel_txn . push ( tx ) ;
} else { panic! ( " Wrong event type " ) ; }
}
let funding_created = {
let events = $source . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
if let events ::MessageSendEvent ::SendFundingCreated { ref msg , .. } = events [ 0 ] {
msg . clone ( )
} else { panic! ( " Wrong event type " ) ; }
} ;
2019-11-05 18:51:05 -05:00
$dest . handle_funding_created ( & $source . get_our_node_id ( ) , & funding_created ) ;
2019-01-07 17:17:36 -05:00
let funding_signed = {
let events = $dest . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
if let events ::MessageSendEvent ::SendFundingSigned { ref msg , .. } = events [ 0 ] {
msg . clone ( )
} else { panic! ( " Wrong event type " ) ; }
} ;
2023-03-10 16:30:37 +01:00
let events = $dest . get_and_clear_pending_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
if let events ::Event ::ChannelPending { ref counterparty_node_id , .. } = events [ 0 ] {
assert_eq! ( counterparty_node_id , & $source . get_our_node_id ( ) ) ;
} else { panic! ( " Wrong event type " ) ; }
2019-11-05 18:51:05 -05:00
$source . handle_funding_signed ( & $dest . get_our_node_id ( ) , & funding_signed ) ;
2023-03-10 16:30:37 +01:00
let events = $source . get_and_clear_pending_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
if let events ::Event ::ChannelPending { ref counterparty_node_id , .. } = events [ 0 ] {
assert_eq! ( counterparty_node_id , & $dest . get_our_node_id ( ) ) ;
} else { panic! ( " Wrong event type " ) ; }
2019-01-07 17:17:36 -05:00
2020-02-11 18:34:29 -05:00
funding_output
2019-01-07 17:17:36 -05:00
} }
}
// Confirm all funding transactions in `channel_txn` at height 1, then advance the
// given node's best block to height 99 so the channels become usable.
macro_rules! confirm_txn {
    ($node: expr) => { {
        let chain_hash = genesis_block(Network::Bitcoin).block_hash();
        let mut header = create_dummy_header(chain_hash, 42);
        // Place each funding tx at a distinct (1-based) position in the block.
        let txdata: Vec<_> = channel_txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect();
        $node.transactions_confirmed(&header, &txdata, 1);
        // Build a dummy chain of headers to reach sufficient confirmations.
        for _ in 2..100 {
            header = create_dummy_header(header.block_hash(), 42);
        }
        $node.best_block_updated(&header, 99);
    } }
}
// Exchange channel_ready messages between all nodes, then drain the resulting
// announcement-signatures events. Any other message type is a test failure.
macro_rules! lock_fundings {
    ($nodes: expr) => { {
        let mut node_events = Vec::new();
        for node in $nodes.iter() {
            node_events.push(node.get_and_clear_pending_msg_events());
        }
        // Deliver each SendChannelReady to whichever node it is addressed to.
        for (idx, node_event) in node_events.iter().enumerate() {
            for event in node_event {
                if let events::MessageSendEvent::SendChannelReady { ref node_id, ref msg } = event {
                    for node in $nodes.iter() {
                        if node.get_our_node_id() == *node_id {
                            node.handle_channel_ready(&$nodes[idx].get_our_node_id(), msg);
                        }
                    }
                } else { panic!("Wrong event type"); }
            }
        }
        // Locking the channels should only produce announcement signatures.
        for node in $nodes.iter() {
            let events = node.get_and_clear_pending_msg_events();
            for event in events {
                if let events::MessageSendEvent::SendAnnouncementSignatures { .. } = event {
                } else { panic!("Wrong event type"); }
            }
        }
    } }
}
2021-06-30 03:09:04 +00:00
let fee_est_a = Arc ::new ( FuzzEstimator { ret_val : atomic ::AtomicU32 ::new ( 253 ) } ) ;
let mut last_htlc_clear_fee_a = 253 ;
let fee_est_b = Arc ::new ( FuzzEstimator { ret_val : atomic ::AtomicU32 ::new ( 253 ) } ) ;
let mut last_htlc_clear_fee_b = 253 ;
let fee_est_c = Arc ::new ( FuzzEstimator { ret_val : atomic ::AtomicU32 ::new ( 253 ) } ) ;
let mut last_htlc_clear_fee_c = 253 ;
2019-01-07 17:17:36 -05:00
// 3 nodes is enough to hit all the possible cases, notably unknown-source-unknown-dest
// forwarding.
2021-06-30 03:09:04 +00:00
let ( node_a , mut monitor_a , keys_manager_a ) = make_node! ( 0 , fee_est_a ) ;
let ( node_b , mut monitor_b , keys_manager_b ) = make_node! ( 1 , fee_est_b ) ;
let ( node_c , mut monitor_c , keys_manager_c ) = make_node! ( 2 , fee_est_c ) ;
2019-01-07 17:17:36 -05:00
2019-07-22 17:28:49 -04:00
let mut nodes = [ node_a , node_b , node_c ] ;
2019-01-07 17:17:36 -05:00
2023-10-19 09:22:50 -07:00
let chan_1_funding = make_channel! ( nodes [ 0 ] , nodes [ 1 ] , keys_manager_b , 0 ) ;
let chan_2_funding = make_channel! ( nodes [ 1 ] , nodes [ 2 ] , keys_manager_c , 1 ) ;
2019-01-07 17:17:36 -05:00
for node in nodes . iter ( ) {
confirm_txn! ( node ) ;
}
lock_fundings! ( nodes ) ;
let chan_a = nodes [ 0 ] . list_usable_channels ( ) [ 0 ] . short_channel_id . unwrap ( ) ;
let chan_b = nodes [ 2 ] . list_usable_channels ( ) [ 0 ] . short_channel_id . unwrap ( ) ;
2020-11-21 12:09:40 -05:00
let mut payment_id : u8 = 0 ;
2022-10-06 21:31:02 +00:00
let mut payment_idx : u64 = 0 ;
2019-01-07 17:17:36 -05:00
let mut chan_a_disconnected = false ;
let mut chan_b_disconnected = false ;
2021-04-21 17:03:57 +00:00
let mut ab_events = Vec ::new ( ) ;
2019-07-23 15:39:11 -04:00
let mut ba_events = Vec ::new ( ) ;
let mut bc_events = Vec ::new ( ) ;
2021-04-21 17:03:57 +00:00
let mut cb_events = Vec ::new ( ) ;
2019-01-07 17:17:36 -05:00
2019-07-22 17:28:49 -04:00
let mut node_a_ser = VecWriter ( Vec ::new ( ) ) ;
nodes [ 0 ] . write ( & mut node_a_ser ) . unwrap ( ) ;
let mut node_b_ser = VecWriter ( Vec ::new ( ) ) ;
nodes [ 1 ] . write ( & mut node_b_ser ) . unwrap ( ) ;
let mut node_c_ser = VecWriter ( Vec ::new ( ) ) ;
nodes [ 2 ] . write ( & mut node_c_ser ) . unwrap ( ) ;
// End the fuzz run, first asserting no channel was force-closed: A and C each still
// have their single channel, B still has both.
macro_rules! test_return {
    () => { {
        assert_eq!(nodes[0].list_channels().len(), 1);
        assert_eq!(nodes[1].list_channels().len(), 2);
        assert_eq!(nodes[2].list_channels().len(), 1);
        return;
    } }
}
let mut read_pos = 0;
// Consume the next `$len` bytes of fuzz input, ending the run (via test_return!)
// when the input is exhausted.
macro_rules! get_slice {
    ($len: expr) => {
        {
            let slice_len = $len as usize;
            if data.len() < read_pos + slice_len {
                test_return!();
            }
            read_pos += slice_len;
            &data[read_pos - slice_len..read_pos]
        }
    }
}
loop {
// Push any events from Node B onto ba_events and bc_events, routing each to the queue
// for its addressee. Panics if a message is bound for `$expect_drop_node`, which
// peer_disconnected should already have dropped.
macro_rules! push_excess_b_events {
    ($excess_events: expr, $expect_drop_node: expr) => { {
        let a_id = nodes[0].get_our_node_id();
        let expect_drop_node: Option<usize> = $expect_drop_node;
        let expect_drop_id = if let Some(id) = expect_drop_node { Some(nodes[id].get_our_node_id()) } else { None };
        for event in $excess_events {
            let push_a = match event {
                events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
                    if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
                    *node_id == a_id
                },
                events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => {
                    if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
                    *node_id == a_id
                },
                events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => {
                    if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
                    *node_id == a_id
                },
                // channel_ready/announcement_signatures need not be queued for redelivery.
                events::MessageSendEvent::SendChannelReady { .. } => continue,
                events::MessageSendEvent::SendAnnouncementSignatures { .. } => continue,
                events::MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => {
                    assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
                    if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
                    *node_id == a_id
                },
                _ => panic!("Unhandled message event {:?}", event),
            };
            if push_a { ba_events.push(event); } else { bc_events.push(event); }
        }
    } }
}
// While delivering messages we pick between three delivery strategies so the fuzzer
// exercises as many interleavings as possible; each variant documents its strategy.
#[derive(PartialEq)]
enum ProcessMessages {
    /// Deliver every available message, including any newly generated by calling
    /// `get_and_clear_pending_msg_events()` (a call which can itself have side effects).
    AllMessages,
    /// First call `get_and_clear_pending_msg_events()`, then deliver at most a single
    /// message (possibly one that was already queued).
    OneMessage,
    /// Deliver at most a single already-queued message, skipping the
    /// `get_and_clear_pending_msg_events()` call entirely to avoid its side effects
    /// (eg freeing the HTLC holding cell) and thereby cover additional orderings.
    OnePendingMessage,
}
// Deliver queued and/or freshly-generated messages from node `$node` to their
// destinations, per the `$limit_events` strategy. `$corrupt_forward` flips a byte in
// forwarded update_add_htlc messages to force update_fail_malformed handling.
// Evaluates to true if any message was processed. Undelivered messages are pushed
// back onto the appropriate directed queue.
macro_rules! process_msg_events {
    ($node: expr, $corrupt_forward: expr, $limit_events: expr) => { {
        // Start from the queue(s) feeding out of this node; node B feeds both A and C.
        let mut events = if $node == 1 {
            let mut new_events = Vec::new();
            mem::swap(&mut new_events, &mut ba_events);
            new_events.extend_from_slice(&bc_events[..]);
            bc_events.clear();
            new_events
        } else if $node == 0 {
            let mut new_events = Vec::new();
            mem::swap(&mut new_events, &mut ab_events);
            new_events
        } else {
            let mut new_events = Vec::new();
            mem::swap(&mut new_events, &mut cb_events);
            new_events
        };
        let mut new_events = Vec::new();
        if $limit_events != ProcessMessages::OnePendingMessage {
            new_events = nodes[$node].get_and_clear_pending_msg_events();
        }
        let mut had_events = false;
        let mut events_iter = events.drain(..).chain(new_events.drain(..));
        let mut extra_ev = None;
        for event in &mut events_iter {
            had_events = true;
            match event {
                events::MessageSendEvent::UpdateHTLCs { node_id, updates: CommitmentUpdate { update_add_htlcs, update_fail_htlcs, update_fulfill_htlcs, update_fail_malformed_htlcs, update_fee, commitment_signed } } => {
                    for (idx, dest) in nodes.iter().enumerate() {
                        if dest.get_our_node_id() == node_id {
                            for update_add in update_add_htlcs.iter() {
                                out.locked_write(format!("Delivering update_add_htlc to node {}.\n", idx).as_bytes());
                                if !$corrupt_forward {
                                    dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), update_add);
                                } else {
                                    // Corrupt the update_add_htlc message so that its HMAC
                                    // check will fail and we generate a
                                    // update_fail_malformed_htlc instead of an
                                    // update_fail_htlc as we do when we reject a payment.
                                    let mut msg_ser = update_add.encode();
                                    msg_ser[1000] ^= 0xff;
                                    let new_msg = UpdateAddHTLC::read(&mut Cursor::new(&msg_ser)).unwrap();
                                    dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &new_msg);
                                }
                            }
                            for update_fulfill in update_fulfill_htlcs.iter() {
                                out.locked_write(format!("Delivering update_fulfill_htlc to node {}.\n", idx).as_bytes());
                                dest.handle_update_fulfill_htlc(&nodes[$node].get_our_node_id(), update_fulfill);
                            }
                            for update_fail in update_fail_htlcs.iter() {
                                out.locked_write(format!("Delivering update_fail_htlc to node {}.\n", idx).as_bytes());
                                dest.handle_update_fail_htlc(&nodes[$node].get_our_node_id(), update_fail);
                            }
                            for update_fail_malformed in update_fail_malformed_htlcs.iter() {
                                out.locked_write(format!("Delivering update_fail_malformed_htlc to node {}.\n", idx).as_bytes());
                                dest.handle_update_fail_malformed_htlc(&nodes[$node].get_our_node_id(), update_fail_malformed);
                            }
                            if let Some(msg) = update_fee {
                                out.locked_write(format!("Delivering update_fee to node {}.\n", idx).as_bytes());
                                dest.handle_update_fee(&nodes[$node].get_our_node_id(), &msg);
                            }
                            let processed_change = !update_add_htlcs.is_empty() || !update_fulfill_htlcs.is_empty() ||
                                !update_fail_htlcs.is_empty() || !update_fail_malformed_htlcs.is_empty();
                            if $limit_events != ProcessMessages::AllMessages && processed_change {
                                // If we only want to process some messages, don't deliver the CS until later.
                                extra_ev = Some(events::MessageSendEvent::UpdateHTLCs { node_id, updates: CommitmentUpdate {
                                    update_add_htlcs: Vec::new(),
                                    update_fail_htlcs: Vec::new(),
                                    update_fulfill_htlcs: Vec::new(),
                                    update_fail_malformed_htlcs: Vec::new(),
                                    update_fee: None,
                                    commitment_signed
                                } });
                                break;
                            }
                            out.locked_write(format!("Delivering commitment_signed to node {}.\n", idx).as_bytes());
                            dest.handle_commitment_signed(&nodes[$node].get_our_node_id(), &commitment_signed);
                            break;
                        }
                    }
                },
                events::MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
                    for (idx, dest) in nodes.iter().enumerate() {
                        if dest.get_our_node_id() == *node_id {
                            out.locked_write(format!("Delivering revoke_and_ack to node {}.\n", idx).as_bytes());
                            dest.handle_revoke_and_ack(&nodes[$node].get_our_node_id(), msg);
                        }
                    }
                },
                events::MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
                    for (idx, dest) in nodes.iter().enumerate() {
                        if dest.get_our_node_id() == *node_id {
                            out.locked_write(format!("Delivering channel_reestablish to node {}.\n", idx).as_bytes());
                            dest.handle_channel_reestablish(&nodes[$node].get_our_node_id(), msg);
                        }
                    }
                },
                events::MessageSendEvent::SendChannelReady { .. } => {
                    // Can be generated as a reestablish response
                },
                events::MessageSendEvent::SendAnnouncementSignatures { .. } => {
                    // Can be generated as a reestablish response
                },
                events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
                    // When we reconnect we will resend a channel_update to make sure our
                    // counterparty has the latest parameters for receiving payments
                    // through us. We do, however, check that the message does not include
                    // the "disabled" bit, as we should never ever have a channel which is
                    // disabled when we send such an update (or it may indicate channel
                    // force-close which we should detect as an error).
                    assert_eq!(msg.contents.flags & 2, 0);
                },
                _ => if out.may_fail.load(atomic::Ordering::Acquire) {
                    return;
                } else {
                    panic!("Unhandled message event {:?}", event)
                },
            }
            if $limit_events != ProcessMessages::AllMessages {
                break;
            }
        }
        // Re-queue whatever we did not deliver (including a held-back commitment_signed).
        if $node == 1 {
            push_excess_b_events!(extra_ev.into_iter().chain(events_iter), None);
        } else if $node == 0 {
            if let Some(ev) = extra_ev { ab_events.push(ev); }
            for event in events_iter { ab_events.push(event); }
        } else {
            if let Some(ev) = extra_ev { cb_events.push(ev); }
            for event in events_iter { cb_events.push(event); }
        }
        had_events
    } }
}
// Simulate a disconnect of node `$counterparty_id` (0 or 2) from node B: drain the
// disconnected node's pending message events (asserting only expected types appear),
// re-queue B's events to the still-connected side, and clear both directed queues for
// the severed link.
macro_rules! drain_msg_events_on_disconnect {
    ($counterparty_id: expr) => { {
        if $counterparty_id == 0 {
            for event in nodes[0].get_and_clear_pending_msg_events() {
                match event {
                    events::MessageSendEvent::UpdateHTLCs { .. } => {},
                    events::MessageSendEvent::SendRevokeAndACK { .. } => {},
                    events::MessageSendEvent::SendChannelReestablish { .. } => {},
                    events::MessageSendEvent::SendChannelReady { .. } => {},
                    events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
                    events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
                        assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
                    },
                    _ => if out.may_fail.load(atomic::Ordering::Acquire) {
                        return;
                    } else {
                        panic!("Unhandled message event")
                    },
                }
            }
            push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(0));
            ab_events.clear();
            ba_events.clear();
        } else {
            for event in nodes[2].get_and_clear_pending_msg_events() {
                match event {
                    events::MessageSendEvent::UpdateHTLCs { .. } => {},
                    events::MessageSendEvent::SendRevokeAndACK { .. } => {},
                    events::MessageSendEvent::SendChannelReestablish { .. } => {},
                    events::MessageSendEvent::SendChannelReady { .. } => {},
                    events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
                    events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
                        assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
                    },
                    _ => if out.may_fail.load(atomic::Ordering::Acquire) {
                        return;
                    } else {
                        panic!("Unhandled message event")
                    },
                }
            }
            push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(2));
            bc_events.clear();
            cb_events.clear();
        }
    } }
}
// Process node `$node`'s pending Events, claiming (or failing, when `$fail` is set)
// each receivable payment and running HTLC forwards. Evaluates to true if any event
// was handled.
macro_rules! process_events {
    ($node: expr, $fail: expr) => { {
        // In case we get 256 payments we may have a hash collision, resulting in the
        // second claim/fail call not finding the duplicate-hash HTLC, so we have to
        // deduplicate the calls here.
        let mut claim_set = HashSet::new();
        let mut events = nodes[$node].get_and_clear_pending_events();
        // Sort events so that PendingHTLCsForwardable get processed last. This avoids a
        // case where we first process a PendingHTLCsForwardable, then claim/fail on a
        // PaymentClaimable, claiming/failing two HTLCs, but leaving a just-generated
        // PaymentClaimable event for the second HTLC in our pending_events (and breaking
        // our claim_set deduplication).
        events.sort_by(|a, b| {
            if let events::Event::PaymentClaimable { .. } = a {
                if let events::Event::PendingHTLCsForwardable { .. } = b {
                    Ordering::Less
                } else { Ordering::Equal }
            } else if let events::Event::PendingHTLCsForwardable { .. } = a {
                if let events::Event::PaymentClaimable { .. } = b {
                    Ordering::Greater
                } else { Ordering::Equal }
            } else { Ordering::Equal }
        });
        let had_events = !events.is_empty();
        for event in events.drain(..) {
            match event {
                events::Event::PaymentClaimable { payment_hash, .. } => {
                    if claim_set.insert(payment_hash.0) {
                        if $fail {
                            nodes[$node].fail_htlc_backwards(&payment_hash);
                        } else {
                            // The fuzzer's preimage is simply the payment hash bytes.
                            nodes[$node].claim_funds(PaymentPreimage(payment_hash.0));
                        }
                    }
                },
                events::Event::PaymentSent { .. } => {},
                events::Event::PaymentClaimed { .. } => {},
                events::Event::PaymentPathSuccessful { .. } => {},
                events::Event::PaymentPathFailed { .. } => {},
                events::Event::PaymentFailed { .. } => {},
                events::Event::ProbeSuccessful { .. } | events::Event::ProbeFailed { .. } => {
                    // Even though we don't explicitly send probes, because probes are
                    // detected based on hashing the payment hash+preimage, its rather
                    // trivial for the fuzzer to build payments that accidentally end up
                    // looking like probes.
                },
                events::Event::PaymentForwarded { .. } if $node == 1 => {},
                events::Event::ChannelReady { .. } => {},
                events::Event::PendingHTLCsForwardable { .. } => {
                    nodes[$node].process_pending_htlc_forwards();
                },
                events::Event::HTLCHandlingFailed { .. } => {},
                _ => if out.may_fail.load(atomic::Ordering::Acquire) {
                    return;
                } else {
                    panic!("Unhandled event")
                },
            }
        }
        had_events
    } }
}
2021-07-13 02:23:41 +00:00
let v = get_slice! ( 1 ) [ 0 ] ;
out . locked_write ( format! ( " READ A BYTE! HANDLING INPUT {:x} ........... \n " , v ) . as_bytes ( ) ) ;
match v {
2020-11-17 15:24:20 -05:00
// In general, we keep related message groups close together in binary form, allowing
// bit-twiddling mutations to have similar effects. This is probably overkill, but no
// harm in doing so.
2022-07-18 01:32:27 +00:00
0x00 = > * monitor_a . persister . update_ret . lock ( ) . unwrap ( ) = ChannelMonitorUpdateStatus ::InProgress ,
0x01 = > * monitor_b . persister . update_ret . lock ( ) . unwrap ( ) = ChannelMonitorUpdateStatus ::InProgress ,
0x02 = > * monitor_c . persister . update_ret . lock ( ) . unwrap ( ) = ChannelMonitorUpdateStatus ::InProgress ,
0x04 = > * monitor_a . persister . update_ret . lock ( ) . unwrap ( ) = ChannelMonitorUpdateStatus ::Completed ,
0x05 = > * monitor_b . persister . update_ret . lock ( ) . unwrap ( ) = ChannelMonitorUpdateStatus ::Completed ,
0x06 = > * monitor_c . persister . update_ret . lock ( ) . unwrap ( ) = ChannelMonitorUpdateStatus ::Completed ,
2020-11-17 15:24:20 -05:00
0x08 = > {
2020-02-11 18:34:29 -05:00
if let Some ( ( id , _ ) ) = monitor_a . latest_monitors . lock ( ) . unwrap ( ) . get ( & chan_1_funding ) {
2021-10-07 23:59:47 +00:00
monitor_a . chain_monitor . force_channel_monitor_updated ( chan_1_funding , * id ) ;
2021-10-07 18:51:49 +00:00
nodes [ 0 ] . process_monitor_events ( ) ;
2020-02-11 18:34:29 -05:00
}
} ,
2020-11-17 15:24:20 -05:00
0x09 = > {
2020-02-11 18:34:29 -05:00
if let Some ( ( id , _ ) ) = monitor_b . latest_monitors . lock ( ) . unwrap ( ) . get ( & chan_1_funding ) {
2021-10-07 23:59:47 +00:00
monitor_b . chain_monitor . force_channel_monitor_updated ( chan_1_funding , * id ) ;
2021-10-07 18:51:49 +00:00
nodes [ 1 ] . process_monitor_events ( ) ;
2020-02-11 18:34:29 -05:00
}
} ,
2020-11-17 15:24:20 -05:00
0x0a = > {
2020-02-11 18:34:29 -05:00
if let Some ( ( id , _ ) ) = monitor_b . latest_monitors . lock ( ) . unwrap ( ) . get ( & chan_2_funding ) {
2021-10-07 23:59:47 +00:00
monitor_b . chain_monitor . force_channel_monitor_updated ( chan_2_funding , * id ) ;
2021-10-07 18:51:49 +00:00
nodes [ 1 ] . process_monitor_events ( ) ;
2020-02-11 18:34:29 -05:00
}
} ,
2020-11-17 15:24:20 -05:00
0x0b = > {
2020-02-11 18:34:29 -05:00
if let Some ( ( id , _ ) ) = monitor_c . latest_monitors . lock ( ) . unwrap ( ) . get ( & chan_2_funding ) {
2021-10-07 23:59:47 +00:00
monitor_c . chain_monitor . force_channel_monitor_updated ( chan_2_funding , * id ) ;
2021-10-07 18:51:49 +00:00
nodes [ 2 ] . process_monitor_events ( ) ;
2020-02-11 18:34:29 -05:00
}
} ,
2020-11-17 15:24:20 -05:00
0x0c = > {
2019-01-07 17:17:36 -05:00
if ! chan_a_disconnected {
2023-02-21 19:10:43 +00:00
nodes [ 0 ] . peer_disconnected ( & nodes [ 1 ] . get_our_node_id ( ) ) ;
nodes [ 1 ] . peer_disconnected ( & nodes [ 0 ] . get_our_node_id ( ) ) ;
2019-01-07 17:17:36 -05:00
chan_a_disconnected = true ;
2019-07-23 15:39:11 -04:00
drain_msg_events_on_disconnect! ( 0 ) ;
2019-01-07 17:17:36 -05:00
}
} ,
2020-11-17 15:24:20 -05:00
0x0d = > {
2019-01-07 17:17:36 -05:00
if ! chan_b_disconnected {
2023-02-21 19:10:43 +00:00
nodes [ 1 ] . peer_disconnected ( & nodes [ 2 ] . get_our_node_id ( ) ) ;
nodes [ 2 ] . peer_disconnected ( & nodes [ 1 ] . get_our_node_id ( ) ) ;
2019-01-07 17:17:36 -05:00
chan_b_disconnected = true ;
2019-07-23 15:39:11 -04:00
drain_msg_events_on_disconnect! ( 2 ) ;
2019-01-07 17:17:36 -05:00
}
} ,
2020-11-17 15:24:20 -05:00
0x0e = > {
2019-07-23 15:39:11 -04:00
if chan_a_disconnected {
2023-06-01 10:23:55 +02:00
nodes [ 0 ] . peer_connected ( & nodes [ 1 ] . get_our_node_id ( ) , & Init {
features : nodes [ 1 ] . init_features ( ) , networks : None , remote_network_address : None
} , true ) . unwrap ( ) ;
nodes [ 1 ] . peer_connected ( & nodes [ 0 ] . get_our_node_id ( ) , & Init {
features : nodes [ 0 ] . init_features ( ) , networks : None , remote_network_address : None
} , false ) . unwrap ( ) ;
2019-07-23 15:39:11 -04:00
chan_a_disconnected = false ;
2019-01-07 17:17:36 -05:00
}
} ,
2020-11-17 15:24:20 -05:00
0x0f = > {
2019-07-23 15:39:11 -04:00
if chan_b_disconnected {
2023-06-01 10:23:55 +02:00
nodes [ 1 ] . peer_connected ( & nodes [ 2 ] . get_our_node_id ( ) , & Init {
features : nodes [ 2 ] . init_features ( ) , networks : None , remote_network_address : None
} , true ) . unwrap ( ) ;
nodes [ 2 ] . peer_connected ( & nodes [ 1 ] . get_our_node_id ( ) , & Init {
features : nodes [ 1 ] . init_features ( ) , networks : None , remote_network_address : None
} , false ) . unwrap ( ) ;
2019-07-23 15:39:11 -04:00
chan_b_disconnected = false ;
2019-01-07 17:17:36 -05:00
}
} ,
2020-11-17 15:24:20 -05:00
2021-04-21 17:03:57 +00:00
0x10 = > { process_msg_events! ( 0 , true , ProcessMessages ::AllMessages ) ; } ,
0x11 = > { process_msg_events! ( 0 , false , ProcessMessages ::AllMessages ) ; } ,
0x12 = > { process_msg_events! ( 0 , true , ProcessMessages ::OneMessage ) ; } ,
0x13 = > { process_msg_events! ( 0 , false , ProcessMessages ::OneMessage ) ; } ,
0x14 = > { process_msg_events! ( 0 , true , ProcessMessages ::OnePendingMessage ) ; } ,
0x15 = > { process_msg_events! ( 0 , false , ProcessMessages ::OnePendingMessage ) ; } ,
0x16 = > { process_events! ( 0 , true ) ; } ,
0x17 = > { process_events! ( 0 , false ) ; } ,
0x18 = > { process_msg_events! ( 1 , true , ProcessMessages ::AllMessages ) ; } ,
0x19 = > { process_msg_events! ( 1 , false , ProcessMessages ::AllMessages ) ; } ,
0x1a = > { process_msg_events! ( 1 , true , ProcessMessages ::OneMessage ) ; } ,
0x1b = > { process_msg_events! ( 1 , false , ProcessMessages ::OneMessage ) ; } ,
0x1c = > { process_msg_events! ( 1 , true , ProcessMessages ::OnePendingMessage ) ; } ,
0x1d = > { process_msg_events! ( 1 , false , ProcessMessages ::OnePendingMessage ) ; } ,
0x1e = > { process_events! ( 1 , true ) ; } ,
0x1f = > { process_events! ( 1 , false ) ; } ,
0x20 = > { process_msg_events! ( 2 , true , ProcessMessages ::AllMessages ) ; } ,
0x21 = > { process_msg_events! ( 2 , false , ProcessMessages ::AllMessages ) ; } ,
0x22 = > { process_msg_events! ( 2 , true , ProcessMessages ::OneMessage ) ; } ,
0x23 = > { process_msg_events! ( 2 , false , ProcessMessages ::OneMessage ) ; } ,
0x24 = > { process_msg_events! ( 2 , true , ProcessMessages ::OnePendingMessage ) ; } ,
0x25 = > { process_msg_events! ( 2 , false , ProcessMessages ::OnePendingMessage ) ; } ,
0x26 = > { process_events! ( 2 , true ) ; } ,
0x27 = > { process_events! ( 2 , false ) ; } ,
0x2c = > {
2019-07-22 17:28:49 -04:00
if ! chan_a_disconnected {
2023-02-21 19:10:43 +00:00
nodes [ 1 ] . peer_disconnected ( & nodes [ 0 ] . get_our_node_id ( ) ) ;
2019-07-22 17:28:49 -04:00
chan_a_disconnected = true ;
2023-08-28 01:35:16 +00:00
push_excess_b_events! ( nodes [ 1 ] . get_and_clear_pending_msg_events ( ) . drain ( .. ) , Some ( 0 ) ) ;
ab_events . clear ( ) ;
ba_events . clear ( ) ;
2021-04-21 02:37:02 +00:00
}
2021-06-30 03:09:04 +00:00
let ( new_node_a , new_monitor_a ) = reload_node! ( node_a_ser , 0 , monitor_a , keys_manager_a , fee_est_a ) ;
2020-11-21 12:09:40 -05:00
nodes [ 0 ] = new_node_a ;
2019-07-22 17:28:49 -04:00
monitor_a = new_monitor_a ;
} ,
2021-04-21 17:03:57 +00:00
0x2d = > {
2019-07-22 17:28:49 -04:00
if ! chan_a_disconnected {
2023-02-21 19:10:43 +00:00
nodes [ 0 ] . peer_disconnected ( & nodes [ 1 ] . get_our_node_id ( ) ) ;
2019-07-22 17:28:49 -04:00
chan_a_disconnected = true ;
nodes [ 0 ] . get_and_clear_pending_msg_events ( ) ;
2021-04-21 17:03:57 +00:00
ab_events . clear ( ) ;
2019-07-22 17:28:49 -04:00
ba_events . clear ( ) ;
}
if ! chan_b_disconnected {
2023-02-21 19:10:43 +00:00
nodes [ 2 ] . peer_disconnected ( & nodes [ 1 ] . get_our_node_id ( ) ) ;
2019-07-22 17:28:49 -04:00
chan_b_disconnected = true ;
nodes [ 2 ] . get_and_clear_pending_msg_events ( ) ;
bc_events . clear ( ) ;
2021-04-21 17:03:57 +00:00
cb_events . clear ( ) ;
2019-07-22 17:28:49 -04:00
}
2021-06-30 03:09:04 +00:00
let ( new_node_b , new_monitor_b ) = reload_node! ( node_b_ser , 1 , monitor_b , keys_manager_b , fee_est_b ) ;
2020-11-21 12:09:40 -05:00
nodes [ 1 ] = new_node_b ;
2019-07-22 17:28:49 -04:00
monitor_b = new_monitor_b ;
} ,
2021-04-21 17:03:57 +00:00
0x2e = > {
2019-07-22 17:28:49 -04:00
if ! chan_b_disconnected {
2023-02-21 19:10:43 +00:00
nodes [ 1 ] . peer_disconnected ( & nodes [ 2 ] . get_our_node_id ( ) ) ;
2019-07-22 17:28:49 -04:00
chan_b_disconnected = true ;
2023-08-28 01:35:16 +00:00
push_excess_b_events! ( nodes [ 1 ] . get_and_clear_pending_msg_events ( ) . drain ( .. ) , Some ( 2 ) ) ;
bc_events . clear ( ) ;
cb_events . clear ( ) ;
2021-04-21 02:37:02 +00:00
}
2021-06-30 03:09:04 +00:00
let ( new_node_c , new_monitor_c ) = reload_node! ( node_c_ser , 2 , monitor_c , keys_manager_c , fee_est_c ) ;
2020-11-21 12:09:40 -05:00
nodes [ 2 ] = new_node_c ;
2019-07-22 17:28:49 -04:00
monitor_c = new_monitor_c ;
} ,
2020-11-17 15:24:20 -05:00
// 1/10th the channel size:
2022-10-06 21:31:02 +00:00
0x30 = > { send_payment ( & nodes [ 0 ] , & nodes [ 1 ] , chan_a , 10_000_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x31 = > { send_payment ( & nodes [ 1 ] , & nodes [ 0 ] , chan_a , 10_000_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x32 = > { send_payment ( & nodes [ 1 ] , & nodes [ 2 ] , chan_b , 10_000_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x33 = > { send_payment ( & nodes [ 2 ] , & nodes [ 1 ] , chan_b , 10_000_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x34 = > { send_hop_payment ( & nodes [ 0 ] , & nodes [ 1 ] , chan_a , & nodes [ 2 ] , chan_b , 10_000_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x35 = > { send_hop_payment ( & nodes [ 2 ] , & nodes [ 1 ] , chan_b , & nodes [ 0 ] , chan_a , 10_000_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x38 = > { send_payment ( & nodes [ 0 ] , & nodes [ 1 ] , chan_a , 1_000_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x39 = > { send_payment ( & nodes [ 1 ] , & nodes [ 0 ] , chan_a , 1_000_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x3a = > { send_payment ( & nodes [ 1 ] , & nodes [ 2 ] , chan_b , 1_000_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x3b = > { send_payment ( & nodes [ 2 ] , & nodes [ 1 ] , chan_b , 1_000_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x3c = > { send_hop_payment ( & nodes [ 0 ] , & nodes [ 1 ] , chan_a , & nodes [ 2 ] , chan_b , 1_000_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x3d = > { send_hop_payment ( & nodes [ 2 ] , & nodes [ 1 ] , chan_b , & nodes [ 0 ] , chan_a , 1_000_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x40 = > { send_payment ( & nodes [ 0 ] , & nodes [ 1 ] , chan_a , 100_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x41 = > { send_payment ( & nodes [ 1 ] , & nodes [ 0 ] , chan_a , 100_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x42 = > { send_payment ( & nodes [ 1 ] , & nodes [ 2 ] , chan_b , 100_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x43 = > { send_payment ( & nodes [ 2 ] , & nodes [ 1 ] , chan_b , 100_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x44 = > { send_hop_payment ( & nodes [ 0 ] , & nodes [ 1 ] , chan_a , & nodes [ 2 ] , chan_b , 100_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x45 = > { send_hop_payment ( & nodes [ 2 ] , & nodes [ 1 ] , chan_b , & nodes [ 0 ] , chan_a , 100_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x48 = > { send_payment ( & nodes [ 0 ] , & nodes [ 1 ] , chan_a , 10_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x49 = > { send_payment ( & nodes [ 1 ] , & nodes [ 0 ] , chan_a , 10_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x4a = > { send_payment ( & nodes [ 1 ] , & nodes [ 2 ] , chan_b , 10_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x4b = > { send_payment ( & nodes [ 2 ] , & nodes [ 1 ] , chan_b , 10_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x4c = > { send_hop_payment ( & nodes [ 0 ] , & nodes [ 1 ] , chan_a , & nodes [ 2 ] , chan_b , 10_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x4d = > { send_hop_payment ( & nodes [ 2 ] , & nodes [ 1 ] , chan_b , & nodes [ 0 ] , chan_a , 10_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x50 = > { send_payment ( & nodes [ 0 ] , & nodes [ 1 ] , chan_a , 1_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x51 = > { send_payment ( & nodes [ 1 ] , & nodes [ 0 ] , chan_a , 1_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x52 = > { send_payment ( & nodes [ 1 ] , & nodes [ 2 ] , chan_b , 1_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x53 = > { send_payment ( & nodes [ 2 ] , & nodes [ 1 ] , chan_b , 1_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x54 = > { send_hop_payment ( & nodes [ 0 ] , & nodes [ 1 ] , chan_a , & nodes [ 2 ] , chan_b , 1_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x55 = > { send_hop_payment ( & nodes [ 2 ] , & nodes [ 1 ] , chan_b , & nodes [ 0 ] , chan_a , 1_000 , & mut payment_id , & mut payment_idx ) ; } ,
0x58 = > { send_payment ( & nodes [ 0 ] , & nodes [ 1 ] , chan_a , 100 , & mut payment_id , & mut payment_idx ) ; } ,
0x59 = > { send_payment ( & nodes [ 1 ] , & nodes [ 0 ] , chan_a , 100 , & mut payment_id , & mut payment_idx ) ; } ,
0x5a = > { send_payment ( & nodes [ 1 ] , & nodes [ 2 ] , chan_b , 100 , & mut payment_id , & mut payment_idx ) ; } ,
0x5b = > { send_payment ( & nodes [ 2 ] , & nodes [ 1 ] , chan_b , 100 , & mut payment_id , & mut payment_idx ) ; } ,
0x5c = > { send_hop_payment ( & nodes [ 0 ] , & nodes [ 1 ] , chan_a , & nodes [ 2 ] , chan_b , 100 , & mut payment_id , & mut payment_idx ) ; } ,
0x5d = > { send_hop_payment ( & nodes [ 2 ] , & nodes [ 1 ] , chan_b , & nodes [ 0 ] , chan_a , 100 , & mut payment_id , & mut payment_idx ) ; } ,
0x60 = > { send_payment ( & nodes [ 0 ] , & nodes [ 1 ] , chan_a , 10 , & mut payment_id , & mut payment_idx ) ; } ,
0x61 = > { send_payment ( & nodes [ 1 ] , & nodes [ 0 ] , chan_a , 10 , & mut payment_id , & mut payment_idx ) ; } ,
0x62 = > { send_payment ( & nodes [ 1 ] , & nodes [ 2 ] , chan_b , 10 , & mut payment_id , & mut payment_idx ) ; } ,
0x63 = > { send_payment ( & nodes [ 2 ] , & nodes [ 1 ] , chan_b , 10 , & mut payment_id , & mut payment_idx ) ; } ,
0x64 = > { send_hop_payment ( & nodes [ 0 ] , & nodes [ 1 ] , chan_a , & nodes [ 2 ] , chan_b , 10 , & mut payment_id , & mut payment_idx ) ; } ,
0x65 = > { send_hop_payment ( & nodes [ 2 ] , & nodes [ 1 ] , chan_b , & nodes [ 0 ] , chan_a , 10 , & mut payment_id , & mut payment_idx ) ; } ,
0x68 = > { send_payment ( & nodes [ 0 ] , & nodes [ 1 ] , chan_a , 1 , & mut payment_id , & mut payment_idx ) ; } ,
0x69 = > { send_payment ( & nodes [ 1 ] , & nodes [ 0 ] , chan_a , 1 , & mut payment_id , & mut payment_idx ) ; } ,
0x6a = > { send_payment ( & nodes [ 1 ] , & nodes [ 2 ] , chan_b , 1 , & mut payment_id , & mut payment_idx ) ; } ,
0x6b = > { send_payment ( & nodes [ 2 ] , & nodes [ 1 ] , chan_b , 1 , & mut payment_id , & mut payment_idx ) ; } ,
0x6c = > { send_hop_payment ( & nodes [ 0 ] , & nodes [ 1 ] , chan_a , & nodes [ 2 ] , chan_b , 1 , & mut payment_id , & mut payment_idx ) ; } ,
0x6d = > { send_hop_payment ( & nodes [ 2 ] , & nodes [ 1 ] , chan_b , & nodes [ 0 ] , chan_a , 1 , & mut payment_id , & mut payment_idx ) ; } ,
2020-11-17 15:24:20 -05:00
2021-06-30 03:09:04 +00:00
0x80 = > {
2023-10-19 09:29:21 -07:00
let mut max_feerate = last_htlc_clear_fee_a ;
if ! anchors {
max_feerate * = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32 ;
}
2021-06-30 03:09:04 +00:00
if fee_est_a . ret_val . fetch_add ( 250 , atomic ::Ordering ::AcqRel ) + 250 > max_feerate {
fee_est_a . ret_val . store ( max_feerate , atomic ::Ordering ::Release ) ;
}
nodes [ 0 ] . maybe_update_chan_fees ( ) ;
} ,
0x81 = > { fee_est_a . ret_val . store ( 253 , atomic ::Ordering ::Release ) ; nodes [ 0 ] . maybe_update_chan_fees ( ) ; } ,
0x84 = > {
2023-10-19 09:29:21 -07:00
let mut max_feerate = last_htlc_clear_fee_b ;
if ! anchors {
max_feerate * = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32 ;
}
2021-06-30 03:09:04 +00:00
if fee_est_b . ret_val . fetch_add ( 250 , atomic ::Ordering ::AcqRel ) + 250 > max_feerate {
fee_est_b . ret_val . store ( max_feerate , atomic ::Ordering ::Release ) ;
}
nodes [ 1 ] . maybe_update_chan_fees ( ) ;
} ,
0x85 = > { fee_est_b . ret_val . store ( 253 , atomic ::Ordering ::Release ) ; nodes [ 1 ] . maybe_update_chan_fees ( ) ; } ,
0x88 = > {
2023-10-19 09:29:21 -07:00
let mut max_feerate = last_htlc_clear_fee_c ;
if ! anchors {
max_feerate * = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32 ;
}
2021-06-30 03:09:04 +00:00
if fee_est_c . ret_val . fetch_add ( 250 , atomic ::Ordering ::AcqRel ) + 250 > max_feerate {
fee_est_c . ret_val . store ( max_feerate , atomic ::Ordering ::Release ) ;
}
nodes [ 2 ] . maybe_update_chan_fees ( ) ;
} ,
0x89 = > { fee_est_c . ret_val . store ( 253 , atomic ::Ordering ::Release ) ; nodes [ 2 ] . maybe_update_chan_fees ( ) ; } ,
2020-11-17 21:07:15 -05:00
0xff = > {
// Test that no channel is in a stuck state where neither party can send funds even
// after we resolve all pending events.
// First make sure there are no pending monitor updates, resetting the error state
2021-10-07 23:59:47 +00:00
// and calling force_channel_monitor_updated for each monitor.
2022-07-18 01:32:27 +00:00
* monitor_a . persister . update_ret . lock ( ) . unwrap ( ) = ChannelMonitorUpdateStatus ::Completed ;
* monitor_b . persister . update_ret . lock ( ) . unwrap ( ) = ChannelMonitorUpdateStatus ::Completed ;
* monitor_c . persister . update_ret . lock ( ) . unwrap ( ) = ChannelMonitorUpdateStatus ::Completed ;
2020-11-17 21:07:15 -05:00
if let Some ( ( id , _ ) ) = monitor_a . latest_monitors . lock ( ) . unwrap ( ) . get ( & chan_1_funding ) {
2021-10-07 23:59:47 +00:00
monitor_a . chain_monitor . force_channel_monitor_updated ( chan_1_funding , * id ) ;
2021-10-07 18:51:49 +00:00
nodes [ 0 ] . process_monitor_events ( ) ;
2020-11-17 21:07:15 -05:00
}
if let Some ( ( id , _ ) ) = monitor_b . latest_monitors . lock ( ) . unwrap ( ) . get ( & chan_1_funding ) {
2021-10-07 23:59:47 +00:00
monitor_b . chain_monitor . force_channel_monitor_updated ( chan_1_funding , * id ) ;
2021-10-07 18:51:49 +00:00
nodes [ 1 ] . process_monitor_events ( ) ;
2020-11-17 21:07:15 -05:00
}
if let Some ( ( id , _ ) ) = monitor_b . latest_monitors . lock ( ) . unwrap ( ) . get ( & chan_2_funding ) {
2021-10-07 23:59:47 +00:00
monitor_b . chain_monitor . force_channel_monitor_updated ( chan_2_funding , * id ) ;
2021-10-07 18:51:49 +00:00
nodes [ 1 ] . process_monitor_events ( ) ;
2020-11-17 21:07:15 -05:00
}
if let Some ( ( id , _ ) ) = monitor_c . latest_monitors . lock ( ) . unwrap ( ) . get ( & chan_2_funding ) {
2021-10-07 23:59:47 +00:00
monitor_c . chain_monitor . force_channel_monitor_updated ( chan_2_funding , * id ) ;
2021-10-07 18:51:49 +00:00
nodes [ 2 ] . process_monitor_events ( ) ;
2020-11-17 21:07:15 -05:00
}
// Next, make sure peers are all connected to each other
if chan_a_disconnected {
2023-06-01 10:23:55 +02:00
nodes [ 0 ] . peer_connected ( & nodes [ 1 ] . get_our_node_id ( ) , & Init {
features : nodes [ 1 ] . init_features ( ) , networks : None , remote_network_address : None
} , true ) . unwrap ( ) ;
nodes [ 1 ] . peer_connected ( & nodes [ 0 ] . get_our_node_id ( ) , & Init {
features : nodes [ 0 ] . init_features ( ) , networks : None , remote_network_address : None
} , false ) . unwrap ( ) ;
2020-11-17 21:07:15 -05:00
chan_a_disconnected = false ;
}
if chan_b_disconnected {
2023-06-01 10:23:55 +02:00
nodes [ 1 ] . peer_connected ( & nodes [ 2 ] . get_our_node_id ( ) , & Init {
features : nodes [ 2 ] . init_features ( ) , networks : None , remote_network_address : None
} , true ) . unwrap ( ) ;
nodes [ 2 ] . peer_connected ( & nodes [ 1 ] . get_our_node_id ( ) , & Init {
features : nodes [ 1 ] . init_features ( ) , networks : None , remote_network_address : None
} , false ) . unwrap ( ) ;
2020-11-17 21:07:15 -05:00
chan_b_disconnected = false ;
}
for i in 0 .. std ::usize ::MAX {
if i = = 100 { panic! ( " It may take may iterations to settle the state, but it should not take forever " ) ; }
// Then, make sure any current forwards make their way to their destination
2021-04-21 17:03:57 +00:00
if process_msg_events! ( 0 , false , ProcessMessages ::AllMessages ) { continue ; }
if process_msg_events! ( 1 , false , ProcessMessages ::AllMessages ) { continue ; }
if process_msg_events! ( 2 , false , ProcessMessages ::AllMessages ) { continue ; }
2020-11-17 21:07:15 -05:00
// ...making sure any pending PendingHTLCsForwardable events are handled and
// payments claimed.
if process_events! ( 0 , false ) { continue ; }
if process_events! ( 1 , false ) { continue ; }
if process_events! ( 2 , false ) { continue ; }
break ;
}
2021-08-21 18:05:51 -04:00
// Finally, make sure that at least one end of each channel can make a substantial payment
2020-11-17 21:07:15 -05:00
assert! (
2022-10-06 21:31:02 +00:00
send_payment ( & nodes [ 0 ] , & nodes [ 1 ] , chan_a , 10_000_000 , & mut payment_id , & mut payment_idx ) | |
send_payment ( & nodes [ 1 ] , & nodes [ 0 ] , chan_a , 10_000_000 , & mut payment_id , & mut payment_idx ) ) ;
2020-11-17 21:07:15 -05:00
assert! (
2022-10-06 21:31:02 +00:00
send_payment ( & nodes [ 1 ] , & nodes [ 2 ] , chan_b , 10_000_000 , & mut payment_id , & mut payment_idx ) | |
send_payment ( & nodes [ 2 ] , & nodes [ 1 ] , chan_b , 10_000_000 , & mut payment_id , & mut payment_idx ) ) ;
2021-06-30 03:09:04 +00:00
last_htlc_clear_fee_a = fee_est_a . ret_val . load ( atomic ::Ordering ::Acquire ) ;
last_htlc_clear_fee_b = fee_est_b . ret_val . load ( atomic ::Ordering ::Acquire ) ;
last_htlc_clear_fee_c = fee_est_c . ret_val . load ( atomic ::Ordering ::Acquire ) ;
2020-11-17 21:07:15 -05:00
} ,
2019-01-07 17:17:36 -05:00
_ = > test_return! ( ) ,
}
2019-07-22 17:28:49 -04:00
2023-08-28 01:25:36 +00:00
if nodes [ 0 ] . get_and_clear_needs_persistence ( ) = = true {
node_a_ser . 0. clear ( ) ;
nodes [ 0 ] . write ( & mut node_a_ser ) . unwrap ( ) ;
}
if nodes [ 1 ] . get_and_clear_needs_persistence ( ) = = true {
node_b_ser . 0. clear ( ) ;
nodes [ 1 ] . write ( & mut node_b_ser ) . unwrap ( ) ;
}
if nodes [ 2 ] . get_and_clear_needs_persistence ( ) = = true {
node_c_ser . 0. clear ( ) ;
nodes [ 2 ] . write ( & mut node_c_ser ) . unwrap ( ) ;
}
2019-01-07 17:17:36 -05:00
}
}
2021-08-21 18:05:51 -04:00
/// We actually have different behavior based on if a certain log string has been seen, so we have
/// to do a bit more tracking.
#[derive(Clone)]
struct SearchingOutput<O: Output> {
	/// Underlying sink which receives every log line we see.
	output: O,
	/// Set once the known "may fail" log line has been observed; shared across clones so that
	/// all copies of this output agree on whether a panic is acceptable.
	may_fail: Arc<atomic::AtomicBool>,
}
impl<O: Output> Output for SearchingOutput<O> {
	/// Forwards `data` to the wrapped output, first scanning it for the one log line which
	/// indicates we hit a known, acceptable failure mode.
	fn locked_write(&self, data: &[u8]) {
		// We hit a design limitation of LN state machine (see CONCURRENT_INBOUND_HTLC_FEE_BUFFER)
		// Note: the needle must match the emitted log line exactly — the garbled version of this
		// literal carried stray leading/trailing spaces and could never match.
		if std::str::from_utf8(data).unwrap().contains("Outbound update_fee HTLC buffer overflow - counterparty should force-close this channel") {
			self.may_fail.store(true, atomic::Ordering::Release);
		}
		self.output.locked_write(data)
	}
}
impl<O: Output> SearchingOutput<O> {
	/// Wraps `output`, starting with the may-fail flag unset.
	pub fn new(output: O) -> Self {
		let may_fail = Arc::new(atomic::AtomicBool::new(false));
		Self { output, may_fail }
	}
}
pub fn chanmon_consistency_test < Out : Output > ( data : & [ u8 ] , out : Out ) {
2023-10-19 09:22:50 -07:00
do_test ( data , out . clone ( ) , false ) ;
do_test ( data , out , true ) ;
2020-02-20 20:11:40 -05:00
}
2019-12-11 13:18:43 -05:00
#[ no_mangle ]
pub extern " C " fn chanmon_consistency_run ( data : * const u8 , datalen : usize ) {
2023-10-19 09:22:50 -07:00
do_test ( unsafe { std ::slice ::from_raw_parts ( data , datalen ) } , test_logger ::DevNull { } , false ) ;
do_test ( unsafe { std ::slice ::from_raw_parts ( data , datalen ) } , test_logger ::DevNull { } , true ) ;
2019-01-07 17:17:36 -05:00
}