Generate shutdown script at channel close

When a shutdown script is omitted from open_channel or accept_channel,
it must be provided when sending shutdown. Generate the shutdown script
at channel closing time in this case rather than at channel opening.

This requires producing a ChannelMonitorUpdate with the shutdown script
since it is no longer known at ChannelMonitor creation.
Jeffrey Czyz 2021-07-26 14:04:44 -04:00
parent 1c449a39f4
commit ecb0b84241
6 changed files with 389 additions and 85 deletions
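
To make the mechanism above concrete, here is a minimal, hypothetical sketch (simplified stand-ins, not LDK's actual types) of a shutdown script reaching a monitor through a dedicated update step, mirroring the ChannelMonitorUpdateStep::ShutdownScript variant added below:

// Hypothetical sketch: the shutdown script is chosen at close time and may be
// set on the monitor exactly once via an update step.
#[derive(Clone, Debug, PartialEq)]
struct Script(Vec<u8>); // stand-in for bitcoin::Script

#[derive(Debug)]
enum MonitorUpdateStep {
    ShutdownScript { scriptpubkey: Script },
}

#[derive(Default)]
struct Monitor {
    shutdown_script: Option<Script>,
}

impl Monitor {
    fn apply(&mut self, step: MonitorUpdateStep) {
        match step {
            MonitorUpdateStep::ShutdownScript { scriptpubkey } => {
                // Mirrors the panic in the channelmonitor hunk below: replacing
                // an already-known shutdown script indicates a logic error.
                if let Some(existing) = self.shutdown_script.replace(scriptpubkey.clone()) {
                    panic!("Attempted to replace shutdown script {:?} with {:?}", existing, scriptpubkey);
                }
            }
        }
    }
}

fn main() {
    let mut monitor = Monitor::default();
    monitor.apply(MonitorUpdateStep::ShutdownScript { scriptpubkey: Script(vec![0x00, 0x14]) });
    assert_eq!(monitor.shutdown_script, Some(Script(vec![0x00, 0x14])));
}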

@ -438,6 +438,9 @@ pub(crate) enum ChannelMonitorUpdateStep {
/// think we've fallen behind!
should_broadcast: bool,
},
ShutdownScript {
scriptpubkey: Script,
},
}
impl_writeable_tlv_based_enum!(ChannelMonitorUpdateStep,
@ -461,6 +464,9 @@ impl_writeable_tlv_based_enum!(ChannelMonitorUpdateStep,
(4, ChannelForceClosed) => {
(0, should_broadcast, required),
},
(5, ShutdownScript) => {
(0, scriptpubkey, required),
},
;);
/// A ChannelMonitor handles chain events (blocks connected and disconnected) and generates
@ -1437,7 +1443,13 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
// shouldn't print the scary warning above.
log_info!(logger, "Channel off-chain state closed after we broadcasted our latest commitment transaction.");
}
}
},
ChannelMonitorUpdateStep::ShutdownScript { scriptpubkey } => {
log_trace!(logger, "Updating ChannelMonitor with shutdown script");
if let Some(shutdown_script) = self.shutdown_script.replace(scriptpubkey.clone()) {
panic!("Attempted to replace shutdown script {} with {}", shutdown_script, scriptpubkey);
}
},
}
}
self.latest_update_id = updates.update_id;

@ -2369,3 +2369,66 @@ fn test_reconnect_dup_htlc_claims() {
do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell, true);
do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared, true);
}
#[test]
fn test_temporary_error_during_shutdown() {
// Test that temporary failures when updating the monitor's shutdown script do not prevent
// cooperative close.
let mut config = test_default_channel_config();
config.channel_options.commit_upfront_shutdown_pubkey = false;
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), Some(config)]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
*nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, false);
check_added_monitors!(nodes[0], 1);
check_added_monitors!(nodes[1], 1);
}
#[test]
fn test_permanent_error_during_sending_shutdown() {
// Test that permanent failures when updating the monitor's shutdown script result in a force
// close when initiating a cooperative close.
let mut config = test_default_channel_config();
config.channel_options.commit_upfront_shutdown_pubkey = false;
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
*nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::PermanentFailure));
assert!(nodes[0].node.close_channel(&channel_id).is_ok());
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 2);
}
#[test]
fn test_permanent_error_during_handling_shutdown() {
// Test that permanent failures when updating the monitor's shutdown script result in a force
// close when handling a cooperative close.
let mut config = test_default_channel_config();
config.channel_options.commit_upfront_shutdown_pubkey = false;
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(config)]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::PermanentFailure));
assert!(nodes[0].node.close_channel(&channel_id).is_ok());
let shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &shutdown);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 2);
}
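
For context on what these three tests exercise: a ChannelMonitorUpdateErr::TemporaryFailure is retried later and does not block the cooperative close, while a PermanentFailure forces the channel closed. A rough sketch of that branching (hypothetical handler, not LDK's handle_monitor_err! macro):

#[derive(Debug)]
enum ChannelMonitorUpdateErr { TemporaryFailure, PermanentFailure }

fn on_shutdown_monitor_update(res: Result<(), ChannelMonitorUpdateErr>) -> &'static str {
    match res {
        // The shutdown handshake proceeds normally.
        Ok(()) => "monitor updated; continue cooperative close",
        // Update is retried once the monitor recovers; the cooperative close
        // still completes, which is what test_temporary_error_during_shutdown checks.
        Err(ChannelMonitorUpdateErr::TemporaryFailure) => "pause channel and retry update",
        // The monitor cannot be trusted to know the shutdown script, so the
        // channel is force-closed, as the two permanent-error tests check.
        Err(ChannelMonitorUpdateErr::PermanentFailure) => "force-close channel",
    }
}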

@ -574,7 +574,7 @@ impl<Signer: Sign> Channel<Signer> {
}
// Constructors:
pub fn new_outbound<K: Deref, F: Deref>(fee_estimator: &F, keys_provider: &K, counterparty_node_id: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64, config: &UserConfig) -> Result<Channel<Signer>, APIError>
pub fn new_outbound<K: Deref, F: Deref>(fee_estimator: &F, keys_provider: &K, counterparty_node_id: PublicKey, their_features: InitFeatures, channel_value_satoshis: u64, push_msat: u64, user_id: u64, config: &UserConfig) -> Result<Channel<Signer>, APIError>
where K::Target: KeysInterface<Signer = Signer>,
F::Target: FeeEstimator,
{
@ -602,6 +602,16 @@ impl<Signer: Sign> Channel<Signer> {
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&keys_provider.get_secure_random_bytes());
let shutdown_scriptpubkey = if config.channel_options.commit_upfront_shutdown_pubkey {
Some(keys_provider.get_shutdown_scriptpubkey())
} else { None };
if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
if !shutdown_scriptpubkey.is_compatible(&their_features) {
return Err(APIError::APIMisuseError { err: format!("Provided a scriptpubkey format not accepted by peer. script: ({})", shutdown_scriptpubkey.clone().into_inner().to_bytes().to_hex()) });
}
}
Ok(Channel {
user_id,
config: config.channel_options.clone(),
@ -614,7 +624,7 @@ impl<Signer: Sign> Channel<Signer> {
latest_monitor_update_id: 0,
holder_signer,
shutdown_scriptpubkey: Some(keys_provider.get_shutdown_scriptpubkey()),
shutdown_scriptpubkey,
destination_script: keys_provider.get_destination_script(),
cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
@ -844,6 +854,16 @@ impl<Signer: Sign> Channel<Signer> {
}
} else { None };
let shutdown_scriptpubkey = if config.channel_options.commit_upfront_shutdown_pubkey {
Some(keys_provider.get_shutdown_scriptpubkey())
} else { None };
if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
if !shutdown_scriptpubkey.is_compatible(&their_features) {
return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer. script: ({})", shutdown_scriptpubkey.clone().into_inner().to_bytes().to_hex())));
}
}
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&keys_provider.get_secure_random_bytes());
@ -858,7 +878,7 @@ impl<Signer: Sign> Channel<Signer> {
latest_monitor_update_id: 0,
holder_signer,
shutdown_scriptpubkey: Some(keys_provider.get_shutdown_scriptpubkey()),
shutdown_scriptpubkey,
destination_script: keys_provider.get_destination_script(),
cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
@ -1206,6 +1226,7 @@ impl<Signer: Sign> Channel<Signer> {
}, ()));
}
assert!(self.shutdown_scriptpubkey.is_some());
if value_to_self as u64 > self.holder_dust_limit_satoshis {
txouts.push((TxOut {
script_pubkey: self.get_closing_scriptpubkey(),
@ -3096,6 +3117,7 @@ impl<Signer: Sign> Channel<Signer> {
self.channel_state &= !(ChannelState::PeerDisconnected as u32);
let shutdown_msg = if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
assert!(self.shutdown_scriptpubkey.is_some());
Some(msgs::Shutdown {
channel_id: self.channel_id,
scriptpubkey: self.get_closing_scriptpubkey(),
@ -3207,6 +3229,7 @@ impl<Signer: Sign> Channel<Signer> {
if self.feerate_per_kw > proposed_feerate {
proposed_feerate = self.feerate_per_kw;
}
assert!(self.shutdown_scriptpubkey.is_some());
let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
@ -3225,8 +3248,12 @@ impl<Signer: Sign> Channel<Signer> {
})
}
pub fn shutdown<F: Deref>(&mut self, fee_estimator: &F, their_features: &InitFeatures, msg: &msgs::Shutdown) -> Result<(Option<msgs::Shutdown>, Option<msgs::ClosingSigned>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
where F::Target: FeeEstimator
pub fn shutdown<F: Deref, K: Deref>(
&mut self, fee_estimator: &F, keys_provider: &K, their_features: &InitFeatures, msg: &msgs::Shutdown
) -> Result<(Option<msgs::Shutdown>, Option<msgs::ClosingSigned>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
where
F::Target: FeeEstimator,
K::Target: KeysInterface<Signer = Signer>
{
if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
@ -3257,11 +3284,45 @@ impl<Signer: Sign> Channel<Signer> {
self.counterparty_shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
}
// If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
// immediately after the commitment dance, but we can send a Shutdown because we won't send
// any further commitment updates after we set LocalShutdownSent.
let send_shutdown = (self.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;
let update_shutdown_script = match self.shutdown_scriptpubkey {
Some(_) => false,
None => {
assert!(send_shutdown);
let shutdown_scriptpubkey = keys_provider.get_shutdown_scriptpubkey();
if !shutdown_scriptpubkey.is_compatible(their_features) {
return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer. script: ({})", shutdown_scriptpubkey.clone().into_inner().to_bytes().to_hex())));
}
self.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
true
},
};
// From here on out, we may not fail!
self.channel_state |= ChannelState::RemoteShutdownSent as u32;
self.update_time_counter += 1;
let monitor_update = if update_shutdown_script {
self.latest_monitor_update_id += 1;
Some(ChannelMonitorUpdate {
update_id: self.latest_monitor_update_id,
updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
scriptpubkey: self.get_closing_scriptpubkey(),
}],
})
} else { None };
let shutdown = if send_shutdown {
Some(msgs::Shutdown {
channel_id: self.channel_id,
scriptpubkey: self.get_closing_scriptpubkey(),
})
} else { None };
// We can't send our shutdown until we've committed all of our pending HTLCs, but the
// remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
// cell HTLCs and return them to fail the payment.
@ -3276,23 +3337,11 @@ impl<Signer: Sign> Channel<Signer> {
_ => true
}
});
// If we have any LocalAnnounced updates we'll probably just get back a update_fail_htlc
// immediately after the commitment dance, but we can send a Shutdown cause we won't send
// any further commitment updates after we set LocalShutdownSent.
let shutdown = if (self.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
None
} else {
Some(msgs::Shutdown {
channel_id: self.channel_id,
scriptpubkey: self.get_closing_scriptpubkey(),
})
};
self.channel_state |= ChannelState::LocalShutdownSent as u32;
self.update_time_counter += 1;
Ok((shutdown, self.maybe_propose_first_closing_signed(fee_estimator), dropped_outbound_htlcs))
Ok((shutdown, self.maybe_propose_first_closing_signed(fee_estimator), monitor_update, dropped_outbound_htlcs))
}
fn build_signed_closing_transaction(&self, tx: &mut Transaction, counterparty_sig: &Signature, sig: &Signature) {
@ -3367,6 +3416,7 @@ impl<Signer: Sign> Channel<Signer> {
macro_rules! propose_new_feerate {
($new_feerate: expr) => {
assert!(self.shutdown_scriptpubkey.is_some());
let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
let (closing_tx, used_total_fee) = self.build_closing_transaction($new_feerate as u64 * tx_weight / 1000, false);
let sig = self.holder_signer
@ -3865,7 +3915,10 @@ impl<Signer: Sign> Channel<Signer> {
htlc_basepoint: keys.htlc_basepoint,
first_per_commitment_point,
channel_flags: if self.config.announced_channel {1} else {0},
shutdown_scriptpubkey: OptionalField::Present(if self.config.commit_upfront_shutdown_pubkey { self.get_closing_scriptpubkey() } else { Builder::new().into_script() })
shutdown_scriptpubkey: OptionalField::Present(match &self.shutdown_scriptpubkey {
Some(script) => script.clone().into_inner(),
None => Builder::new().into_script(),
}),
}
}
@ -3898,7 +3951,10 @@ impl<Signer: Sign> Channel<Signer> {
delayed_payment_basepoint: keys.delayed_payment_basepoint,
htlc_basepoint: keys.htlc_basepoint,
first_per_commitment_point,
shutdown_scriptpubkey: OptionalField::Present(if self.config.commit_upfront_shutdown_pubkey { self.get_closing_scriptpubkey() } else { Builder::new().into_script() })
shutdown_scriptpubkey: OptionalField::Present(match &self.shutdown_scriptpubkey {
Some(script) => script.clone().into_inner(),
None => Builder::new().into_script(),
}),
}
}
@ -4406,7 +4462,8 @@ impl<Signer: Sign> Channel<Signer> {
/// Begins the shutdown process, getting a message for the remote peer and returning all
/// holding cell HTLCs for payment failure.
pub fn get_shutdown(&mut self) -> Result<(msgs::Shutdown, Vec<(HTLCSource, PaymentHash)>), APIError> {
pub fn get_shutdown<K: Deref>(&mut self, keys_provider: &K, their_features: &InitFeatures) -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
where K::Target: KeysInterface<Signer = Signer> {
for htlc in self.pending_outbound_htlcs.iter() {
if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
@ -4425,7 +4482,17 @@ impl<Signer: Sign> Channel<Signer> {
return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
}
let closing_script = self.get_closing_scriptpubkey();
let update_shutdown_script = match self.shutdown_scriptpubkey {
Some(_) => false,
None => {
let shutdown_scriptpubkey = keys_provider.get_shutdown_scriptpubkey();
if !shutdown_scriptpubkey.is_compatible(their_features) {
return Err(APIError::APIMisuseError { err: format!("Provided a scriptpubkey format not accepted by peer. script: ({})", shutdown_scriptpubkey.clone().into_inner().to_bytes().to_hex()) });
}
self.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
true
},
};
// From here on out, we may not fail!
if self.channel_state < ChannelState::FundingSent as u32 {
@ -4435,6 +4502,20 @@ impl<Signer: Sign> Channel<Signer> {
}
self.update_time_counter += 1;
let monitor_update = if update_shutdown_script {
self.latest_monitor_update_id += 1;
Some(ChannelMonitorUpdate {
update_id: self.latest_monitor_update_id,
updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
scriptpubkey: self.get_closing_scriptpubkey(),
}],
})
} else { None };
let shutdown = msgs::Shutdown {
channel_id: self.channel_id,
scriptpubkey: self.get_closing_scriptpubkey(),
};
// Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
// our shutdown until we've committed all of the pending changes.
self.holding_cell_update_fee = None;
@ -4449,10 +4530,7 @@ impl<Signer: Sign> Channel<Signer> {
}
});
Ok((msgs::Shutdown {
channel_id: self.channel_id,
scriptpubkey: closing_script,
}, dropped_outbound_htlcs))
Ok((shutdown, monitor_update, dropped_outbound_htlcs))
}
/// Gets the latest commitment transaction and any dependent transactions for relay (forcing
@ -5104,7 +5182,9 @@ mod tests {
use chain::transaction::OutPoint;
use util::config::UserConfig;
use util::enforcing_trait_impls::EnforcingSigner;
use util::errors::APIError;
use util::test_utils;
use util::test_utils::OnGetShutdownScriptpubkey;
use util::logger::Logger;
use bitcoin::secp256k1::{Secp256k1, Message, Signature, All};
use bitcoin::secp256k1::ffi::Signature as FFISignature;
@ -5113,6 +5193,7 @@ mod tests {
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::Hash;
use bitcoin::hash_types::{Txid, WPubkeyHash};
use core::num::NonZeroU8;
use sync::Arc;
use prelude::*;
@ -5163,6 +5244,30 @@ mod tests {
PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode(hex).unwrap()[..]).unwrap())
}
#[test]
fn upfront_shutdown_script_incompatibility() {
let features = InitFeatures::known().clear_shutdown_anysegwit();
let non_v0_segwit_shutdown_script =
ShutdownScript::new_witness_program(NonZeroU8::new(16).unwrap(), &[0, 40]).unwrap();
let seed = [42; 32];
let network = Network::Testnet;
let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
keys_provider.expect(OnGetShutdownScriptpubkey { returns: non_v0_segwit_shutdown_script });
let fee_estimator = TestFeeEstimator { fee_est: 253 };
let secp_ctx = Secp256k1::new();
let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
match Channel::<EnforcingSigner>::new_outbound(&&fee_estimator, &&keys_provider, node_id, features, 10000000, 100000, 42, &config) {
Err(APIError::APIMisuseError { err }) => {
assert_eq!(err, "Provided a scriptpubkey format not accepted by peer. script: (60020028)");
},
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(_) => panic!("Expected error"),
}
}
// Check that, during channel creation, we use the same feerate in the open channel message
// as we do in the Channel object creation itself.
#[test]
@ -5176,7 +5281,7 @@ mod tests {
let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
let node_a_chan = Channel::<EnforcingSigner>::new_outbound(&&fee_est, &&keys_provider, node_a_node_id, 10000000, 100000, 42, &config).unwrap();
let node_a_chan = Channel::<EnforcingSigner>::new_outbound(&&fee_est, &&keys_provider, node_a_node_id, InitFeatures::known(), 10000000, 100000, 42, &config).unwrap();
// Now change the fee so we can check that the fee in the open_channel message is the
// same as the old fee.
@ -5201,7 +5306,7 @@ mod tests {
// Create Node A's channel pointing to Node B's pubkey
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider, node_b_node_id, 10000000, 100000, 42, &config).unwrap();
let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider, node_b_node_id, InitFeatures::known(), 10000000, 100000, 42, &config).unwrap();
// Create Node B's channel by receiving Node A's open_channel message
// Make sure A's dust limit is as we expect.
@ -5268,7 +5373,7 @@ mod tests {
let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
let mut chan = Channel::<EnforcingSigner>::new_outbound(&&fee_est, &&keys_provider, node_id, 10000000, 100000, 42, &config).unwrap();
let mut chan = Channel::<EnforcingSigner>::new_outbound(&&fee_est, &&keys_provider, node_id, InitFeatures::known(), 10000000, 100000, 42, &config).unwrap();
let commitment_tx_fee_0_htlcs = chan.commit_tx_fee_msat(0);
let commitment_tx_fee_1_htlc = chan.commit_tx_fee_msat(1);
@ -5317,7 +5422,7 @@ mod tests {
// Create Node A's channel pointing to Node B's pubkey
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider, node_b_node_id, 10000000, 100000, 42, &config).unwrap();
let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider, node_b_node_id, InitFeatures::known(), 10000000, 100000, 42, &config).unwrap();
// Create Node B's channel by receiving Node A's open_channel message
let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
@ -5379,7 +5484,7 @@ mod tests {
// Create a channel.
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider, node_b_node_id, 10000000, 100000, 42, &config).unwrap();
let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider, node_b_node_id, InitFeatures::known(), 10000000, 100000, 42, &config).unwrap();
assert!(node_a_chan.counterparty_forwarding_info.is_none());
assert_eq!(node_a_chan.holder_htlc_minimum_msat, 1); // the default
assert!(node_a_chan.counterparty_forwarding_info().is_none());
@ -5443,7 +5548,7 @@ mod tests {
let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let mut config = UserConfig::default();
config.channel_options.announced_channel = false;
let mut chan = Channel::<InMemorySigner>::new_outbound(&&feeest, &&keys_provider, counterparty_node_id, 10_000_000, 100000, 42, &config).unwrap(); // Nothing uses their network key in this test
let mut chan = Channel::<InMemorySigner>::new_outbound(&&feeest, &&keys_provider, counterparty_node_id, InitFeatures::known(), 10_000_000, 100000, 42, &config).unwrap(); // Nothing uses their network key in this test
chan.holder_dust_limit_satoshis = 546;
chan.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
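
The shutdown() and get_shutdown() changes above follow a validate-then-commit pattern: the shutdown script is generated and checked against the peer's features before any channel state changes, and everything after the "From here on out, we may not fail!" marker is infallible. A simplified sketch of that pattern, with hypothetical types:

// Hypothetical, simplified sketch; not LDK's actual Channel implementation.
struct PeerFeatures { shutdown_anysegwit: bool }

#[derive(Clone, Debug)]
struct ShutdownScript(Vec<u8>);

impl ShutdownScript {
    fn is_compatible(&self, features: &PeerFeatures) -> bool {
        // Assumption for illustration: version-0 witness programs are always
        // accepted, anything else needs option_shutdown_anysegwit.
        self.0.first() == Some(&0x00) || features.shutdown_anysegwit
    }
}

struct Channel {
    shutdown_scriptpubkey: Option<ShutdownScript>,
    latest_monitor_update_id: u64,
}

impl Channel {
    /// Returns the id of a monitor update carrying the shutdown script, if one
    /// had to be generated at close time.
    fn begin_shutdown(&mut self, candidate: ShutdownScript, features: &PeerFeatures)
        -> Result<Option<u64>, String>
    {
        // Fallible part: pick and validate a script only if none was committed
        // up front in open_channel/accept_channel.
        let update_script = match self.shutdown_scriptpubkey {
            Some(_) => false,
            None => {
                if !candidate.is_compatible(features) {
                    return Err("scriptpubkey format not accepted by peer".to_owned());
                }
                self.shutdown_scriptpubkey = Some(candidate);
                true
            }
        };
        // From here on out, we may not fail: queue a monitor update carrying
        // the newly chosen script, identified by a bumped update id.
        Ok(if update_script {
            self.latest_monitor_update_id += 1;
            Some(self.latest_monitor_update_id)
        } else { None })
    }
}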

@ -491,6 +491,8 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref,
/// Because adding or removing an entry is rare, we usually take an outer read lock and then
/// operate on the inner value freely. Sadly, this prevents parallel operation when opening a
/// new channel.
///
/// If also holding `channel_state` lock, must lock `channel_state` prior to `per_peer_state`.
per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,
pending_events: Mutex<Vec<events::Event>>,
@ -871,6 +873,18 @@ macro_rules! try_chan_entry {
}
}
macro_rules! remove_channel {
($channel_state: expr, $entry: expr) => {
{
let channel = $entry.remove_entry().1;
if let Some(short_id) = channel.get_short_channel_id() {
$channel_state.short_to_id.remove(&short_id);
}
channel
}
}
}
macro_rules! handle_monitor_err {
($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
@ -1165,8 +1179,15 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
}
let their_features = {
let per_peer_state = self.per_peer_state.read().unwrap();
match per_peer_state.get(&their_network_key) {
Some(peer_state) => peer_state.lock().unwrap().latest_features.clone(),
None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", their_network_key) }),
}
};
let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
let channel = Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, channel_value_satoshis, push_msat, user_id, config)?;
let channel = Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, their_features, channel_value_satoshis, push_msat, user_id, config)?;
let res = channel.get_open_channel(self.genesis_hash.clone());
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
@ -1260,40 +1281,60 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let (mut failed_htlcs, chan_option) = {
let counterparty_node_id;
let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
let result: Result<(), _> = loop {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
match channel_state.by_id.entry(channel_id.clone()) {
hash_map::Entry::Occupied(mut chan_entry) => {
let (shutdown_msg, failed_htlcs) = chan_entry.get_mut().get_shutdown()?;
counterparty_node_id = chan_entry.get().get_counterparty_node_id();
let their_features = {
let per_peer_state = self.per_peer_state.read().unwrap();
match per_peer_state.get(&counterparty_node_id) {
Some(peer_state) => peer_state.lock().unwrap().latest_features.clone(),
None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
}
};
let (shutdown_msg, monitor_update, htlcs) = chan_entry.get_mut().get_shutdown(&self.keys_manager, &their_features)?;
failed_htlcs = htlcs;
// Update the monitor with the shutdown script if necessary.
if let Some(monitor_update) = monitor_update {
if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
let (result, is_permanent) =
handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), chan_entry.key());
if is_permanent {
remove_channel!(channel_state, chan_entry);
break result;
}
}
}
channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
node_id: chan_entry.get().get_counterparty_node_id(),
node_id: counterparty_node_id,
msg: shutdown_msg
});
if chan_entry.get().is_shutdown() {
if let Some(short_id) = chan_entry.get().get_short_channel_id() {
channel_state.short_to_id.remove(&short_id);
let channel = remove_channel!(channel_state, chan_entry);
if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: channel_update
});
}
(failed_htlcs, Some(chan_entry.remove_entry().1))
} else { (failed_htlcs, None) }
}
break Ok(());
},
hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()})
}
};
for htlc_source in failed_htlcs.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
}
let chan_update = if let Some(chan) = chan_option {
self.get_channel_update_for_broadcast(&chan).ok()
} else { None };
if let Some(update) = chan_update {
let mut channel_state = self.channel_state.lock().unwrap();
channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
let _ = handle_error!(self, result, counterparty_node_id);
Ok(())
}
@ -3190,7 +3231,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
}
fn internal_shutdown(&self, counterparty_node_id: &PublicKey, their_features: &InitFeatures, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
let (mut dropped_htlcs, chan_option) = {
let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)>;
let result: Result<(), _> = loop {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
@ -3199,25 +3241,46 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
let (shutdown, closing_signed, dropped_htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.fee_estimator, &their_features, &msg), channel_state, chan_entry);
let (shutdown, closing_signed, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.fee_estimator, &self.keys_manager, &their_features, &msg), channel_state, chan_entry);
dropped_htlcs = htlcs;
// Update the monitor with the shutdown script if necessary.
if let Some(monitor_update) = monitor_update {
if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
let (result, is_permanent) =
handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), chan_entry.key());
if is_permanent {
remove_channel!(channel_state, chan_entry);
break result;
}
}
}
if let Some(msg) = shutdown {
channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
node_id: counterparty_node_id.clone(),
node_id: *counterparty_node_id,
msg,
});
}
if let Some(msg) = closing_signed {
// TODO: Do not send this if the monitor update failed.
channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
node_id: counterparty_node_id.clone(),
node_id: *counterparty_node_id,
msg,
});
}
if chan_entry.get().is_shutdown() {
if let Some(short_id) = chan_entry.get().get_short_channel_id() {
channel_state.short_to_id.remove(&short_id);
let channel = remove_channel!(channel_state, chan_entry);
if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: channel_update
});
}
(dropped_htlcs, Some(chan_entry.remove_entry().1))
} else { (dropped_htlcs, None) }
}
break Ok(());
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
@ -3225,14 +3288,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
for htlc_source in dropped_htlcs.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
}
if let Some(chan) = chan_option {
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
let mut channel_state = self.channel_state.lock().unwrap();
channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
}
let _ = handle_error!(self, result, *counterparty_node_id);
Ok(())
}
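
Both close_channel and internal_shutdown above are restructured around a `loop { ... break Ok(()) }` block so a monitor error can break out of the locked scope and be passed to handle_error! once the channel_state lock is released. A stripped-down, hypothetical sketch of that control flow:

use std::sync::Mutex;

// The loop runs at most once; `break` carries the result out of the critical
// section so follow-up handling happens without the lock held.
fn do_locked_work(state: &Mutex<u32>) -> Result<(), String> {
    let result: Result<(), String> = loop {
        let mut guard = state.lock().unwrap();
        *guard += 1;
        if *guard > 3 {
            // Comparable to `break result;` after a permanent monitor failure.
            break Err("simulated monitor failure".to_owned());
        }
        break Ok(());
    };
    // The lock guard was dropped when the loop exited, so error handling
    // (handle_error! in the real code) runs without holding the lock.
    result
}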

@ -28,8 +28,10 @@ use routing::network_graph::RoutingFees;
use ln::features::{ChannelFeatures, InitFeatures, InvoiceFeatures, NodeFeatures};
use ln::msgs;
use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler,HTLCFailChannelUpdate, ErrorAction};
use ln::script::ShutdownScript;
use util::enforcing_trait_impls::EnforcingSigner;
use util::{byte_utils, test_utils};
use util::test_utils::OnGetShutdownScriptpubkey;
use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
use util::errors::APIError;
use util::ser::{Writeable, ReadableArgs};
@ -55,6 +57,7 @@ use io;
use prelude::*;
use alloc::collections::BTreeSet;
use core::default::Default;
use core::num::NonZeroU8;
use sync::{Arc, Mutex};
use ln::functional_test_utils::*;
@ -7477,9 +7480,9 @@ fn test_upfront_shutdown_script() {
let flags_no = InitFeatures::known().clear_upfront_shutdown_script();
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, flags_no, flags.clone());
nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
let mut node_1_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
node_1_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
let node_1_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
check_added_monitors!(nodes[1], 1);
let events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
match events[0] {
@ -7491,8 +7494,8 @@ fn test_upfront_shutdown_script() {
// channel smoothly, opt-out is from channel initiator here
let chan = create_announced_chan_between_nodes_with_value(&nodes, 1, 0, 1000000, 1000000, flags.clone(), flags.clone());
nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
check_added_monitors!(nodes[1], 1);
let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
let events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
@ -7505,8 +7508,8 @@ fn test_upfront_shutdown_script() {
//// channel smoothly
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, flags.clone(), flags.clone());
nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
check_added_monitors!(nodes[1], 1);
let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
let events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 2);
@ -7610,6 +7613,7 @@ fn test_segwit_v0_shutdown_script() {
let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
check_added_monitors!(nodes[1], 1);
// Use a segwit v0 script supported even without option_shutdown_anysegwit
let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
@ -7644,6 +7648,7 @@ fn test_anysegwit_shutdown_script() {
let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
check_added_monitors!(nodes[1], 1);
// Use a non-v0 segwit script supported by option_shutdown_anysegwit
let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
@ -7672,20 +7677,33 @@ fn test_unsupported_anysegwit_shutdown_script() {
config.channel_options.commit_upfront_shutdown_pubkey = false;
let user_cfgs = [None, Some(config), None];
let chanmon_cfgs = create_chanmon_cfgs(3);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
node_cfgs[0].features = InitFeatures::known().clear_shutdown_anysegwit();
node_cfgs[1].features = InitFeatures::known().clear_shutdown_anysegwit();
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs);
let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
let node_features = InitFeatures::known().clear_shutdown_anysegwit();
let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), node_features.clone());
nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
// Check that using an unsupported shutdown script fails and a supported one succeeds.
let supported_shutdown_script = chanmon_cfgs[1].keys_manager.get_shutdown_scriptpubkey();
let unsupported_shutdown_script =
ShutdownScript::new_witness_program(NonZeroU8::new(16).unwrap(), &[0, 40]).unwrap();
chanmon_cfgs[1].keys_manager
.expect(OnGetShutdownScriptpubkey { returns: unsupported_shutdown_script.clone() })
.expect(OnGetShutdownScriptpubkey { returns: supported_shutdown_script });
// Use a non-v0 segwit script supported by option_shutdown_anysegwit
let chan = create_announced_chan_between_nodes(&nodes, 0, 1, node_cfgs[0].features.clone(), node_cfgs[1].features.clone());
match nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()) {
Err(APIError::APIMisuseError { err }) => assert_eq!(err, "Provided a scriptpubkey format not accepted by peer. script: (60020028)"),
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(_) => panic!("Expected error"),
}
nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
check_added_monitors!(nodes[1], 1);
// Use a non-v0 segwit script unsupported without option_shutdown_anysegwit
let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
node_0_shutdown.scriptpubkey = Builder::new().push_int(16)
.push_slice(&[0, 40])
.into_script();
nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_features, &node_0_shutdown);
node_0_shutdown.scriptpubkey = unsupported_shutdown_script.into_inner();
nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_cfgs[1].features, &node_0_shutdown);
let events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 2);
@ -7713,6 +7731,7 @@ fn test_invalid_shutdown_script() {
let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
check_added_monitors!(nodes[1], 1);
// Use a segwit v0 script with an unsupported witness program
let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
@ -7748,7 +7767,7 @@ fn test_user_configurable_csv_delay() {
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
// We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_outbound()
if let Err(error) = Channel::new_outbound(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), 1000000, 1000000, 0, &low_our_to_self_config) {
if let Err(error) = Channel::new_outbound(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), InitFeatures::known(), 1000000, 1000000, 0, &low_our_to_self_config) {
match error {
APIError::APIMisuseError { err } => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
_ => panic!("Unexpected event"),

@ -453,6 +453,7 @@ pub struct TestKeysInterface {
pub override_channel_id_priv: Mutex<Option<[u8; 32]>>,
pub disable_revocation_policy_check: bool,
revoked_commitments: Mutex<HashMap<[u8;32], Arc<Mutex<u64>>>>,
expectations: Mutex<Option<VecDeque<OnGetShutdownScriptpubkey>>>,
}
impl keysinterface::KeysInterface for TestKeysInterface {
@ -460,7 +461,17 @@ impl keysinterface::KeysInterface for TestKeysInterface {
fn get_node_secret(&self) -> SecretKey { self.backing.get_node_secret() }
fn get_destination_script(&self) -> Script { self.backing.get_destination_script() }
fn get_shutdown_scriptpubkey(&self) -> ShutdownScript { self.backing.get_shutdown_scriptpubkey() }
fn get_shutdown_scriptpubkey(&self) -> ShutdownScript {
match &mut *self.expectations.lock().unwrap() {
None => self.backing.get_shutdown_scriptpubkey(),
Some(expectations) => match expectations.pop_front() {
None => panic!("Unexpected get_shutdown_scriptpubkey"),
Some(expectation) => expectation.returns,
},
}
}
fn get_channel_signer(&self, inbound: bool, channel_value_satoshis: u64) -> EnforcingSigner {
let keys = self.backing.get_channel_signer(inbound, channel_value_satoshis);
let revoked_commitment = self.make_revoked_commitment_cell(keys.commitment_seed);
@ -503,7 +514,6 @@ impl keysinterface::KeysInterface for TestKeysInterface {
}
}
impl TestKeysInterface {
pub fn new(seed: &[u8; 32], network: Network) -> Self {
let now = Duration::from_secs(genesis_block(network).header.time as u64);
@ -513,8 +523,19 @@ impl TestKeysInterface {
override_channel_id_priv: Mutex::new(None),
disable_revocation_policy_check: false,
revoked_commitments: Mutex::new(HashMap::new()),
expectations: Mutex::new(None),
}
}
/// Sets an expectation that [`keysinterface::KeysInterface::get_shutdown_scriptpubkey`] is
/// called.
pub fn expect(&self, expectation: OnGetShutdownScriptpubkey) -> &Self {
self.expectations.lock().unwrap()
.get_or_insert_with(|| VecDeque::new())
.push_back(expectation);
self
}
pub fn derive_channel_keys(&self, channel_value_satoshis: u64, id: &[u8; 32]) -> EnforcingSigner {
let keys = self.backing.derive_channel_keys(channel_value_satoshis, id);
let revoked_commitment = self.make_revoked_commitment_cell(keys.commitment_seed);
@ -531,6 +552,33 @@ impl TestKeysInterface {
}
}
impl Drop for TestKeysInterface {
fn drop(&mut self) {
if std::thread::panicking() {
return;
}
if let Some(expectations) = &*self.expectations.lock().unwrap() {
if !expectations.is_empty() {
panic!("Unsatisfied expectations: {:?}", expectations);
}
}
}
}
/// An expectation that [`keysinterface::KeysInterface::get_shutdown_scriptpubkey`] was called and
/// returns a [`ShutdownScript`].
pub struct OnGetShutdownScriptpubkey {
/// A shutdown script used to close a channel.
pub returns: ShutdownScript,
}
impl core::fmt::Debug for OnGetShutdownScriptpubkey {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.debug_struct("OnGetShutdownScriptpubkey").finish()
}
}
pub struct TestChainSource {
pub genesis_hash: BlockHash,
pub utxo_ret: Mutex<Result<TxOut, chain::AccessError>>,