//! The logic to monitor for on-chain transactions and create the relevant claim responses lives
//! here.
//!
//! ChannelMonitor objects are generated by ChannelManager in response to relevant
//! messages/actions, and MUST be persisted to disk (and, preferably, remotely) before progress can
//! be made in responding to certain messages; see ManyChannelMonitor for more.
//!
//! Note that ChannelMonitors are an important part of the lightning trust model and a copy of the
//! latest ChannelMonitor must always be actively monitoring for chain updates (and no out-of-date
//! ChannelMonitors should do so). Thus, if you're building rust-lightning into an HSM or other
//! security-domain-separated system design, you should consider having multiple paths for
//! ChannelMonitors to get out of the HSM and onto monitoring devices.
use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::{TxIn, TxOut, SigHashType, Transaction};
use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
use bitcoin::blockdata::script::{Script, Builder};
use bitcoin::blockdata::opcodes;
use bitcoin::consensus::encode;
use bitcoin::util::hash::BitcoinHash;
use bitcoin::util::bip143;

use bitcoin_hashes::Hash;
use bitcoin_hashes::sha256::Hash as Sha256;
use bitcoin_hashes::hash160::Hash as Hash160;
use bitcoin_hashes::sha256d::Hash as Sha256dHash;

use secp256k1::{Secp256k1, Signature};
use secp256k1::key::{SecretKey, PublicKey};
use secp256k1;

use ln::msgs::DecodeError;
use ln::chan_utils;
use ln::chan_utils::{HTLCOutputInCommitment, LocalCommitmentTransaction, HTLCType};
use ln::channelmanager::{HTLCSource, PaymentPreimage, PaymentHash};
use chain::chaininterface::{ChainListener, ChainWatchInterface, BroadcasterInterface, FeeEstimator, ConfirmationTarget, MIN_RELAY_FEE_SAT_PER_1000_WEIGHT};
use chain::transaction::OutPoint;
use chain::keysinterface::SpendableOutputDescriptor;
use util::logger::Logger;
use util::ser::{ReadableArgs, Readable, Writer, Writeable, U48};
use util::{byte_utils, events};

use std::collections::{HashMap, hash_map, HashSet};
use std::sync::{Arc, Mutex};
use std::{hash, cmp, mem};

/// An error enum representing a failure to persist a channel monitor update.
#[derive(Clone)]
pub enum ChannelMonitorUpdateErr {
	/// Used to indicate a temporary failure (eg connection to a watchtower or remote backup of
	/// our state failed, but is expected to succeed at some point in the future).
	///
	/// Such a failure will "freeze" a channel, preventing us from revoking old states or
	/// submitting new commitment transactions to the remote party.
	/// ChannelManager::test_restore_channel_monitor can be used to retry the update(s) and restore
	/// the channel to an operational state.
	///
	/// Note that continuing to operate when no copy of the updated ChannelMonitor could be
	/// persisted is unsafe - if you failed to store the update on your own local disk you should
	/// instead return PermanentFailure to force closure of the channel ASAP.
	///
	/// Even when a channel has been "frozen", updates to the ChannelMonitor can continue to occur
	/// (eg if an inbound HTLC which we forwarded was claimed upstream, resulting in us attempting
	/// to claim it on this channel) and those updates must be applied wherever they can be. At
	/// least one such updated ChannelMonitor must be persisted, otherwise PermanentFailure should
	/// be returned to get things on-chain ASAP using only the in-memory copy. Obviously, updates
	/// to the channel which would invalidate previous ChannelMonitors are not made when a channel
	/// has been "frozen".
	///
	/// Note that even if updates made after TemporaryFailure succeed you must still call
	/// test_restore_channel_monitor to ensure you have the latest monitor and re-enable normal
	/// channel operation.
	///
	/// For deployments where a copy of ChannelMonitors and other local state are backed up in a
	/// remote location (with local copies persisted immediately), it is anticipated that all
	/// updates will return TemporaryFailure until the remote copies could be updated.
	TemporaryFailure,
	/// Used to indicate no further channel monitor updates will be allowed (eg we've moved on to a
	/// different watchtower and cannot update with all watchtowers that were previously informed
	/// of this channel). This will force-close the channel in question.
	///
	/// Should also be used to indicate a failure to update the local copy of the channel monitor.
	PermanentFailure,
}
/// General Err type for ChannelMonitor actions. Generally, this implies that the data provided is
/// inconsistent with the ChannelMonitor being called. eg for ChannelMonitor::insert_combine this
/// means you tried to merge two monitors for different channels or for a channel which was
/// restored from a backup and then generated new commitment updates.
/// Contains a human-readable error message.
#[derive(Debug)]
pub struct MonitorUpdateError(pub &'static str);

/// Simple structure sent back by ManyChannelMonitor when an HTLC from a forward channel is
/// detected on-chain, carrying the info needed to update the corresponding HTLC in the backward
/// channel.
pub struct HTLCUpdate {
	pub(super) payment_hash: PaymentHash,
	pub(super) payment_preimage: Option<PaymentPreimage>,
	pub(super) source: HTLCSource
}
/// Simple trait indicating ability to track a set of ChannelMonitors and multiplex events between
/// them. Generally should be implemented by keeping a local SimpleManyChannelMonitor and passing
/// events to it, while also taking any add_update_monitor events and passing them to some remote
/// server(s).
///
/// Note that any updates to a channel's monitor *must* be applied to each instance of the
/// channel's monitor everywhere (including remote watchtowers) *before* this function returns. If
/// an update occurs and a remote watchtower is left with old state, it may broadcast transactions
/// which we have revoked, allowing our counterparty to claim all funds in the channel!
///
/// Users need to notify implementors of ManyChannelMonitor when a new block is connected or
/// disconnected using their `block_connected` and `block_disconnected` methods. However, rather
/// than calling these methods directly, the user should register implementors as listeners to the
/// BlockNotifier and call the BlockNotifier's `block_(dis)connected` methods, which will notify
/// all registered listeners in one go.
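///
/// As a rough sketch (not part of this crate), a deployment which also persists updates to a
/// remote backup might wrap a local SimpleManyChannelMonitor with a hypothetical backup client,
/// only reporting success once both copies are updated:
///
/// ```ignore
/// struct BackedUpChannelMonitor {
///     local: Arc<SimpleManyChannelMonitor<OutPoint>>,
///     backup: RemoteBackupClient, // hypothetical remote persistence client
/// }
/// impl ManyChannelMonitor for BackedUpChannelMonitor {
///     fn add_update_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor) -> Result<(), ChannelMonitorUpdateErr> {
///         if self.backup.persist(&funding_txo, &monitor).is_err() {
///             // Local state is still intact, so a later retry may succeed.
///             return Err(ChannelMonitorUpdateErr::TemporaryFailure);
///         }
///         self.local.add_update_monitor(funding_txo, monitor)
///     }
///     fn fetch_pending_htlc_updated(&self) -> Vec<HTLCUpdate> {
///         self.local.fetch_pending_htlc_updated()
///     }
/// }
/// ```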
pub trait ManyChannelMonitor: Send + Sync {
	/// Adds or updates a monitor for the given `funding_txo`.
	///
	/// Implementor must also ensure that the funding_txo outpoint is registered with any relevant
	/// ChainWatchInterfaces such that the provided monitor receives block_connected callbacks with
	/// any spends of it.
	fn add_update_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor) -> Result<(), ChannelMonitorUpdateErr>;

	/// Used by ChannelManager to get the list of HTLCs resolved onchain which need to be failed
	/// or fulfilled backward.
	fn fetch_pending_htlc_updated(&self) -> Vec<HTLCUpdate>;
}
/// A simple implementation of a ManyChannelMonitor and ChainListener. Can be used to create a
/// watchtower or watch our own channels.
///
/// Note that you must provide your own key by which to refer to channels.
///
/// If you're accepting remote monitors (ie are implementing a watchtower), you must verify that
/// users cannot overwrite a given channel by providing a duplicate key. ie you should probably
/// index by a PublicKey which is required to sign any updates.
///
/// If you're using this for local monitoring of your own channels, you probably want to use
/// `OutPoint` as the key, which will give you a ManyChannelMonitor implementation.
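///
/// A minimal construction sketch, assuming `chain_watcher`, `tx_broadcaster`, `logger` and
/// `fee_estimator` are existing `Arc`s to implementations of the corresponding traits:
///
/// ```ignore
/// let monitor: Arc<SimpleManyChannelMonitor<OutPoint>> =
/// 	SimpleManyChannelMonitor::new(chain_watcher, tx_broadcaster, logger, fee_estimator);
/// // Register the returned Arc as a listener (eg via BlockNotifier) so that its ChainListener
/// // block_connected/block_disconnected methods are called for every block.
/// ```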
pub struct SimpleManyChannelMonitor<Key> {
	#[cfg(test)] // Used in ChannelManager tests to manipulate channels directly
	pub monitors: Mutex<HashMap<Key, ChannelMonitor>>,
	#[cfg(not(test))]
	monitors: Mutex<HashMap<Key, ChannelMonitor>>,
	chain_monitor: Arc<ChainWatchInterface>,
	broadcaster: Arc<BroadcasterInterface>,
	pending_events: Mutex<Vec<events::Event>>,
	pending_htlc_updated: Mutex<HashMap<PaymentHash, Vec<(HTLCSource, Option<PaymentPreimage>)>>>,
	logger: Arc<Logger>,
	fee_estimator: Arc<FeeEstimator>
}
impl<'a, Key: Send + cmp::Eq + hash::Hash> ChainListener for SimpleManyChannelMonitor<Key> {
	fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], _indexes_of_txn_matched: &[u32]) {
		let block_hash = header.bitcoin_hash();
		let mut new_events: Vec<events::Event> = Vec::with_capacity(0);
		let mut htlc_updated_infos = Vec::new();
		{
			let mut monitors = self.monitors.lock().unwrap();
			for monitor in monitors.values_mut() {
				let (txn_outputs, spendable_outputs, mut htlc_updated) = monitor.block_connected(txn_matched, height, &block_hash, &*self.broadcaster, &*self.fee_estimator);
				if spendable_outputs.len() > 0 {
					new_events.push(events::Event::SpendableOutputs {
						outputs: spendable_outputs,
					});
				}

				for (ref txid, ref outputs) in txn_outputs {
					for (idx, output) in outputs.iter().enumerate() {
						self.chain_monitor.install_watch_outpoint((txid.clone(), idx as u32), &output.script_pubkey);
					}
				}
				htlc_updated_infos.append(&mut htlc_updated);
			}
		}
		{
			// ChannelManager will just need to fetch pending_htlc_updated and pass state backward
			let mut pending_htlc_updated = self.pending_htlc_updated.lock().unwrap();
			for htlc in htlc_updated_infos.drain(..) {
				match pending_htlc_updated.entry(htlc.2) {
					hash_map::Entry::Occupied(mut e) => {
						// In case of a reorg we may have HTLC outputs solved in a different way, so
						// we prefer to keep claims but don't store duplicate updates for a given
						// (payment_hash, HTLCSource) pair.
						let mut existing_claim = false;
						e.get_mut().retain(|htlc_data| {
							if htlc.0 == htlc_data.0 {
								if htlc_data.1.is_some() {
									existing_claim = true;
									true
								} else { false }
							} else { true }
						});
						if !existing_claim {
							e.get_mut().push((htlc.0, htlc.1));
						}
					}
					hash_map::Entry::Vacant(e) => {
						e.insert(vec![(htlc.0, htlc.1)]);
					}
				}
			}
		}
		let mut pending_events = self.pending_events.lock().unwrap();
		pending_events.append(&mut new_events);
	}

	fn block_disconnected(&self, header: &BlockHeader, disconnected_height: u32) {
		let block_hash = header.bitcoin_hash();
		let mut monitors = self.monitors.lock().unwrap();
		for monitor in monitors.values_mut() {
			monitor.block_disconnected(disconnected_height, &block_hash, &*self.broadcaster, &*self.fee_estimator);
		}
	}
}
impl<Key: Send + cmp::Eq + hash::Hash + 'static> SimpleManyChannelMonitor<Key> {
	/// Creates a new object which can be used to monitor several channels given the chain
	/// interface with which to register to receive notifications.
	pub fn new(chain_monitor: Arc<ChainWatchInterface>, broadcaster: Arc<BroadcasterInterface>, logger: Arc<Logger>, feeest: Arc<FeeEstimator>) -> Arc<SimpleManyChannelMonitor<Key>> {
		let res = Arc::new(SimpleManyChannelMonitor {
			monitors: Mutex::new(HashMap::new()),
			chain_monitor,
			broadcaster,
			pending_events: Mutex::new(Vec::new()),
			pending_htlc_updated: Mutex::new(HashMap::new()),
			logger,
			fee_estimator: feeest,
		});

		res
	}

	/// Adds or updates the monitor which monitors the channel referred to by the given key.
	pub fn add_update_monitor_by_key(&self, key: Key, monitor: ChannelMonitor) -> Result<(), MonitorUpdateError> {
		let mut monitors = self.monitors.lock().unwrap();
		match monitors.get_mut(&key) {
			Some(orig_monitor) => {
				log_trace!(self, "Updating Channel Monitor for channel {}", log_funding_info!(monitor.key_storage));
				return orig_monitor.insert_combine(monitor);
			},
			None => {}
		};
		match monitor.key_storage {
			Storage::Local { ref funding_info, .. } => {
				match funding_info {
					&None => {
						return Err(MonitorUpdateError("Try to update a useless monitor without funding_txo !"));
					},
					&Some((ref outpoint, ref script)) => {
						log_trace!(self, "Got new Channel Monitor for channel {}", log_bytes!(outpoint.to_channel_id()[..]));
						self.chain_monitor.install_watch_tx(&outpoint.txid, script);
						self.chain_monitor.install_watch_outpoint((outpoint.txid, outpoint.index as u32), script);
					},
				}
			},
			Storage::Watchtower { .. } => {
				self.chain_monitor.watch_all_txn();
			}
		}
		monitors.insert(key, monitor);
		Ok(())
	}
}
impl ManyChannelMonitor for SimpleManyChannelMonitor<OutPoint> {
	fn add_update_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor) -> Result<(), ChannelMonitorUpdateErr> {
		match self.add_update_monitor_by_key(funding_txo, monitor) {
			Ok(_) => Ok(()),
			Err(_) => Err(ChannelMonitorUpdateErr::PermanentFailure),
		}
	}

	fn fetch_pending_htlc_updated(&self) -> Vec<HTLCUpdate> {
		let mut updated = self.pending_htlc_updated.lock().unwrap();
		let mut pending_htlcs_updated = Vec::with_capacity(updated.len());
		for (k, v) in updated.drain() {
			for htlc_data in v {
				pending_htlcs_updated.push(HTLCUpdate {
					payment_hash: k,
					payment_preimage: htlc_data.1,
					source: htlc_data.0,
				});
			}
		}
		pending_htlcs_updated
	}
}

impl<Key: Send + cmp::Eq + hash::Hash> events::EventsProvider for SimpleManyChannelMonitor<Key> {
	fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
		let mut pending_events = self.pending_events.lock().unwrap();
		let mut ret = Vec::new();
		mem::swap(&mut ret, &mut *pending_events);
		ret
	}
}
/// If an HTLC expires within this many blocks, don't try to claim it in a shared transaction,
/// instead claiming it in its own individual transaction.
const CLTV_SHARED_CLAIM_BUFFER: u32 = 12;
/// If an HTLC expires within this many blocks, force-close the channel to broadcast the
/// HTLC-Success transaction.
/// In other words, this is an upper bound on how many blocks we think it can take us to get a
/// transaction confirmed (and we use it in a few more, equivalent, places).
pub(crate) const CLTV_CLAIM_BUFFER: u32 = 6;
/// Number of blocks by which point we expect our counterparty to have seen new blocks on the
/// network and done a full update_fail_htlc/commitment_signed dance (+ we've updated all our
/// copies of ChannelMonitors, including watchtowers). We could enforce the contract by failing
/// at CLTV expiration height, but giving a grace period to our peer may be profitable for us if
/// they can provide an over-late preimage. Nevertheless, the grace period has to be accounted for
/// in our CLTV_EXPIRY_DELTA to be secure. Following this policy we may decrease the rate of
/// channel failures due to expiration, but increase the cost of funds being locked longer in case
/// of failure. This delay also covers a low-power peer being slow to process blocks and so being
/// behind us on the accurate block height. In case of an onchain failure to be passed backward, we
/// may see the last block of ANTI_REORG_DELAY at worst this many blocks late, so we are not only
/// using this value as a mercy for them but also as a safeguard for ourselves, leaving us enough
/// time to act.
pub(crate) const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
/// Number of blocks we wait on seeing an HTLC output being solved before we fail corresponding
/// inbound HTLCs. This prevents us from failing backwards and then getting a reorg resulting in us
/// losing money.
/// We also use this delay to be sure we can remove our in-flight claim txn from the bump
/// candidates buffer. It may cause spurious generation of bumped claim txn, but that's fine given
/// the outpoint is already solved by a previous claim tx. What we want to avoid is a reorg
/// evicting our claim tx and us failing to keep bumping another claim tx to solve the outpoint.
pub(crate) const ANTI_REORG_DELAY: u32 = 6;
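
// Taken together (a rough sketch, not a bound enforced in this file): when we forward an HTLC,
// the CLTV delta between the upstream and downstream expiries should cover at least
// LATENCY_GRACE_PERIOD_BLOCKS + CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY = 3 + 6 + 6 = 15 blocks,
// i.e. enough time to see the downstream HTLC resolved on-chain, let that resolution mature past
// a reorg, and still claim or fail the upstream HTLC before its own expiry.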

#[derive(Clone, PartialEq)]
enum Storage {
	Local {
		funding_key: SecretKey,
		revocation_base_key: SecretKey,
		htlc_base_key: SecretKey,
		delayed_payment_base_key: SecretKey,
		payment_base_key: SecretKey,
		shutdown_pubkey: PublicKey,
		funding_info: Option<(OutPoint, Script)>,
		current_remote_commitment_txid: Option<Sha256dHash>,
		prev_remote_commitment_txid: Option<Sha256dHash>,
	},
	Watchtower {
		revocation_base_key: PublicKey,
		htlc_base_key: PublicKey,
	}
}

#[derive(Clone, PartialEq)]
struct LocalSignedTx {
	/// txid of the transaction in tx, just used to make comparison faster
	txid: Sha256dHash,
	tx: LocalCommitmentTransaction,
	revocation_key: PublicKey,
	a_htlc_key: PublicKey,
	b_htlc_key: PublicKey,
	delayed_payment_key: PublicKey,
	per_commitment_point: PublicKey,
	feerate_per_kw: u64,
	htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>,
}

#[derive(PartialEq)]
enum InputDescriptors {
	RevokedOfferedHTLC,
	RevokedReceivedHTLC,
	OfferedHTLC,
	ReceivedHTLC,
	RevokedOutput, // either a revoked to_local output on commitment tx, a revoked HTLC-Timeout output or a revoked HTLC-Success output
}

/// When ChannelMonitor discovers an onchain outpoint that is part of a channel and for which it
/// needs to generate a tx to push the channel state forward, we cache the outpoint-solving tx
/// material to build a new bumped one in case of a lengthy confirmation delay.
#[derive(Clone, PartialEq)]
enum InputMaterial {
	Revoked {
		script: Script,
		pubkey: Option<PublicKey>,
		key: SecretKey,
		is_htlc: bool,
		amount: u64,
	},
	RemoteHTLC {
		script: Script,
		key: SecretKey,
		preimage: Option<PaymentPreimage>,
		amount: u64,
		locktime: u32,
	},
	LocalHTLC {
		script: Script,
		sigs: (Signature, Signature),
		preimage: Option<PaymentPreimage>,
		amount: u64,
	}
}

impl Writeable for InputMaterial {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
		match self {
			&InputMaterial::Revoked { ref script, ref pubkey, ref key, ref is_htlc, ref amount } => {
				writer.write_all(&[0; 1])?;
				script.write(writer)?;
				pubkey.write(writer)?;
				writer.write_all(&key[..])?;
				if *is_htlc {
					writer.write_all(&[0; 1])?;
				} else {
					writer.write_all(&[1; 1])?;
				}
				writer.write_all(&byte_utils::be64_to_array(*amount))?;
			},
			&InputMaterial::RemoteHTLC { ref script, ref key, ref preimage, ref amount, ref locktime } => {
				writer.write_all(&[1; 1])?;
				script.write(writer)?;
				key.write(writer)?;
				preimage.write(writer)?;
				writer.write_all(&byte_utils::be64_to_array(*amount))?;
				writer.write_all(&byte_utils::be32_to_array(*locktime))?;
			},
			&InputMaterial::LocalHTLC { ref script, ref sigs, ref preimage, ref amount } => {
				writer.write_all(&[2; 1])?;
				script.write(writer)?;
				sigs.0.write(writer)?;
				sigs.1.write(writer)?;
				preimage.write(writer)?;
				writer.write_all(&byte_utils::be64_to_array(*amount))?;
			}
		}
		Ok(())
	}
}

impl<R: ::std::io::Read> Readable<R> for InputMaterial {
	fn read(reader: &mut R) -> Result<Self, DecodeError> {
		let input_material = match <u8 as Readable<R>>::read(reader)? {
			0 => {
				let script = Readable::read(reader)?;
				let pubkey = Readable::read(reader)?;
				let key = Readable::read(reader)?;
				let is_htlc = match <u8 as Readable<R>>::read(reader)? {
					0 => true,
					1 => false,
					_ => return Err(DecodeError::InvalidValue),
				};
				let amount = Readable::read(reader)?;
				InputMaterial::Revoked {
					script,
					pubkey,
					key,
					is_htlc,
					amount
				}
			},
			1 => {
				let script = Readable::read(reader)?;
				let key = Readable::read(reader)?;
				let preimage = Readable::read(reader)?;
				let amount = Readable::read(reader)?;
				let locktime = Readable::read(reader)?;
				InputMaterial::RemoteHTLC {
					script,
					key,
					preimage,
					amount,
					locktime
				}
			},
			2 => {
				let script = Readable::read(reader)?;
				let their_sig = Readable::read(reader)?;
				let our_sig = Readable::read(reader)?;
				let preimage = Readable::read(reader)?;
				let amount = Readable::read(reader)?;
				InputMaterial::LocalHTLC {
					script,
					sigs: (their_sig, our_sig),
					preimage,
					amount
				}
			}
			_ => return Err(DecodeError::InvalidValue),
		};
		Ok(input_material)
	}
}

/// Upon discovery of some classes of onchain tx by ChannelMonitor, we may have to take action on
/// them once they mature to enough confirmations (ANTI_REORG_DELAY)
#[derive(Clone, PartialEq)]
enum OnchainEvent {
	/// Outpoint under claim process by our own tx; once this one gets enough confirmations, we
	/// remove it from the bump-txn candidate buffer.
	Claim {
		claim_request: Sha256dHash,
	},
	/// HTLC output getting solved by a timeout; at maturation we pass upstream payment source
	/// information to solve the inbound HTLC in the backward channel. Note that in case of a
	/// preimage we pass the info upstream without delay as we can only win from it, so it's never
	/// an OnchainEvent
	HTLCUpdate {
		htlc_update: (HTLCSource, PaymentHash),
	},
	/// A claim tx aggregates multiple claimable outpoints. One of the outpoints may be claimed by
	/// a remote party's tx. In this case, we need to drop the outpoint and regenerate a new claim
	/// tx. For safety, we keep tracking the outpoint to be sure to resurrect it back into the
	/// claim tx if reorgs happen.
	ContentiousOutpoint {
		outpoint: BitcoinOutPoint,
		input_material: InputMaterial,
	}
}

/// Higher-level cache structure needed to re-generate bumped claim txn if needed
#[derive(Clone, PartialEq)]
pub struct ClaimTxBumpMaterial {
	// At every block tick, used to check if the pending claiming tx is taking too
	// much time for confirmation and we need to bump it.
	height_timer: u32,
	// Tracked in case of reorg to wipe out now-superfluous bump material
	feerate_previous: u64,
	// Soonest timelock among the set of outpoints claimed, used to decide how urgently the claim
	// should be bumped (priority is driven by timelock, not feerate)
	soonest_timelock: u32,
	// Cache of the script, pubkey, sig or key needed to solve each claimable output's scriptpubkey.
	per_input_material: HashMap<BitcoinOutPoint, InputMaterial>,
}

impl Writeable for ClaimTxBumpMaterial {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
		writer.write_all(&byte_utils::be32_to_array(self.height_timer))?;
		writer.write_all(&byte_utils::be64_to_array(self.feerate_previous))?;
		writer.write_all(&byte_utils::be32_to_array(self.soonest_timelock))?;
		writer.write_all(&byte_utils::be64_to_array(self.per_input_material.len() as u64))?;
		for (outp, tx_material) in self.per_input_material.iter() {
			outp.write(writer)?;
			tx_material.write(writer)?;
		}
		Ok(())
	}
}

impl<R: ::std::io::Read> Readable<R> for ClaimTxBumpMaterial {
	fn read(reader: &mut R) -> Result<Self, DecodeError> {
		let height_timer = Readable::read(reader)?;
		let feerate_previous = Readable::read(reader)?;
		let soonest_timelock = Readable::read(reader)?;
		let per_input_material_len: u64 = Readable::read(reader)?;
		let mut per_input_material = HashMap::with_capacity(cmp::min(per_input_material_len as usize, MAX_ALLOC_SIZE / 128));
		for _ in 0..per_input_material_len {
			let outpoint = Readable::read(reader)?;
			let input_material = Readable::read(reader)?;
			per_input_material.insert(outpoint, input_material);
		}
		Ok(Self { height_timer, feerate_previous, soonest_timelock, per_input_material })
	}
}

const SERIALIZATION_VERSION: u8 = 1;
const MIN_SERIALIZATION_VERSION: u8 = 1;

/// A ChannelMonitor handles chain events (blocks connected and disconnected) and generates
/// on-chain transactions to ensure no loss of funds occurs.
///
/// You MUST ensure that no ChannelMonitors for a given channel anywhere contain out-of-date
/// information and are actively monitoring the chain.
#[derive(Clone)]
pub struct ChannelMonitor {
	commitment_transaction_number_obscure_factor: u64,

	key_storage: Storage,
	their_htlc_base_key: Option<PublicKey>,
	their_delayed_payment_base_key: Option<PublicKey>,
	funding_redeemscript: Option<Script>,
	channel_value_satoshis: Option<u64>,
	// first is the idx of the first of the two revocation points
	their_cur_revocation_points: Option<(u64, PublicKey, Option<PublicKey>)>,

	our_to_self_delay: u16,
	their_to_self_delay: Option<u16>,

	old_secrets: [([u8; 32], u64); 49],
	remote_claimable_outpoints: HashMap<Sha256dHash, Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>>,
	/// We cannot identify HTLC-Success or HTLC-Timeout transactions by themselves on the chain.
	/// Nor can we figure out their commitment numbers without the commitment transaction they are
	/// spending. Thus, in order to claim them via revocation key, we track all the remote
	/// commitment transactions which we find on-chain, mapping them to the commitment number which
	/// can be used to derive the revocation key and claim the transactions.
	remote_commitment_txn_on_chain: HashMap<Sha256dHash, (u64, Vec<Script>)>,
	/// Cache used to make pruning of payment_preimages faster.
	/// Maps payment_hash values to commitment numbers for remote transactions for non-revoked
	/// remote transactions (ie should remain pretty small).
	/// Serialized to disk but should generally not be sent to Watchtowers.
	remote_hash_commitment_number: HashMap<PaymentHash, u64>,

	// We store two local commitment transactions to avoid any race conditions where we may update
	// some monitors (potentially on watchtowers) but then fail to update others, resulting in the
	// various monitors for one channel being out of sync, and us broadcasting a local
	// transaction for which we have deleted claim information on some watchtowers.
	prev_local_signed_commitment_tx: Option<LocalSignedTx>,
	current_local_signed_commitment_tx: Option<LocalSignedTx>,

	// Used just for ChannelManager to make sure it has the latest channel data during
	// deserialization
	current_remote_commitment_number: u64,

	payment_preimages: HashMap<PaymentHash, PaymentPreimage>,

	destination_script: Script,
	// Thanks to data loss protection, we may be able to claim our non-htlc funds
	// back; this is the script we have to spend from, but we need to
	// scan every commitment transaction for it
	to_remote_rescue: Option<(Script, SecretKey)>,

	// Used to track claiming requests. If a claim tx doesn't confirm before the height timer
	// expires, we need to bump it (RBF or CPFP). If an input has been part of an aggregate tx at
	// the first claim try, we need to keep it within another bumped aggregate tx to comply with
	// RBF rules. We may have multiple claiming txn in flight for the same set of outpoints. One of
	// the outpoints may be spent by a transaction not issued by us. That's why at block connection
	// we scan all inputs and, if any of them is among the set of a claiming request, we test for
	// set equality between the spending transaction and the claim request. If true, it means the
	// transaction was one of our claiming ones and, after a security delay of 6 blocks, we remove
	// the pending claim request. If false, it means the transaction wasn't ours and we need to
	// regenerate a new claim request with the reduced set of still-claimable outpoints.
	// Key is the identifier of the pending claim request, i.e the txid of the initial claiming
	// transaction generated by us, and is immutable until all outpoints of the claimable set are
	// post-anti-reorg-delay solved.
	// Value is a cache of the elements needed to generate a bumped claiming transaction (see ClaimTxBumpMaterial)
	#[cfg(test)] // Used in functional_test to verify sanitization
	pub pending_claim_requests: HashMap<Sha256dHash, ClaimTxBumpMaterial>,
	#[cfg(not(test))]
	pending_claim_requests: HashMap<Sha256dHash, ClaimTxBumpMaterial>,

	// Used to link outpoints claimed in a connected block to a pending claim request.
	// Key is an outpoint which monitor parsing has detected we have the keys/scripts to claim.
	// Value is (pending claim request identifier, confirmation_block); the identifier
	// is the txid of the initial claiming transaction and is immutable until the outpoint is
	// post-anti-reorg-delay solved; confirmation_block is used to erase the entry if the
	// block with the output gets disconnected.
	#[cfg(test)] // Used in functional_test to verify sanitization
	pub claimable_outpoints: HashMap<BitcoinOutPoint, (Sha256dHash, u32)>,
	#[cfg(not(test))]
	claimable_outpoints: HashMap<BitcoinOutPoint, (Sha256dHash, u32)>,

	// Used to track onchain events, i.e transactions that are part of channels confirmed on chain,
	// on which we have to take actions once they reach enough confs. Key is a block height timer,
	// i.e we enforce actions when we receive a block with the given height. Actions depend on the
	// OnchainEvent type.
	onchain_events_waiting_threshold_conf: HashMap<u32, Vec<OnchainEvent>>,

	// We simply modify last_block_hash in Channel's block_connected so that serialization is
	// consistent but hopefully the users' copy handles block_connected in a consistent way.
	// (we do *not*, however, update them in insert_combine to ensure any local user copies keep
	// their last_block_hash from its state and not based on updated copies that didn't run through
	// the full block_connected).
	pub(crate) last_block_hash: Sha256dHash,
	secp_ctx: Secp256k1<secp256k1::All>, //TODO: dedup this a bit...
	logger: Arc<Logger>,
}

macro_rules! subtract_high_prio_fee {
	($self: ident, $fee_estimator: expr, $value: expr, $predicted_weight: expr, $used_feerate: expr) => {
		{
			$used_feerate = $fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::HighPriority);
			let mut fee = $used_feerate * ($predicted_weight as u64) / 1000;
			if $value <= fee {
				$used_feerate = $fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
				fee = $used_feerate * ($predicted_weight as u64) / 1000;
				if $value <= fee {
					$used_feerate = $fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background);
					fee = $used_feerate * ($predicted_weight as u64) / 1000;
					if $value <= fee {
						log_error!($self, "Failed to generate an on-chain punishment tx as even low priority fee ({} sat) was more than the entire claim balance ({} sat)",
							fee, $value);
						false
					} else {
						log_warn!($self, "Used low priority fee for on-chain punishment tx as high priority fee was more than the entire claim balance ({} sat)",
							$value);
						$value -= fee;
						true
					}
				} else {
					log_warn!($self, "Used medium priority fee for on-chain punishment tx as high priority fee was more than the entire claim balance ({} sat)",
						$value);
					$value -= fee;
					true
				}
			} else {
				$value -= fee;
				true
			}
		}
	}
}
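
// A hedged usage sketch (the exact call sites appear later in this file and may differ slightly):
//
//   let mut value = output_value_sat;
//   let mut used_feerate;
//   if !subtract_high_prio_fee!(self, fee_estimator, value, predicted_weight, used_feerate) {
//       return None; // fee at even the lowest priority would eat the entire claimable value
//   }
//   // on success, `value` has been reduced by the chosen fee and `used_feerate` records the rate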

#[cfg(any(test, feature = "fuzztarget"))]
/// Used only in testing and fuzztarget to check serialization roundtrips don't change the
/// underlying object
impl PartialEq for ChannelMonitor {
	fn eq(&self, other: &Self) -> bool {
		if self.commitment_transaction_number_obscure_factor != other.commitment_transaction_number_obscure_factor ||
			self.key_storage != other.key_storage ||
			self.their_htlc_base_key != other.their_htlc_base_key ||
			self.their_delayed_payment_base_key != other.their_delayed_payment_base_key ||
			self.funding_redeemscript != other.funding_redeemscript ||
			self.channel_value_satoshis != other.channel_value_satoshis ||
			self.their_cur_revocation_points != other.their_cur_revocation_points ||
			self.our_to_self_delay != other.our_to_self_delay ||
			self.their_to_self_delay != other.their_to_self_delay ||
			self.remote_claimable_outpoints != other.remote_claimable_outpoints ||
			self.remote_commitment_txn_on_chain != other.remote_commitment_txn_on_chain ||
			self.remote_hash_commitment_number != other.remote_hash_commitment_number ||
			self.prev_local_signed_commitment_tx != other.prev_local_signed_commitment_tx ||
			self.current_remote_commitment_number != other.current_remote_commitment_number ||
			self.current_local_signed_commitment_tx != other.current_local_signed_commitment_tx ||
			self.payment_preimages != other.payment_preimages ||
			self.destination_script != other.destination_script ||
			self.to_remote_rescue != other.to_remote_rescue ||
			self.pending_claim_requests != other.pending_claim_requests ||
			self.claimable_outpoints != other.claimable_outpoints ||
			self.onchain_events_waiting_threshold_conf != other.onchain_events_waiting_threshold_conf
		{
			false
		} else {
			for (&(ref secret, ref idx), &(ref o_secret, ref o_idx)) in self.old_secrets.iter().zip(other.old_secrets.iter()) {
				if secret != o_secret || idx != o_idx {
					return false
				}
			}
			true
		}
	}
}

impl ChannelMonitor {
	pub(super) fn new(funding_key: &SecretKey, revocation_base_key: &SecretKey, delayed_payment_base_key: &SecretKey, htlc_base_key: &SecretKey, payment_base_key: &SecretKey, shutdown_pubkey: &PublicKey, our_to_self_delay: u16, destination_script: Script, logger: Arc<Logger>) -> ChannelMonitor {
		ChannelMonitor {
			commitment_transaction_number_obscure_factor: 0,

			key_storage: Storage::Local {
				funding_key: funding_key.clone(),
				revocation_base_key: revocation_base_key.clone(),
				htlc_base_key: htlc_base_key.clone(),
				delayed_payment_base_key: delayed_payment_base_key.clone(),
				payment_base_key: payment_base_key.clone(),
				shutdown_pubkey: shutdown_pubkey.clone(),
				funding_info: None,
				current_remote_commitment_txid: None,
				prev_remote_commitment_txid: None,
			},
			their_htlc_base_key: None,
			their_delayed_payment_base_key: None,
			funding_redeemscript: None,
			channel_value_satoshis: None,
			their_cur_revocation_points: None,

			our_to_self_delay: our_to_self_delay,
			their_to_self_delay: None,

			old_secrets: [([0; 32], 1 << 48); 49],
			remote_claimable_outpoints: HashMap::new(),
			remote_commitment_txn_on_chain: HashMap::new(),
			remote_hash_commitment_number: HashMap::new(),

			prev_local_signed_commitment_tx: None,
			current_local_signed_commitment_tx: None,
			current_remote_commitment_number: 1 << 48,

			payment_preimages: HashMap::new(),
			destination_script: destination_script,
			to_remote_rescue: None,

			pending_claim_requests: HashMap::new(),
			claimable_outpoints: HashMap::new(),

			onchain_events_waiting_threshold_conf: HashMap::new(),

			last_block_hash: Default::default(),
			secp_ctx: Secp256k1::new(),
			logger,
		}
	}

	fn get_witnesses_weight(inputs: &[InputDescriptors]) -> usize {
		let mut tx_weight = 2; // count segwit flags
		for inp in inputs {
			// We use expected weight (and not actual) as signatures and time lock delays may vary
			tx_weight += match inp {
				// number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script
				&InputDescriptors::RevokedOfferedHTLC => {
					1 + 1 + 73 + 1 + 33 + 1 + 133
				},
				// number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script
				&InputDescriptors::RevokedReceivedHTLC => {
					1 + 1 + 73 + 1 + 33 + 1 + 139
				},
				// number_of_witness_elements + sig_length + remotehtlc_sig + preimage_length + preimage + witness_script_length + witness_script
				&InputDescriptors::OfferedHTLC => {
					1 + 1 + 73 + 1 + 32 + 1 + 133
				},
				// number_of_witness_elements + sig_length + remotehtlc_sig + empty_vec_length + empty_vec + witness_script_length + witness_script
				&InputDescriptors::ReceivedHTLC => {
					1 + 1 + 73 + 1 + 1 + 1 + 139
				},
				// number_of_witness_elements + sig_length + revocation_sig + true_length + op_true + witness_script_length + witness_script
				&InputDescriptors::RevokedOutput => {
					1 + 1 + 73 + 1 + 1 + 1 + 77
				},
			};
		}
		tx_weight
	}
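
	// Worked example: claiming a single revoked to_local output predicts
	// 2 + (1 + 1 + 73 + 1 + 1 + 1 + 77) = 157 weight units of witness data, while aggregating a
	// revoked offered and a revoked received HTLC predicts 2 + 243 + 249 = 494. Callers below add
	// this to the unsigned transaction's weight when predicting total weight for fee estimation.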

	fn get_height_timer(current_height: u32, timelock_expiration: u32) -> u32 {
		if timelock_expiration <= current_height || timelock_expiration - current_height <= 3 {
			return current_height + 1
		} else if timelock_expiration - current_height <= 15 {
			return current_height + 3
		}
		current_height + 15
	}

	#[inline]
	fn place_secret(idx: u64) -> u8 {
		for i in 0..48 {
			if idx & (1 << i) == (1 << i) {
				return i
			}
		}
		48
	}

	#[inline]
	fn derive_secret(secret: [u8; 32], bits: u8, idx: u64) -> [u8; 32] {
		let mut res: [u8; 32] = secret;
		for i in 0..bits {
			let bitpos = bits - 1 - i;
			if idx & (1 << bitpos) == (1 << bitpos) {
				res[(bitpos / 8) as usize] ^= 1 << (bitpos & 7);
				res = Sha256::hash(&res).into_inner();
			}
		}
		res
	}
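
	// Worked example of the compact secret storage above (a sketch of BOLT 3's 49-bucket scheme):
	// place_secret(0) == 48, place_secret(0b100) == 2 and place_secret(0b101) == 0, i.e. a secret
	// is stored in the bucket matching its lowest set bit. derive_secret(secret, bits, idx) then
	// re-derives a later-released secret by flipping each set bit of `idx` (from high to low
	// within `bits`) and SHA256-hashing, which is how provide_secret() below checks that a newly
	// provided secret is consistent with every older secret we already stored.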

	/// Inserts a revocation secret into this channel monitor. Prunes old preimages if they are
	/// needed neither by local commitment transactions' HTLCs nor by remote ones. Once we have
	/// seen a remote commitment transaction's secret, such preimages are de facto pruned (we can
	/// use the revocation key instead).
	pub(super) fn provide_secret(&mut self, idx: u64, secret: [u8; 32]) -> Result<(), MonitorUpdateError> {
		let pos = ChannelMonitor::place_secret(idx);
		for i in 0..pos {
			let (old_secret, old_idx) = self.old_secrets[i as usize];
			if ChannelMonitor::derive_secret(secret, pos, old_idx) != old_secret {
				return Err(MonitorUpdateError("Previous secret did not match new one"));
			}
		}
		if self.get_min_seen_secret() <= idx {
			return Ok(());
		}
		self.old_secrets[pos as usize] = (secret, idx);

		// Prune HTLCs from the previous remote commitment tx so we don't generate failure/fulfill
		// events for now-revoked/fulfilled HTLCs.
		// TODO: We should probably consider whether we're really getting the next secret here.
		if let Storage::Local { ref mut prev_remote_commitment_txid, .. } = self.key_storage {
			if let Some(txid) = prev_remote_commitment_txid.take() {
				for &mut (_, ref mut source) in self.remote_claimable_outpoints.get_mut(&txid).unwrap() {
					*source = None;
				}
			}
		}

		if !self.payment_preimages.is_empty() {
			let local_signed_commitment_tx = self.current_local_signed_commitment_tx.as_ref().expect("Channel needs at least an initial commitment tx !");
			let prev_local_signed_commitment_tx = self.prev_local_signed_commitment_tx.as_ref();
			let min_idx = self.get_min_seen_secret();
			let remote_hash_commitment_number = &mut self.remote_hash_commitment_number;

			self.payment_preimages.retain(|&k, _| {
				for &(ref htlc, _, _) in &local_signed_commitment_tx.htlc_outputs {
					if k == htlc.payment_hash {
						return true
					}
				}
				if let Some(prev_local_commitment_tx) = prev_local_signed_commitment_tx {
					for &(ref htlc, _, _) in prev_local_commitment_tx.htlc_outputs.iter() {
						if k == htlc.payment_hash {
							return true
						}
					}
				}
				let contains = if let Some(cn) = remote_hash_commitment_number.get(&k) {
					if *cn < min_idx {
						return true
					}
					true
				} else { false };
				if contains {
					remote_hash_commitment_number.remove(&k);
				}
				false
			});
		}

		Ok(())
	}

	/// Informs this monitor of the latest remote (ie non-broadcastable) commitment transaction.
	/// The monitor watches for it to be broadcasted and then uses the HTLC information (and
	/// possibly future revocation/preimage information) to claim outputs where possible.
	/// We also cache the payment_hash-to-commitment-number mapping to lighten pruning of old
	/// preimages by watchtowers.
	pub(super) fn provide_latest_remote_commitment_tx_info(&mut self, unsigned_commitment_tx: &Transaction, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>, commitment_number: u64, their_revocation_point: PublicKey) {
		// TODO: Encrypt the htlc_outputs data with the single-hash of the commitment transaction
		// so that a remote monitor doesn't learn anything unless there is a malicious close.
		// (only maybe, sadly we cant do the same for local info, as we need to be aware of
		// timeouts)
		for &(ref htlc, _) in &htlc_outputs {
			self.remote_hash_commitment_number.insert(htlc.payment_hash, commitment_number);
		}

		let new_txid = unsigned_commitment_tx.txid();
		log_trace!(self, "Tracking new remote commitment transaction with txid {} at commitment number {} with {} HTLC outputs", new_txid, commitment_number, htlc_outputs.len());
		log_trace!(self, "New potential remote commitment transaction: {}", encode::serialize_hex(unsigned_commitment_tx));
		if let Storage::Local { ref mut current_remote_commitment_txid, ref mut prev_remote_commitment_txid, .. } = self.key_storage {
			*prev_remote_commitment_txid = current_remote_commitment_txid.take();
			*current_remote_commitment_txid = Some(new_txid);
		}
		self.remote_claimable_outpoints.insert(new_txid, htlc_outputs);
		self.current_remote_commitment_number = commitment_number;
		//TODO: Merge this into the other per-remote-transaction output storage stuff
		match self.their_cur_revocation_points {
			Some(old_points) => {
				if old_points.0 == commitment_number + 1 {
					self.their_cur_revocation_points = Some((old_points.0, old_points.1, Some(their_revocation_point)));
				} else if old_points.0 == commitment_number + 2 {
					if let Some(old_second_point) = old_points.2 {
						self.their_cur_revocation_points = Some((old_points.0 - 1, old_second_point, Some(their_revocation_point)));
					} else {
						self.their_cur_revocation_points = Some((commitment_number, their_revocation_point, None));
					}
				} else {
					self.their_cur_revocation_points = Some((commitment_number, their_revocation_point, None));
				}
			},
			None => {
				self.their_cur_revocation_points = Some((commitment_number, their_revocation_point, None));
			}
		}
	}

	pub(super) fn provide_rescue_remote_commitment_tx_info(&mut self, their_revocation_point: PublicKey) {
		match self.key_storage {
			Storage::Local { ref payment_base_key, .. } => {
				if let Ok(payment_key) = chan_utils::derive_public_key(&self.secp_ctx, &their_revocation_point, &PublicKey::from_secret_key(&self.secp_ctx, &payment_base_key)) {
					let to_remote_script = Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0)
						.push_slice(&Hash160::hash(&payment_key.serialize())[..])
						.into_script();
					if let Ok(to_remote_key) = chan_utils::derive_private_key(&self.secp_ctx, &their_revocation_point, &payment_base_key) {
						self.to_remote_rescue = Some((to_remote_script, to_remote_key));
					}
				}
			},
			Storage::Watchtower { .. } => {}
		}
	}

	/// Informs this monitor of the latest local (ie broadcastable) commitment transaction. The
	/// monitor watches for timeouts and may broadcast it if we approach such a timeout. Thus, it
	/// is important that any clones of this channel monitor (including remote clones) be kept
	/// up-to-date as our local commitment transaction is updated.
	/// Panics if set_their_to_self_delay has never been called.
	pub(super) fn provide_latest_local_commitment_tx_info(&mut self, commitment_tx: LocalCommitmentTransaction, local_keys: chan_utils::TxCreationKeys, feerate_per_kw: u64, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>) {
		assert!(self.their_to_self_delay.is_some());
		self.prev_local_signed_commitment_tx = self.current_local_signed_commitment_tx.take();
		self.current_local_signed_commitment_tx = Some(LocalSignedTx {
			txid: commitment_tx.txid(),
			tx: commitment_tx,
			revocation_key: local_keys.revocation_key,
			a_htlc_key: local_keys.a_htlc_key,
			b_htlc_key: local_keys.b_htlc_key,
			delayed_payment_key: local_keys.a_delayed_payment_key,
			per_commitment_point: local_keys.per_commitment_point,
			feerate_per_kw,
			htlc_outputs,
		});
	}

	/// Provides a payment_hash->payment_preimage mapping. Will be automatically pruned when all
	/// commitment_tx_infos which contain the payment hash have been revoked.
	pub(super) fn provide_payment_preimage(&mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage) {
		self.payment_preimages.insert(payment_hash.clone(), payment_preimage.clone());
	}

	/// Combines this ChannelMonitor with the information contained in the other ChannelMonitor.
	/// After a successful call this ChannelMonitor is up-to-date and is safe to use to monitor the
	/// chain for new blocks/transactions.
	pub fn insert_combine(&mut self, mut other: ChannelMonitor) -> Result<(), MonitorUpdateError> {
		match self.key_storage {
			Storage::Local { ref funding_info, .. } => {
				if funding_info.is_none() { return Err(MonitorUpdateError("Try to combine a Local monitor without funding_info")); }
				let our_funding_info = funding_info;
				if let Storage::Local { ref funding_info, .. } = other.key_storage {
					if funding_info.is_none() { return Err(MonitorUpdateError("Try to combine a Local monitor without funding_info")); }
					// We should be able to compare the entire funding_txo, but in fuzztarget it's trivially
					// easy to collide the funding_txo hash and have a different scriptPubKey.
					if funding_info.as_ref().unwrap().0 != our_funding_info.as_ref().unwrap().0 {
						return Err(MonitorUpdateError("Funding transaction outputs are not identical!"));
					}
				} else {
					return Err(MonitorUpdateError("Try to combine a Local monitor with a Watchtower one !"));
				}
			},
			Storage::Watchtower { .. } => {
				if let Storage::Watchtower { .. } = other.key_storage {
					unimplemented!();
				} else {
					return Err(MonitorUpdateError("Try to combine a Watchtower monitor with a Local one !"));
				}
			},
		}
		let other_min_secret = other.get_min_seen_secret();
		let our_min_secret = self.get_min_seen_secret();
		if our_min_secret > other_min_secret {
			self.provide_secret(other_min_secret, other.get_secret(other_min_secret).unwrap())?;
		}
		if let Some(ref local_tx) = self.current_local_signed_commitment_tx {
			if let Some(ref other_local_tx) = other.current_local_signed_commitment_tx {
				// The commitment number is obscured in the commitment tx: the lower 24 bits of
				// nSequence and nLockTime, XORed with the per-channel obscure factor, encode it.
				let our_commitment_number = 0xffffffffffff - ((((local_tx.tx.without_valid_witness().input[0].sequence as u64 & 0xffffff) << 3*8) | (local_tx.tx.without_valid_witness().lock_time as u64 & 0xffffff)) ^ self.commitment_transaction_number_obscure_factor);
				let other_commitment_number = 0xffffffffffff - ((((other_local_tx.tx.without_valid_witness().input[0].sequence as u64 & 0xffffff) << 3*8) | (other_local_tx.tx.without_valid_witness().lock_time as u64 & 0xffffff)) ^ other.commitment_transaction_number_obscure_factor);
				if our_commitment_number >= other_commitment_number {
					self.key_storage = other.key_storage;
				}
			}
		}
		// TODO: We should use current_remote_commitment_number and the commitment number out of
		// local transactions to decide how to merge
		if our_min_secret >= other_min_secret {
			self.their_cur_revocation_points = other.their_cur_revocation_points;
			for (txid, htlcs) in other.remote_claimable_outpoints.drain() {
				self.remote_claimable_outpoints.insert(txid, htlcs);
			}
			if let Some(local_tx) = other.prev_local_signed_commitment_tx {
				self.prev_local_signed_commitment_tx = Some(local_tx);
			}
			if let Some(local_tx) = other.current_local_signed_commitment_tx {
				self.current_local_signed_commitment_tx = Some(local_tx);
			}
			self.payment_preimages = other.payment_preimages;
			self.to_remote_rescue = other.to_remote_rescue;
		}

		self.current_remote_commitment_number = cmp::min(self.current_remote_commitment_number, other.current_remote_commitment_number);
		Ok(())
	}

	/// Allows this monitor to scan only for transactions which are applicable. Note that this is
	/// optional: without it this monitor cannot be used in an SPV client, but you may wish to
	/// skip it (or call unset_funding_info) on a monitor you intend to send to a watchtower, as
	/// doing so provides slightly better privacy.
	/// It is the caller's responsibility to register the outpoint and script with the chain
	/// watcher, passing the outpoint as the key to add_update_monitor.
	pub(super) fn set_funding_info(&mut self, new_funding_info: (OutPoint, Script)) {
		match self.key_storage {
			Storage::Local { ref mut funding_info, .. } => {
				*funding_info = Some(new_funding_info);
			},
			Storage::Watchtower { .. } => {
				panic!("Channel somehow ended up with its internal ChannelMonitor being in Watchtower mode?");
			}
		}
	}

	/// We log these base keys at channel opening so that we are able to rebuild the redeemscript
	/// in case a revoked commitment transaction appears on-chain.
	///
	/// Panics if commitment_transaction_number_obscure_factor doesn't fit in 48 bits.
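	///
	/// (Illustrative sketch, not taken from this crate's source.) Per BOLT 3 the obscure factor
	/// is the lower 48 bits of the SHA256 of the channel opener's payment basepoint followed by
	/// the acceptor's, so a caller might derive it roughly as:
	/// ```ignore
	/// let mut sha = Sha256::engine();
	/// sha.input(&opener_payment_basepoint.serialize());
	/// sha.input(&acceptor_payment_basepoint.serialize());
	/// let res = Sha256::from_engine(sha).into_inner();
	/// let obscure_factor = ((res[26] as u64) << 5*8) | ((res[27] as u64) << 4*8)
	/// 	| ((res[28] as u64) << 3*8) | ((res[29] as u64) << 2*8)
	/// 	| ((res[30] as u64) << 1*8) | (res[31] as u64);
	/// ```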
	pub(super) fn set_basic_channel_info(&mut self, their_htlc_base_key: &PublicKey, their_delayed_payment_base_key: &PublicKey, their_to_self_delay: u16, funding_redeemscript: Script, channel_value_satoshis: u64, commitment_transaction_number_obscure_factor: u64) {
		self.their_htlc_base_key = Some(their_htlc_base_key.clone());
		self.their_delayed_payment_base_key = Some(their_delayed_payment_base_key.clone());
		self.their_to_self_delay = Some(their_to_self_delay);
		self.funding_redeemscript = Some(funding_redeemscript);
		self.channel_value_satoshis = Some(channel_value_satoshis);
		assert!(commitment_transaction_number_obscure_factor < (1 << 48));
		self.commitment_transaction_number_obscure_factor = commitment_transaction_number_obscure_factor;
	}

	pub(super) fn unset_funding_info(&mut self) {
		match self.key_storage {
			Storage::Local { ref mut funding_info, .. } => {
				*funding_info = None;
			},
			Storage::Watchtower { .. } => {
				panic!("Channel somehow ended up with its internal ChannelMonitor being in Watchtower mode?");
			},
		}
	}

	/// Gets the funding transaction outpoint of the channel this ChannelMonitor is monitoring for.
	pub fn get_funding_txo(&self) -> Option<OutPoint> {
		match self.key_storage {
			Storage::Local { ref funding_info, .. } => {
				match funding_info {
					&Some((outpoint, _)) => Some(outpoint),
					&None => None
				}
			},
			Storage::Watchtower { .. } => {
				return None;
			}
		}
	}

	/// Gets the set of all outpoints which this ChannelMonitor expects to hear about spends of.
	/// Generally useful when deserializing, as during normal operation the return values of
	/// block_connected are sufficient to ensure all relevant outpoints are being monitored (note
	/// that the get_funding_txo outpoint and transaction must also be monitored for!).
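	///
	/// A hedged usage sketch (the `chain_watcher` name and its `install_watch_outpoint` call are
	/// assumptions about the caller's chain-watching setup, not something defined here):
	/// ```ignore
	/// for (txid, vout, script) in monitor.get_monitored_outpoints() {
	/// 	chain_watcher.install_watch_outpoint((txid, vout), script);
	/// }
	/// ```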
	pub fn get_monitored_outpoints(&self) -> Vec<(Sha256dHash, u32, &Script)> {
		let mut res = Vec::with_capacity(self.remote_commitment_txn_on_chain.len() * 2);
		for (ref txid, &(_, ref outputs)) in self.remote_commitment_txn_on_chain.iter() {
			for (idx, output) in outputs.iter().enumerate() {
				res.push(((*txid).clone(), idx as u32, output));
			}
		}
		res
	}
2018-07-09 16:40:21 -04:00
/// Serializes into a vec, with various modes for the exposed pub fns
2018-09-19 13:31:14 -04:00
fn write < W : Writer > ( & self , writer : & mut W , for_local_storage : bool ) -> Result < ( ) , ::std ::io ::Error > {
//TODO: We still write out all the serialization here manually instead of using the fancy
//serialization framework we have, we should migrate things over to it.
writer . write_all ( & [ SERIALIZATION_VERSION ; 1 ] ) ? ;
writer . write_all ( & [ MIN_SERIALIZATION_VERSION ; 1 ] ) ? ;
2018-07-09 16:40:21 -04:00
// Set in initial Channel-object creation, so should always be set by now:
2018-10-24 10:34:16 -04:00
U48 ( self . commitment_transaction_number_obscure_factor ) . write ( writer ) ? ;
2018-07-09 16:40:21 -04:00
2019-01-06 17:02:53 -05:00
macro_rules ! write_option {
( $thing : expr ) = > {
match $thing {
& Some ( ref t ) = > {
1 u8 . write ( writer ) ? ;
t . write ( writer ) ? ;
} ,
& None = > 0 u8 . write ( writer ) ? ,
}
}
}
2018-07-09 16:40:21 -04:00
match self . key_storage {
2019-12-13 14:56:57 -05:00
Storage ::Local { ref funding_key , ref revocation_base_key , ref htlc_base_key , ref delayed_payment_base_key , ref payment_base_key , ref shutdown_pubkey , ref funding_info , ref current_remote_commitment_txid , ref prev_remote_commitment_txid } = > {
2018-09-19 13:31:14 -04:00
writer . write_all ( & [ 0 ; 1 ] ) ? ;
2019-12-13 01:58:08 -05:00
writer . write_all ( & funding_key [ .. ] ) ? ;
2018-09-19 13:31:14 -04:00
writer . write_all ( & revocation_base_key [ .. ] ) ? ;
writer . write_all ( & htlc_base_key [ .. ] ) ? ;
2018-10-19 02:44:40 +00:00
writer . write_all ( & delayed_payment_base_key [ .. ] ) ? ;
2018-11-01 03:22:56 +00:00
writer . write_all ( & payment_base_key [ .. ] ) ? ;
2018-11-16 20:52:33 -05:00
writer . write_all ( & shutdown_pubkey . serialize ( ) ) ? ;
2018-11-26 19:50:16 -05:00
match funding_info {
& Some ( ( ref outpoint , ref script ) ) = > {
writer . write_all ( & outpoint . txid [ .. ] ) ? ;
writer . write_all ( & byte_utils ::be16_to_array ( outpoint . index ) ) ? ;
script . write ( writer ) ? ;
} ,
& None = > {
debug_assert! ( false , " Try to serialize a useless Local monitor ! " ) ;
} ,
}
2019-04-09 20:01:18 -04:00
current_remote_commitment_txid . write ( writer ) ? ;
prev_remote_commitment_txid . write ( writer ) ? ;
2018-07-09 16:40:21 -04:00
} ,
2018-11-26 19:50:16 -05:00
Storage ::Watchtower { .. } = > unimplemented! ( ) ,
2018-07-09 16:40:21 -04:00
}
2018-09-19 13:31:14 -04:00
writer . write_all ( & self . their_htlc_base_key . as_ref ( ) . unwrap ( ) . serialize ( ) ) ? ;
writer . write_all ( & self . their_delayed_payment_base_key . as_ref ( ) . unwrap ( ) . serialize ( ) ) ? ;
2019-12-12 14:56:28 -05:00
self . funding_redeemscript . as_ref ( ) . unwrap ( ) . write ( writer ) ? ;
self . channel_value_satoshis . unwrap ( ) . write ( writer ) ? ;
2018-07-09 16:40:21 -04:00
match self . their_cur_revocation_points {
Some ( ( idx , pubkey , second_option ) ) = > {
2018-09-19 13:31:14 -04:00
writer . write_all ( & byte_utils ::be48_to_array ( idx ) ) ? ;
writer . write_all ( & pubkey . serialize ( ) ) ? ;
2018-07-09 16:40:21 -04:00
match second_option {
Some ( second_pubkey ) = > {
2018-09-19 13:31:14 -04:00
writer . write_all ( & second_pubkey . serialize ( ) ) ? ;
2018-07-09 16:40:21 -04:00
} ,
None = > {
2018-09-19 13:31:14 -04:00
writer . write_all ( & [ 0 ; 33 ] ) ? ;
2018-07-09 16:40:21 -04:00
} ,
}
} ,
None = > {
2018-09-19 13:31:14 -04:00
writer . write_all ( & byte_utils ::be48_to_array ( 0 ) ) ? ;
2018-07-09 16:40:21 -04:00
} ,
}
2018-09-19 13:31:14 -04:00
writer . write_all ( & byte_utils ::be16_to_array ( self . our_to_self_delay ) ) ? ;
writer . write_all ( & byte_utils ::be16_to_array ( self . their_to_self_delay . unwrap ( ) ) ) ? ;
2018-07-09 16:40:21 -04:00
for & ( ref secret , ref idx ) in self . old_secrets . iter ( ) {
2018-09-19 13:31:14 -04:00
writer . write_all ( secret ) ? ;
writer . write_all ( & byte_utils ::be64_to_array ( * idx ) ) ? ;
2018-07-09 16:40:21 -04:00
}
macro_rules ! serialize_htlc_in_commitment {
( $htlc_output : expr ) = > {
2018-09-19 13:31:14 -04:00
writer . write_all ( & [ $htlc_output . offered as u8 ; 1 ] ) ? ;
writer . write_all ( & byte_utils ::be64_to_array ( $htlc_output . amount_msat ) ) ? ;
writer . write_all ( & byte_utils ::be32_to_array ( $htlc_output . cltv_expiry ) ) ? ;
2018-11-22 21:18:16 -05:00
writer . write_all ( & $htlc_output . payment_hash . 0 [ .. ] ) ? ;
2019-04-09 20:01:18 -04:00
$htlc_output . transaction_output_index . write ( writer ) ? ;
2018-07-09 16:40:21 -04:00
}
}
2018-09-19 13:31:14 -04:00
writer . write_all ( & byte_utils ::be64_to_array ( self . remote_claimable_outpoints . len ( ) as u64 ) ) ? ;
2019-01-04 14:37:48 -05:00
for ( ref txid , ref htlc_infos ) in self . remote_claimable_outpoints . iter ( ) {
2018-09-19 13:31:14 -04:00
writer . write_all ( & txid [ .. ] ) ? ;
2018-11-30 10:58:44 -05:00
writer . write_all ( & byte_utils ::be64_to_array ( htlc_infos . len ( ) as u64 ) ) ? ;
2019-01-04 14:37:48 -05:00
for & ( ref htlc_output , ref htlc_source ) in htlc_infos . iter ( ) {
2018-07-09 16:40:21 -04:00
serialize_htlc_in_commitment! ( htlc_output ) ;
2019-01-04 14:37:48 -05:00
write_option! ( htlc_source ) ;
2018-11-30 10:58:44 -05:00
}
2018-07-09 16:40:21 -04:00
}
2018-10-24 13:59:03 -04:00
writer . write_all ( & byte_utils ::be64_to_array ( self . remote_commitment_txn_on_chain . len ( ) as u64 ) ) ? ;
2018-10-26 14:35:50 -04:00
for ( ref txid , & ( commitment_number , ref txouts ) ) in self . remote_commitment_txn_on_chain . iter ( ) {
2018-10-24 13:59:03 -04:00
writer . write_all ( & txid [ .. ] ) ? ;
2018-10-26 14:35:50 -04:00
writer . write_all ( & byte_utils ::be48_to_array ( commitment_number ) ) ? ;
2018-10-24 13:59:03 -04:00
( txouts . len ( ) as u64 ) . write ( writer ) ? ;
for script in txouts . iter ( ) {
script . write ( writer ) ? ;
2018-07-09 16:40:21 -04:00
}
}
if for_local_storage {
2018-09-19 13:31:14 -04:00
writer . write_all ( & byte_utils ::be64_to_array ( self . remote_hash_commitment_number . len ( ) as u64 ) ) ? ;
2018-10-26 14:35:50 -04:00
for ( ref payment_hash , commitment_number ) in self . remote_hash_commitment_number . iter ( ) {
2018-11-22 21:18:16 -05:00
writer . write_all ( & payment_hash . 0 [ .. ] ) ? ;
2018-09-19 13:31:14 -04:00
writer . write_all ( & byte_utils ::be48_to_array ( * commitment_number ) ) ? ;
2018-07-09 16:40:21 -04:00
}
} else {
2018-09-19 13:31:14 -04:00
writer . write_all ( & byte_utils ::be64_to_array ( 0 ) ) ? ;
2018-07-09 16:40:21 -04:00
}
macro_rules ! serialize_local_tx {
( $local_tx : expr ) = > {
2019-12-13 01:58:08 -05:00
$local_tx . tx . write ( writer ) ? ;
2018-09-19 13:31:14 -04:00
writer . write_all ( & $local_tx . revocation_key . serialize ( ) ) ? ;
writer . write_all ( & $local_tx . a_htlc_key . serialize ( ) ) ? ;
writer . write_all ( & $local_tx . b_htlc_key . serialize ( ) ) ? ;
writer . write_all ( & $local_tx . delayed_payment_key . serialize ( ) ) ? ;
2019-12-13 14:56:57 -05:00
writer . write_all ( & $local_tx . per_commitment_point . serialize ( ) ) ? ;
2018-07-09 16:40:21 -04:00
2018-09-19 13:31:14 -04:00
writer . write_all ( & byte_utils ::be64_to_array ( $local_tx . feerate_per_kw ) ) ? ;
writer . write_all ( & byte_utils ::be64_to_array ( $local_tx . htlc_outputs . len ( ) as u64 ) ) ? ;
2019-12-13 14:56:57 -05:00
for & ( ref htlc_output , ref sig , ref htlc_source ) in $local_tx . htlc_outputs . iter ( ) {
2018-07-09 16:40:21 -04:00
serialize_htlc_in_commitment! ( htlc_output ) ;
2019-12-13 14:56:57 -05:00
if let & Some ( ref their_sig ) = sig {
2019-01-04 14:37:48 -05:00
1 u8 . write ( writer ) ? ;
2019-01-16 15:45:05 -05:00
writer . write_all ( & their_sig . serialize_compact ( ) ) ? ;
2019-01-04 14:37:48 -05:00
} else {
0 u8 . write ( writer ) ? ;
}
write_option! ( htlc_source ) ;
2018-11-30 10:58:44 -05:00
}
2018-07-09 16:40:21 -04:00
}
}
if let Some ( ref prev_local_tx ) = self . prev_local_signed_commitment_tx {
2018-09-19 13:31:14 -04:00
writer . write_all ( & [ 1 ; 1 ] ) ? ;
2018-07-09 16:40:21 -04:00
serialize_local_tx! ( prev_local_tx ) ;
} else {
2018-09-19 13:31:14 -04:00
writer . write_all ( & [ 0 ; 1 ] ) ? ;
2018-07-09 16:40:21 -04:00
}
if let Some ( ref cur_local_tx ) = self . current_local_signed_commitment_tx {
2018-09-19 13:31:14 -04:00
writer . write_all ( & [ 1 ; 1 ] ) ? ;
2018-07-09 16:40:21 -04:00
serialize_local_tx! ( cur_local_tx ) ;
} else {
2018-09-19 13:31:14 -04:00
writer . write_all ( & [ 0 ; 1 ] ) ? ;
2018-07-09 16:40:21 -04:00
}
2018-10-25 12:56:02 -04:00
if for_local_storage {
writer . write_all ( & byte_utils ::be48_to_array ( self . current_remote_commitment_number ) ) ? ;
} else {
writer . write_all ( & byte_utils ::be48_to_array ( 0 ) ) ? ;
}
2018-09-19 13:31:14 -04:00
writer . write_all ( & byte_utils ::be64_to_array ( self . payment_preimages . len ( ) as u64 ) ) ? ;
2018-07-09 16:40:21 -04:00
for payment_preimage in self . payment_preimages . values ( ) {
2018-11-22 21:18:16 -05:00
writer . write_all ( & payment_preimage . 0 [ .. ] ) ? ;
2018-07-09 16:40:21 -04:00
}
2018-10-24 11:14:12 -04:00
self . last_block_hash . write ( writer ) ? ;
2018-10-24 10:34:16 -04:00
self . destination_script . write ( writer ) ? ;
2019-08-02 16:29:12 -04:00
if let Some ( ( ref to_remote_script , ref local_key ) ) = self . to_remote_rescue {
writer . write_all ( & [ 1 ; 1 ] ) ? ;
to_remote_script . write ( writer ) ? ;
local_key . write ( writer ) ? ;
} else {
writer . write_all ( & [ 0 ; 1 ] ) ? ;
}
2018-07-09 16:40:21 -04:00
2019-12-09 22:18:20 -05:00
writer . write_all ( & byte_utils ::be64_to_array ( self . pending_claim_requests . len ( ) as u64 ) ) ? ;
for ( ref ancestor_claim_txid , claim_tx_data ) in self . pending_claim_requests . iter ( ) {
ancestor_claim_txid . write ( writer ) ? ;
claim_tx_data . write ( writer ) ? ;
}
writer . write_all ( & byte_utils ::be64_to_array ( self . claimable_outpoints . len ( ) as u64 ) ) ? ;
for ( ref outp , ref claim_and_height ) in self . claimable_outpoints . iter ( ) {
outp . write ( writer ) ? ;
claim_and_height . 0. write ( writer ) ? ;
claim_and_height . 1. write ( writer ) ? ;
2019-03-30 22:12:55 -04:00
}
2019-03-30 21:56:51 -04:00
writer . write_all ( & byte_utils ::be64_to_array ( self . onchain_events_waiting_threshold_conf . len ( ) as u64 ) ) ? ;
for ( ref target , ref events ) in self . onchain_events_waiting_threshold_conf . iter ( ) {
2019-05-30 20:54:02 -04:00
writer . write_all ( & byte_utils ::be32_to_array ( * * target ) ) ? ;
2019-03-30 21:56:51 -04:00
writer . write_all ( & byte_utils ::be64_to_array ( events . len ( ) as u64 ) ) ? ;
for ev in events . iter ( ) {
match * ev {
2019-12-09 22:18:20 -05:00
OnchainEvent ::Claim { ref claim_request } = > {
2019-03-30 21:56:51 -04:00
writer . write_all ( & [ 0 ; 1 ] ) ? ;
2019-12-09 22:18:20 -05:00
claim_request . write ( writer ) ? ;
2019-03-30 21:56:51 -04:00
} ,
OnchainEvent ::HTLCUpdate { ref htlc_update } = > {
writer . write_all ( & [ 1 ; 1 ] ) ? ;
htlc_update . 0. write ( writer ) ? ;
htlc_update . 1. write ( writer ) ? ;
2019-12-09 16:59:08 -05:00
} ,
OnchainEvent ::ContentiousOutpoint { ref outpoint , ref input_material } = > {
writer . write_all ( & [ 2 ; 1 ] ) ? ;
outpoint . write ( writer ) ? ;
input_material . write ( writer ) ? ;
2019-03-30 21:56:51 -04:00
}
}
2019-05-30 20:54:02 -04:00
}
}
2018-09-19 13:31:14 -04:00
Ok ( ( ) )
2018-07-09 16:40:21 -04:00
}

	/// Writes this monitor into the given writer, suitable for writing to disk.
	///
	/// Note that the deserializer is only implemented for (Sha256dHash, ChannelMonitor), which
	/// tells you the last block hash for which block_connected() was called. You MUST rescan any
	/// blocks along the "reorg path" (ie not just starting at the same height but starting at the
	/// highest common block that appears on your best chain as well as on the chain which
	/// contains the last block hash returned) upon deserializing the object!
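	///
	/// An illustrative sketch (the `VecWriter` type here is an assumption standing in for any
	/// `util::ser::Writer` implementation backed by a `Vec<u8>`):
	/// ```ignore
	/// let mut bytes = VecWriter(Vec::new());
	/// monitor.write_for_disk(&mut bytes)?;
	/// // Persist `bytes.0` durably (and, ideally, remotely) before acting on the update.
	/// ```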
	pub fn write_for_disk<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
		self.write(writer, true)
	}

	/// Encodes this monitor into the given writer, suitable for sending to a remote watchtower.
	///
	/// Note that the deserializer is only implemented for (Sha256dHash, ChannelMonitor), which
	/// tells you the last block hash for which block_connected() was called. You MUST rescan any
	/// blocks along the "reorg path" (ie not just starting at the same height but starting at the
	/// highest common block that appears on your best chain as well as on the chain which
	/// contains the last block hash returned) upon deserializing the object!
	pub fn write_for_watchtower<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
		self.write(writer, false)
	}

	/// Can only fail if idx is < get_min_seen_secret
	pub(super) fn get_secret(&self, idx: u64) -> Option<[u8; 32]> {
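		// old_secrets[i] holds the most recently revealed secret whose index has its low i bits
		// all zero; per BOLT 3's compact secret storage, any secret whose index shares the upper
		// bits can be re-derived from it by hashing with the differing low bits flipped (see
		// derive_secret).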
		for i in 0..self.old_secrets.len() {
			if (idx & (!((1 << i) - 1))) == self.old_secrets[i].1 {
				return Some(ChannelMonitor::derive_secret(self.old_secrets[i].0, i as u8, idx))
			}
		}
		assert!(idx < self.get_min_seen_secret());
		None
	}

	pub(super) fn get_min_seen_secret(&self) -> u64 {
		//TODO: This can be optimized?
		let mut min = 1 << 48;
		for &(_, idx) in self.old_secrets.iter() {
			if idx < min {
				min = idx;
			}
		}
		min
	}

	pub(super) fn get_cur_remote_commitment_number(&self) -> u64 {
		self.current_remote_commitment_number
	}

	pub(super) fn get_cur_local_commitment_number(&self) -> u64 {
		if let &Some(ref local_tx) = &self.current_local_signed_commitment_tx {
			0xffff_ffff_ffff - ((((local_tx.tx.without_valid_witness().input[0].sequence as u64 & 0xffffff) << 3*8) | (local_tx.tx.without_valid_witness().lock_time as u64 & 0xffffff)) ^ self.commitment_transaction_number_obscure_factor)
		} else { 0xffff_ffff_ffff }
	}

	/// Attempts to claim a remote commitment transaction's outputs using the revocation key and
	/// data in remote_claimable_outpoints. Will directly claim any HTLC outputs which expire at a
	/// height > height + CLTV_SHARED_CLAIM_BUFFER. In any case, will install monitoring for
	/// HTLC-Success/HTLC-Timeout transactions.
	/// Returns updates for HTLCs pending in the channel which were failed automatically by the
	/// broadcast of the revoked remote commitment tx.
	fn check_spend_remote_transaction(&mut self, tx: &Transaction, height: u32, fee_estimator: &FeeEstimator) -> (Vec<Transaction>, (Sha256dHash, Vec<TxOut>), Vec<SpendableOutputDescriptor>) {
2017-12-25 01:05:27 -05:00
// Most secp and related errors trying to create keys means we have no hope of constructing
// a spend transaction...so we return no transactions to broadcast
2018-04-24 00:19:52 -04:00
let mut txn_to_broadcast = Vec ::new ( ) ;
2018-09-07 01:40:41 +00:00
let mut watch_outputs = Vec ::new ( ) ;
2018-10-19 02:44:40 +00:00
let mut spendable_outputs = Vec ::new ( ) ;
2018-09-07 01:40:41 +00:00
let commitment_txid = tx . txid ( ) ; //TODO: This is gonna be a performance bottleneck for watchtowers!
let per_commitment_option = self . remote_claimable_outpoints . get ( & commitment_txid ) ;
2017-12-25 01:05:27 -05:00
macro_rules ! ignore_error {
( $thing : expr ) = > {
match $thing {
Ok ( a ) = > a ,
2019-05-30 20:54:02 -04:00
Err ( _ ) = > return ( txn_to_broadcast , ( commitment_txid , watch_outputs ) , spendable_outputs )
2017-12-25 01:05:27 -05:00
}
} ;
}
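// Recover which commitment number this transaction corresponds to by undoing the BOLT 3
// obscuring of its low 48 bits (split across input[0].sequence and lock_time), so the
// matching revocation secret, if we have one, can be looked up below.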
let commitment_number = 0xffffffffffff - ((((tx.input[0].sequence as u64 & 0xffffff) << 3*8) | (tx.lock_time as u64 & 0xffffff)) ^ self.commitment_transaction_number_obscure_factor);
2017-12-25 01:05:27 -05:00
if commitment_number > = self . get_min_seen_secret ( ) {
let secret = self . get_secret ( commitment_number ) . unwrap ( ) ;
2019-01-16 15:45:05 -05:00
let per_commitment_key = ignore_error! ( SecretKey ::from_slice ( & secret ) ) ;
2018-11-20 15:09:47 -05:00
let ( revocation_pubkey , b_htlc_key , local_payment_key ) = match self . key_storage {
2018-11-26 19:50:16 -05:00
Storage ::Local { ref revocation_base_key , ref htlc_base_key , ref payment_base_key , .. } = > {
2018-08-20 17:13:07 -04:00
let per_commitment_point = PublicKey ::from_secret_key ( & self . secp_ctx , & per_commitment_key ) ;
( ignore_error! ( chan_utils ::derive_public_revocation_key ( & self . secp_ctx , & per_commitment_point , & PublicKey ::from_secret_key ( & self . secp_ctx , & revocation_base_key ) ) ) ,
2018-11-20 15:09:47 -05:00
ignore_error! ( chan_utils ::derive_public_key ( & self . secp_ctx , & per_commitment_point , & PublicKey ::from_secret_key ( & self . secp_ctx , & htlc_base_key ) ) ) ,
Some ( ignore_error! ( chan_utils ::derive_private_key ( & self . secp_ctx , & per_commitment_point , & payment_base_key ) ) ) )
2017-12-25 01:05:27 -05:00
} ,
2018-11-26 19:50:16 -05:00
Storage ::Watchtower { ref revocation_base_key , ref htlc_base_key , .. } = > {
2018-08-20 17:13:07 -04:00
let per_commitment_point = PublicKey ::from_secret_key ( & self . secp_ctx , & per_commitment_key ) ;
2018-04-24 00:19:52 -04:00
( ignore_error! ( chan_utils ::derive_public_revocation_key ( & self . secp_ctx , & per_commitment_point , & revocation_base_key ) ) ,
2018-11-20 15:09:47 -05:00
ignore_error! ( chan_utils ::derive_public_key ( & self . secp_ctx , & per_commitment_point , & htlc_base_key ) ) ,
None )
2017-12-25 01:05:27 -05:00
} ,
} ;
2018-09-29 14:26:12 -04:00
let delayed_key = ignore_error! ( chan_utils ::derive_public_key ( & self . secp_ctx , & PublicKey ::from_secret_key ( & self . secp_ctx , & per_commitment_key ) , & self . their_delayed_payment_base_key . unwrap ( ) ) ) ;
2018-04-24 00:19:52 -04:00
let a_htlc_key = match self . their_htlc_base_key {
2019-05-30 20:54:02 -04:00
None = > return ( txn_to_broadcast , ( commitment_txid , watch_outputs ) , spendable_outputs ) ,
2018-08-20 17:13:07 -04:00
Some ( their_htlc_base_key ) = > ignore_error! ( chan_utils ::derive_public_key ( & self . secp_ctx , & PublicKey ::from_secret_key ( & self . secp_ctx , & per_commitment_key ) , & their_htlc_base_key ) ) ,
2017-12-25 01:05:27 -05:00
} ;
2018-04-24 00:19:52 -04:00
let revokeable_redeemscript = chan_utils ::get_revokeable_redeemscript ( & revocation_pubkey , self . our_to_self_delay , & delayed_key ) ;
let revokeable_p2wsh = revokeable_redeemscript . to_v0_p2wsh ( ) ;
2017-12-25 01:05:27 -05:00
2018-11-20 15:09:47 -05:00
let local_payment_p2wpkh = if let Some ( payment_key ) = local_payment_key {
// Note that the Network here is ignored as we immediately drop the address for the
// script_pubkey version.
2018-12-13 16:23:22 -05:00
let payment_hash160 = Hash160 ::hash ( & PublicKey ::from_secret_key ( & self . secp_ctx , & payment_key ) . serialize ( ) ) ;
2019-01-16 15:45:05 -05:00
Some ( Builder ::new ( ) . push_opcode ( opcodes ::all ::OP_PUSHBYTES_0 ) . push_slice ( & payment_hash160 [ .. ] ) . into_script ( ) )
2018-11-20 15:09:47 -05:00
} else { None } ;
2017-12-25 01:05:27 -05:00
let mut total_value = 0 ;
2018-04-24 00:19:52 -04:00
let mut inputs = Vec ::new ( ) ;
2019-03-30 22:12:55 -04:00
let mut inputs_info = Vec ::new ( ) ;
let mut inputs_desc = Vec ::new ( ) ;
2017-12-25 01:05:27 -05:00
2018-04-24 00:19:52 -04:00
for ( idx , outp ) in tx . output . iter ( ) . enumerate ( ) {
if outp . script_pubkey = = revokeable_p2wsh {
inputs . push ( TxIn {
2018-08-20 17:13:07 -04:00
previous_output : BitcoinOutPoint {
txid : commitment_txid ,
vout : idx as u32 ,
} ,
2017-12-25 01:05:27 -05:00
script_sig : Script ::new ( ) ,
2018-04-24 00:19:52 -04:00
sequence : 0xfffffffd ,
2018-03-26 14:03:59 -04:00
witness : Vec ::new ( ) ,
2017-12-25 01:05:27 -05:00
} ) ;
2019-03-30 22:12:55 -04:00
inputs_desc . push ( InputDescriptors ::RevokedOutput ) ;
2019-07-18 17:27:48 -04:00
inputs_info . push ( ( None , outp . value , self . our_to_self_delay as u32 ) ) ;
2018-04-24 00:19:52 -04:00
total_value + = outp . value ;
2018-11-20 15:09:47 -05:00
} else if Some ( & outp . script_pubkey ) = = local_payment_p2wpkh . as_ref ( ) {
spendable_outputs . push ( SpendableOutputDescriptor ::DynamicOutputP2WPKH {
outpoint : BitcoinOutPoint { txid : commitment_txid , vout : idx as u32 } ,
key : local_payment_key . unwrap ( ) ,
output : outp . clone ( ) ,
} ) ;
2018-04-24 00:19:52 -04:00
}
}
macro_rules ! sign_input {
( $sighash_parts : expr , $input : expr , $htlc_idx : expr , $amount : expr ) = > {
{
2019-03-30 22:12:55 -04:00
let ( sig , redeemscript , revocation_key ) = match self . key_storage {
2018-11-26 19:50:16 -05:00
Storage ::Local { ref revocation_base_key , .. } = > {
2018-04-24 00:19:52 -04:00
let redeemscript = if $htlc_idx . is_none ( ) { revokeable_redeemscript . clone ( ) } else {
2019-01-04 14:37:48 -05:00
let htlc = & per_commitment_option . unwrap ( ) [ $htlc_idx . unwrap ( ) ] . 0 ;
2018-04-24 00:19:52 -04:00
chan_utils ::get_htlc_redeemscript_with_explicit_keys ( htlc , & a_htlc_key , & b_htlc_key , & revocation_pubkey )
} ;
2019-01-17 17:36:49 -05:00
let sighash = hash_to_message! ( & $sighash_parts . sighash_all ( & $input , & redeemscript , $amount ) [ .. ] ) ;
2018-04-24 00:19:52 -04:00
let revocation_key = ignore_error! ( chan_utils ::derive_private_revocation_key ( & self . secp_ctx , & per_commitment_key , & revocation_base_key ) ) ;
2019-03-30 22:12:55 -04:00
( self . secp_ctx . sign ( & sighash , & revocation_key ) , redeemscript , revocation_key )
2018-04-24 00:19:52 -04:00
} ,
2018-11-26 19:50:16 -05:00
Storage ::Watchtower { .. } = > {
2018-04-24 00:19:52 -04:00
unimplemented! ( ) ;
}
} ;
2019-01-16 15:45:05 -05:00
$input . witness . push ( sig . serialize_der ( ) . to_vec ( ) ) ;
2018-04-24 00:19:52 -04:00
$input . witness [ 0 ] . push ( SigHashType ::All as u8 ) ;
if $htlc_idx . is_none ( ) {
$input . witness . push ( vec! ( 1 ) ) ;
2017-12-25 01:05:27 -05:00
} else {
2018-04-24 00:19:52 -04:00
$input . witness . push ( revocation_pubkey . serialize ( ) . to_vec ( ) ) ;
2017-12-25 01:05:27 -05:00
}
2019-03-30 22:12:55 -04:00
$input . witness . push ( redeemscript . clone ( ) . into_bytes ( ) ) ;
( redeemscript , revocation_key )
2017-12-25 01:05:27 -05:00
}
2018-04-24 00:19:52 -04:00
}
}
2019-01-04 14:37:48 -05:00
if let Some ( ref per_commitment_data ) = per_commitment_option {
2018-04-24 00:19:52 -04:00
inputs . reserve_exact ( per_commitment_data . len ( ) ) ;
2019-01-04 14:37:48 -05:00
for ( idx , & ( ref htlc , _ ) ) in per_commitment_data . iter ( ) . enumerate ( ) {
2019-01-06 17:02:53 -05:00
if let Some ( transaction_output_index ) = htlc . transaction_output_index {
let expected_script = chan_utils ::get_htlc_redeemscript_with_explicit_keys ( & htlc , & a_htlc_key , & b_htlc_key , & revocation_pubkey ) ;
if transaction_output_index as usize > = tx . output . len ( ) | |
tx . output [ transaction_output_index as usize ] . value ! = htlc . amount_msat / 1000 | |
tx . output [ transaction_output_index as usize ] . script_pubkey ! = expected_script . to_v0_p2wsh ( ) {
2019-05-30 20:54:02 -04:00
return ( txn_to_broadcast , ( commitment_txid , watch_outputs ) , spendable_outputs ) ; // Corrupted per_commitment_data, fuck this user
2019-01-06 17:02:53 -05:00
}
let input = TxIn {
previous_output : BitcoinOutPoint {
txid : commitment_txid ,
vout : transaction_output_index ,
} ,
script_sig : Script ::new ( ) ,
sequence : 0xfffffffd ,
witness : Vec ::new ( ) ,
2018-04-24 00:19:52 -04:00
} ;
2019-01-06 17:02:53 -05:00
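// HTLC outputs expiring comfortably in the future are aggregated into the single justice tx
// built below to save on fees; ones near expiry get their own claim tx immediately so a
// tight deadline on one output cannot hold up or endanger the rest of the claim.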
if htlc . cltv_expiry > height + CLTV_SHARED_CLAIM_BUFFER {
inputs . push ( input ) ;
2019-03-30 22:12:55 -04:00
inputs_desc . push ( if htlc . offered { InputDescriptors ::RevokedOfferedHTLC } else { InputDescriptors ::RevokedReceivedHTLC } ) ;
2019-07-18 17:27:48 -04:00
inputs_info . push ( ( Some ( idx ) , tx . output [ transaction_output_index as usize ] . value , htlc . cltv_expiry ) ) ;
2019-03-30 22:12:55 -04:00
total_value + = tx . output [ transaction_output_index as usize ] . value ;
2019-01-06 17:02:53 -05:00
} else {
let mut single_htlc_tx = Transaction {
version : 2 ,
lock_time : 0 ,
input : vec ! [ input ] ,
output : vec ! ( TxOut {
script_pubkey : self . destination_script . clone ( ) ,
2019-04-08 21:11:16 -04:00
value : htlc . amount_msat / 1000 ,
2019-01-06 17:02:53 -05:00
} ) ,
} ;
2019-04-14 16:13:44 -04:00
let predicted_weight = single_htlc_tx . get_weight ( ) + Self ::get_witnesses_weight ( & [ if htlc . offered { InputDescriptors ::RevokedOfferedHTLC } else { InputDescriptors ::RevokedReceivedHTLC } ] ) ;
2019-07-18 17:27:48 -04:00
let height_timer = Self ::get_height_timer ( height , htlc . cltv_expiry ) ;
2019-03-30 22:12:55 -04:00
let mut used_feerate ;
2019-07-02 15:52:58 -04:00
if subtract_high_prio_fee! ( self , fee_estimator , single_htlc_tx . output [ 0 ] . value , predicted_weight , used_feerate ) {
2019-06-14 18:45:38 -04:00
let sighash_parts = bip143 ::SighashComponents ::new ( & single_htlc_tx ) ;
2019-03-30 22:12:55 -04:00
let ( redeemscript , revocation_key ) = sign_input! ( sighash_parts , single_htlc_tx . input [ 0 ] , Some ( idx ) , htlc . amount_msat / 1000 ) ;
2019-06-14 18:45:38 -04:00
assert! ( predicted_weight > = single_htlc_tx . get_weight ( ) ) ;
2019-07-04 16:09:19 -04:00
log_trace! ( self , " Outpoint {}:{} is being being claimed, if it doesn't succeed, a bumped claiming txn is going to be broadcast at height {} " , single_htlc_tx . input [ 0 ] . previous_output . txid , single_htlc_tx . input [ 0 ] . previous_output . vout , height_timer ) ;
2019-12-09 22:18:20 -05:00
let mut per_input_material = HashMap ::with_capacity ( 1 ) ;
per_input_material . insert ( single_htlc_tx . input [ 0 ] . previous_output , InputMaterial ::Revoked { script : redeemscript , pubkey : Some ( revocation_pubkey ) , key : revocation_key , is_htlc : true , amount : htlc . amount_msat / 1000 } ) ;
2019-12-09 16:59:08 -05:00
match self . claimable_outpoints . entry ( single_htlc_tx . input [ 0 ] . previous_output ) {
hash_map ::Entry ::Occupied ( _ ) = > { } ,
hash_map ::Entry ::Vacant ( entry ) = > { entry . insert ( ( single_htlc_tx . txid ( ) , height ) ) ; }
}
2019-12-09 22:18:20 -05:00
match self . pending_claim_requests . entry ( single_htlc_tx . txid ( ) ) {
2019-03-30 22:12:55 -04:00
hash_map ::Entry ::Occupied ( _ ) = > { } ,
2019-12-09 22:18:20 -05:00
hash_map ::Entry ::Vacant ( entry ) = > { entry . insert ( ClaimTxBumpMaterial { height_timer , feerate_previous : used_feerate , soonest_timelock : htlc . cltv_expiry , per_input_material } ) ; }
2019-03-30 22:12:55 -04:00
}
2019-06-14 18:45:38 -04:00
txn_to_broadcast . push ( single_htlc_tx ) ;
}
2019-01-06 17:02:53 -05:00
}
2017-12-25 01:05:27 -05:00
}
}
2018-04-24 00:19:52 -04:00
}
2018-12-21 14:32:44 -05:00
if ! inputs . is_empty ( ) | | ! txn_to_broadcast . is_empty ( ) | | per_commitment_option . is_some ( ) { // ie we're confident this is actually ours
2018-04-24 00:19:52 -04:00
// We're definitely a remote commitment transaction!
2018-12-09 12:17:27 -05:00
log_trace! ( self , " Got broadcast of revoked remote commitment transaction, generating general spend tx with {} inputs and {} other txn to broadcast " , inputs . len ( ) , txn_to_broadcast . len ( ) ) ;
2018-09-07 01:40:41 +00:00
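// Register every output of the revoked commitment tx for monitoring: if the counterparty
// later confirms HTLC-Success/HTLC-Timeout transactions on top of it, those can still be
// claimed with the revocation key once they are seen.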
watch_outputs . append ( & mut tx . output . clone ( ) ) ;
2018-10-24 13:59:03 -04:00
self . remote_commitment_txn_on_chain . insert ( commitment_txid , ( commitment_number , tx . output . iter ( ) . map ( | output | { output . script_pubkey . clone ( ) } ) . collect ( ) ) ) ;
2018-12-21 14:32:44 -05:00
2018-12-27 14:12:11 -05:00
macro_rules ! check_htlc_fails {
( $txid : expr , $commitment_tx : expr ) = > {
2019-03-04 18:02:02 +01:00
if let Some ( ref outpoints ) = self . remote_claimable_outpoints . get ( $txid ) {
2019-01-04 14:37:48 -05:00
for & ( ref htlc , ref source_option ) in outpoints . iter ( ) {
if let & Some ( ref source ) = source_option {
2019-07-18 18:50:03 -04:00
log_info! ( self , " Failing HTLC with payment_hash {} from {} remote commitment tx due to broadcast of revoked remote commitment transaction, waiting for confirmation (at height {}) " , log_bytes! ( htlc . payment_hash . 0 ) , $commitment_tx , height + ANTI_REORG_DELAY - 1 ) ;
match self . onchain_events_waiting_threshold_conf . entry ( height + ANTI_REORG_DELAY - 1 ) {
2019-05-30 20:54:02 -04:00
hash_map ::Entry ::Occupied ( mut entry ) = > {
let e = entry . get_mut ( ) ;
2019-03-30 21:56:51 -04:00
e . retain ( | ref event | {
match * * event {
OnchainEvent ::HTLCUpdate { ref htlc_update } = > {
return htlc_update . 0 ! = * * source
} ,
_ = > return true
}
} ) ;
e . push ( OnchainEvent ::HTLCUpdate { htlc_update : ( ( * * source ) . clone ( ) , htlc . payment_hash . clone ( ) ) } ) ;
2019-05-30 20:54:02 -04:00
}
hash_map ::Entry ::Vacant ( entry ) = > {
2019-03-30 21:56:51 -04:00
entry . insert ( vec! [ OnchainEvent ::HTLCUpdate { htlc_update : ( ( * * source ) . clone ( ) , htlc . payment_hash . clone ( ) ) } ] ) ;
2019-05-30 20:54:02 -04:00
}
}
2019-01-04 14:37:48 -05:00
}
2018-12-21 14:32:44 -05:00
}
}
}
2018-12-27 14:12:11 -05:00
}
if let Storage ::Local { ref current_remote_commitment_txid , ref prev_remote_commitment_txid , .. } = self . key_storage {
if let & Some ( ref txid ) = current_remote_commitment_txid {
check_htlc_fails! ( txid , " current " ) ;
}
2018-12-21 14:32:44 -05:00
if let & Some ( ref txid ) = prev_remote_commitment_txid {
2018-12-27 14:12:11 -05:00
check_htlc_fails! ( txid , " remote " ) ;
2018-12-21 14:32:44 -05:00
}
}
// No need to check local commitment txn, symmetric HTLCSource must be present as per-htlc data on remote commitment tx
2018-04-24 00:19:52 -04:00
}
2019-05-30 20:54:02 -04:00
if inputs . is_empty ( ) { return ( txn_to_broadcast , ( commitment_txid , watch_outputs ) , spendable_outputs ) ; } // Nothing to be done...probably a false positive/local tx
2017-12-25 01:05:27 -05:00
let outputs = vec! ( TxOut {
script_pubkey : self . destination_script . clone ( ) ,
2019-04-08 21:11:16 -04:00
value : total_value ,
2017-12-25 01:05:27 -05:00
} ) ;
let mut spend_tx = Transaction {
version : 2 ,
lock_time : 0 ,
input : inputs ,
output : outputs ,
} ;
2019-06-14 18:45:38 -04:00
2019-03-30 22:12:55 -04:00
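// Estimate the final weight (including the witnesses that have not been built yet) so a
// high-priority fee can be deducted from the claimed value; if the fee would consume the
// whole output, return early rather than broadcast an uneconomic claim.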
let predicted_weight = spend_tx . get_weight ( ) + Self ::get_witnesses_weight ( & inputs_desc [ .. ] ) ;
let mut used_feerate ;
2019-07-02 15:52:58 -04:00
if ! subtract_high_prio_fee! ( self , fee_estimator , spend_tx . output [ 0 ] . value , predicted_weight , used_feerate ) {
2019-05-30 20:54:02 -04:00
return ( txn_to_broadcast , ( commitment_txid , watch_outputs ) , spendable_outputs ) ;
2019-06-14 18:45:38 -04:00
}
2017-12-25 01:05:27 -05:00
2018-03-26 14:03:59 -04:00
let sighash_parts = bip143 ::SighashComponents ::new ( & spend_tx ) ;
2017-12-25 01:05:27 -05:00
2019-12-09 22:18:20 -05:00
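// Record everything needed to rebuild and fee-bump this claim later: per_input_material keeps
// the signing data for each claimed outpoint, claimable_outpoints maps each outpoint back to
// its claim request, and pending_claim_requests holds the bump schedule keyed by the claim
// txid (height_timer is when a replacement should go out if this tx hasn't confirmed).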
let mut per_input_material = HashMap ::with_capacity ( spend_tx . input . len ( ) ) ;
let mut soonest_timelock = ::std ::u32 ::MAX ;
for info in inputs_info . iter ( ) {
if info . 2 < = soonest_timelock {
soonest_timelock = info . 2 ;
}
}
let height_timer = Self ::get_height_timer ( height , soonest_timelock ) ;
2019-12-09 16:59:08 -05:00
let spend_txid = spend_tx . txid ( ) ;
2019-03-30 22:12:55 -04:00
for ( input , info ) in spend_tx . input . iter_mut ( ) . zip ( inputs_info . iter ( ) ) {
let ( redeemscript , revocation_key ) = sign_input! ( sighash_parts , input , info . 0 , info . 1 ) ;
2019-07-04 16:09:19 -04:00
log_trace! ( self , " Outpoint {}:{} is being being claimed, if it doesn't succeed, a bumped claiming txn is going to be broadcast at height {} " , input . previous_output . txid , input . previous_output . vout , height_timer ) ;
2019-12-09 22:18:20 -05:00
per_input_material . insert ( input . previous_output , InputMaterial ::Revoked { script : redeemscript , pubkey : if info . 0. is_some ( ) { Some ( revocation_pubkey ) } else { None } , key : revocation_key , is_htlc : if info . 0. is_some ( ) { true } else { false } , amount : info . 1 } ) ;
2019-12-09 16:59:08 -05:00
match self . claimable_outpoints . entry ( input . previous_output ) {
hash_map ::Entry ::Occupied ( _ ) = > { } ,
hash_map ::Entry ::Vacant ( entry ) = > { entry . insert ( ( spend_txid , height ) ) ; }
2019-03-30 22:12:55 -04:00
}
2017-12-25 01:05:27 -05:00
}
2019-12-09 16:59:08 -05:00
match self . pending_claim_requests . entry ( spend_txid ) {
2019-12-09 22:18:20 -05:00
hash_map ::Entry ::Occupied ( _ ) = > { } ,
hash_map ::Entry ::Vacant ( entry ) = > { entry . insert ( ClaimTxBumpMaterial { height_timer , feerate_previous : used_feerate , soonest_timelock , per_input_material } ) ; }
}
2019-04-14 16:13:44 -04:00
assert! ( predicted_weight > = spend_tx . get_weight ( ) ) ;
2017-12-25 01:05:27 -05:00
2018-10-19 02:44:40 +00:00
spendable_outputs . push ( SpendableOutputDescriptor ::StaticOutput {
outpoint : BitcoinOutPoint { txid : spend_tx . txid ( ) , vout : 0 } ,
output : spend_tx . output [ 0 ] . clone ( ) ,
} ) ;
2018-04-24 00:19:52 -04:00
txn_to_broadcast . push ( spend_tx ) ;
} else if let Some ( per_commitment_data ) = per_commitment_option {
2018-07-08 16:31:48 -04:00
// While this isn't useful yet, there is a potential race where if a counterparty
// revokes a state at the same time as the commitment transaction for that state is
// confirmed, and the watchtower receives the block before the user, the user could
// upload a new ChannelMonitor with the revocation secret but the watchtower has
// already processed the block, resulting in the remote_commitment_txn_on_chain entry
// not being generated by the above conditional. Thus, to be safe, we go ahead and
// insert it here.
2018-09-07 01:40:41 +00:00
watch_outputs . append ( & mut tx . output . clone ( ) ) ;
2018-10-24 13:59:03 -04:00
self . remote_commitment_txn_on_chain . insert ( commitment_txid , ( commitment_number , tx . output . iter ( ) . map ( | output | { output . script_pubkey . clone ( ) } ) . collect ( ) ) ) ;
2018-07-08 16:31:48 -04:00
2018-12-19 19:36:27 -05:00
log_trace! ( self , " Got broadcast of non-revoked remote commitment transaction {} " , commitment_txid ) ;
2018-12-21 15:16:46 -05:00
macro_rules ! check_htlc_fails {
( $txid : expr , $commitment_tx : expr , $id : tt ) = > {
2019-03-04 18:02:02 +01:00
if let Some ( ref latest_outpoints ) = self . remote_claimable_outpoints . get ( $txid ) {
2019-01-04 14:37:48 -05:00
$id : for & ( ref htlc , ref source_option ) in latest_outpoints . iter ( ) {
if let & Some ( ref source ) = source_option {
// Check if the HTLC is present in the commitment transaction that was
// broadcast, but not if it was below the dust limit, which we should
// fail backwards immediately as there is no way for us to learn the
// payment_preimage.
// Note that if the dust limit were allowed to change between
// commitment transactions we'd want to check whether *any*
// broadcastable commitment transaction has the HTLC in it, but it
// cannot currently change after channel initialization, so we don't
// need to here.
for & ( ref broadcast_htlc , ref broadcast_source ) in per_commitment_data . iter ( ) {
if broadcast_htlc . transaction_output_index . is_some ( ) & & Some ( source ) = = broadcast_source . as_ref ( ) {
continue $id ;
}
2018-12-21 15:16:46 -05:00
}
2019-01-04 14:37:48 -05:00
log_trace! ( self , " Failing HTLC with payment_hash {} from {} remote commitment tx due to broadcast of remote commitment transaction " , log_bytes! ( htlc . payment_hash . 0 ) , $commitment_tx ) ;
2019-07-18 18:50:03 -04:00
match self . onchain_events_waiting_threshold_conf . entry ( height + ANTI_REORG_DELAY - 1 ) {
2019-05-30 20:54:02 -04:00
hash_map ::Entry ::Occupied ( mut entry ) = > {
let e = entry . get_mut ( ) ;
2019-03-30 21:56:51 -04:00
e . retain ( | ref event | {
match * * event {
OnchainEvent ::HTLCUpdate { ref htlc_update } = > {
return htlc_update . 0 ! = * * source
} ,
_ = > return true
}
} ) ;
e . push ( OnchainEvent ::HTLCUpdate { htlc_update : ( ( * * source ) . clone ( ) , htlc . payment_hash . clone ( ) ) } ) ;
2019-05-30 20:54:02 -04:00
}
hash_map ::Entry ::Vacant ( entry ) = > {
2019-03-30 21:56:51 -04:00
entry . insert ( vec! [ OnchainEvent ::HTLCUpdate { htlc_update : ( ( * * source ) . clone ( ) , htlc . payment_hash . clone ( ) ) } ] ) ;
2019-05-30 20:54:02 -04:00
}
}
2018-12-21 15:16:46 -05:00
}
}
}
}
}
if let Storage ::Local { ref current_remote_commitment_txid , ref prev_remote_commitment_txid , .. } = self . key_storage {
if let & Some ( ref txid ) = current_remote_commitment_txid {
check_htlc_fails! ( txid , " current " , ' current_loop ) ;
}
if let & Some ( ref txid ) = prev_remote_commitment_txid {
check_htlc_fails! ( txid , " previous " , ' prev_loop ) ;
}
}
2018-04-24 00:19:52 -04:00
if let Some ( revocation_points ) = self . their_cur_revocation_points {
let revocation_point_option =
if revocation_points . 0 = = commitment_number { Some ( & revocation_points . 1 ) }
else if let Some ( point ) = revocation_points . 2. as_ref ( ) {
if revocation_points . 0 = = commitment_number + 1 { Some ( point ) } else { None }
} else { None } ;
if let Some ( revocation_point ) = revocation_point_option {
let ( revocation_pubkey , b_htlc_key ) = match self . key_storage {
2018-11-26 19:50:16 -05:00
Storage ::Local { ref revocation_base_key , ref htlc_base_key , .. } = > {
2018-08-20 17:13:07 -04:00
( ignore_error! ( chan_utils ::derive_public_revocation_key ( & self . secp_ctx , revocation_point , & PublicKey ::from_secret_key ( & self . secp_ctx , & revocation_base_key ) ) ) ,
ignore_error! ( chan_utils ::derive_public_key ( & self . secp_ctx , revocation_point , & PublicKey ::from_secret_key ( & self . secp_ctx , & htlc_base_key ) ) ) )
2018-04-24 00:19:52 -04:00
} ,
2018-11-26 19:50:16 -05:00
Storage ::Watchtower { ref revocation_base_key , ref htlc_base_key , .. } = > {
2018-04-24 00:19:52 -04:00
( ignore_error! ( chan_utils ::derive_public_revocation_key ( & self . secp_ctx , revocation_point , & revocation_base_key ) ) ,
ignore_error! ( chan_utils ::derive_public_key ( & self . secp_ctx , revocation_point , & htlc_base_key ) ) )
} ,
} ;
let a_htlc_key = match self . their_htlc_base_key {
2019-05-30 20:54:02 -04:00
None = > return ( txn_to_broadcast , ( commitment_txid , watch_outputs ) , spendable_outputs ) ,
2018-04-24 00:19:52 -04:00
Some ( their_htlc_base_key ) = > ignore_error! ( chan_utils ::derive_public_key ( & self . secp_ctx , revocation_point , & their_htlc_base_key ) ) ,
} ;
2018-11-01 03:22:56 +00:00
for ( idx , outp ) in tx . output . iter ( ) . enumerate ( ) {
if outp . script_pubkey . is_v0_p2wpkh ( ) {
match self . key_storage {
2018-11-26 19:50:16 -05:00
Storage ::Local { ref payment_base_key , .. } = > {
2018-11-01 03:22:56 +00:00
if let Ok ( local_key ) = chan_utils ::derive_private_key ( & self . secp_ctx , & revocation_point , & payment_base_key ) {
spendable_outputs . push ( SpendableOutputDescriptor ::DynamicOutputP2WPKH {
outpoint : BitcoinOutPoint { txid : commitment_txid , vout : idx as u32 } ,
key : local_key ,
output : outp . clone ( ) ,
} ) ;
}
2018-11-20 15:09:47 -05:00
} ,
2018-11-26 19:50:16 -05:00
Storage ::Watchtower { .. } = > { }
2018-11-01 03:22:56 +00:00
}
break; // Only the to_remote output is claimable
}
}
2018-04-24 00:19:52 -04:00
let mut total_value = 0 ;
let mut inputs = Vec ::new ( ) ;
2019-03-30 22:12:55 -04:00
let mut inputs_desc = Vec ::new ( ) ;
let mut inputs_info = Vec ::new ( ) ;
2018-04-24 00:19:52 -04:00
macro_rules ! sign_input {
( $sighash_parts : expr , $input : expr , $amount : expr , $preimage : expr ) = > {
{
2019-03-30 22:12:55 -04:00
let ( sig , redeemscript , htlc_key ) = match self . key_storage {
2018-11-26 19:50:16 -05:00
Storage ::Local { ref htlc_base_key , .. } = > {
2019-01-04 14:37:48 -05:00
let htlc = & per_commitment_option . unwrap ( ) [ $input . sequence as usize ] . 0 ;
2018-04-24 00:19:52 -04:00
let redeemscript = chan_utils ::get_htlc_redeemscript_with_explicit_keys ( htlc , & a_htlc_key , & b_htlc_key , & revocation_pubkey ) ;
2019-01-17 17:36:49 -05:00
let sighash = hash_to_message! ( & $sighash_parts . sighash_all ( & $input , & redeemscript , $amount ) [ .. ] ) ;
2018-04-24 00:19:52 -04:00
let htlc_key = ignore_error! ( chan_utils ::derive_private_key ( & self . secp_ctx , revocation_point , & htlc_base_key ) ) ;
2019-03-30 22:12:55 -04:00
( self . secp_ctx . sign ( & sighash , & htlc_key ) , redeemscript , htlc_key )
2018-04-24 00:19:52 -04:00
} ,
2018-11-26 19:50:16 -05:00
Storage ::Watchtower { .. } = > {
2018-04-24 00:19:52 -04:00
unimplemented! ( ) ;
}
} ;
2019-01-16 15:45:05 -05:00
$input . witness . push ( sig . serialize_der ( ) . to_vec ( ) ) ;
2018-04-24 00:19:52 -04:00
$input . witness [ 0 ] . push ( SigHashType ::All as u8 ) ;
$input . witness . push ( $preimage ) ;
2019-03-30 22:12:55 -04:00
$input . witness . push ( redeemscript . clone ( ) . into_bytes ( ) ) ;
( redeemscript , htlc_key )
2018-04-24 00:19:52 -04:00
}
}
}
2017-12-25 01:05:27 -05:00
2019-01-04 14:37:48 -05:00
for ( idx , & ( ref htlc , _ ) ) in per_commitment_data . iter ( ) . enumerate ( ) {
2019-01-06 17:02:53 -05:00
if let Some ( transaction_output_index ) = htlc . transaction_output_index {
let expected_script = chan_utils ::get_htlc_redeemscript_with_explicit_keys ( & htlc , & a_htlc_key , & b_htlc_key , & revocation_pubkey ) ;
if transaction_output_index as usize > = tx . output . len ( ) | |
tx . output [ transaction_output_index as usize ] . value ! = htlc . amount_msat / 1000 | |
tx . output [ transaction_output_index as usize ] . script_pubkey ! = expected_script . to_v0_p2wsh ( ) {
2019-05-30 20:54:02 -04:00
return ( txn_to_broadcast , ( commitment_txid , watch_outputs ) , spendable_outputs ) ; // Corrupted per_commitment_data, fuck this user
2019-01-06 17:02:53 -05:00
}
if let Some ( payment_preimage ) = self . payment_preimages . get ( & htlc . payment_hash ) {
2019-11-12 19:27:55 -05:00
if htlc . offered {
let input = TxIn {
previous_output : BitcoinOutPoint {
txid : commitment_txid ,
vout : transaction_output_index ,
} ,
script_sig : Script ::new ( ) ,
sequence : idx as u32 , // reset to 0xfffffffd in sign_input
witness : Vec ::new ( ) ,
2019-01-06 17:02:53 -05:00
} ;
2019-11-12 19:27:55 -05:00
if htlc . cltv_expiry > height + CLTV_SHARED_CLAIM_BUFFER {
inputs . push ( input ) ;
inputs_desc . push ( if htlc . offered { InputDescriptors ::OfferedHTLC } else { InputDescriptors ::ReceivedHTLC } ) ;
inputs_info . push ( ( payment_preimage , tx . output [ transaction_output_index as usize ] . value , htlc . cltv_expiry ) ) ;
total_value + = tx . output [ transaction_output_index as usize ] . value ;
} else {
let mut single_htlc_tx = Transaction {
version : 2 ,
lock_time : 0 ,
input : vec ! [ input ] ,
output : vec ! ( TxOut {
script_pubkey : self . destination_script . clone ( ) ,
value : htlc . amount_msat / 1000 ,
} ) ,
} ;
let predicted_weight = single_htlc_tx . get_weight ( ) + Self ::get_witnesses_weight ( & [ if htlc . offered { InputDescriptors ::OfferedHTLC } else { InputDescriptors ::ReceivedHTLC } ] ) ;
let height_timer = Self ::get_height_timer ( height , htlc . cltv_expiry ) ;
let mut used_feerate ;
2019-07-02 15:52:58 -04:00
if subtract_high_prio_fee! ( self , fee_estimator , single_htlc_tx . output [ 0 ] . value , predicted_weight , used_feerate ) {
2019-11-12 19:27:55 -05:00
let sighash_parts = bip143 ::SighashComponents ::new ( & single_htlc_tx ) ;
let ( redeemscript , htlc_key ) = sign_input! ( sighash_parts , single_htlc_tx . input [ 0 ] , htlc . amount_msat / 1000 , payment_preimage . 0. to_vec ( ) ) ;
assert! ( predicted_weight > = single_htlc_tx . get_weight ( ) ) ;
spendable_outputs . push ( SpendableOutputDescriptor ::StaticOutput {
outpoint : BitcoinOutPoint { txid : single_htlc_tx . txid ( ) , vout : 0 } ,
output : single_htlc_tx . output [ 0 ] . clone ( ) ,
} ) ;
2019-07-04 16:09:19 -04:00
log_trace! ( self , " Outpoint {}:{} is being being claimed, if it doesn't succeed, a bumped claiming txn is going to be broadcast at height {} " , single_htlc_tx . input [ 0 ] . previous_output . txid , single_htlc_tx . input [ 0 ] . previous_output . vout , height_timer ) ;
2019-12-09 22:18:20 -05:00
let mut per_input_material = HashMap ::with_capacity ( 1 ) ;
2019-07-03 10:26:17 -04:00
per_input_material . insert ( single_htlc_tx . input [ 0 ] . previous_output , InputMaterial ::RemoteHTLC { script : redeemscript , key : htlc_key , preimage : Some ( * payment_preimage ) , amount : htlc . amount_msat / 1000 , locktime : 0 } ) ;
2019-12-09 16:59:08 -05:00
match self . claimable_outpoints . entry ( single_htlc_tx . input [ 0 ] . previous_output ) {
hash_map ::Entry ::Occupied ( _ ) = > { } ,
hash_map ::Entry ::Vacant ( entry ) = > { entry . insert ( ( single_htlc_tx . txid ( ) , height ) ) ; }
}
2019-12-09 22:18:20 -05:00
match self . pending_claim_requests . entry ( single_htlc_tx . txid ( ) ) {
2019-11-12 19:27:55 -05:00
hash_map ::Entry ::Occupied ( _ ) = > { } ,
2019-12-09 22:18:20 -05:00
hash_map ::Entry ::Vacant ( entry ) = > { entry . insert ( ClaimTxBumpMaterial { height_timer , feerate_previous : used_feerate , soonest_timelock : htlc . cltv_expiry , per_input_material } ) ; }
2019-11-12 19:27:55 -05:00
}
txn_to_broadcast . push ( single_htlc_tx ) ;
2019-03-30 22:12:55 -04:00
}
2019-06-14 18:45:38 -04:00
}
2019-01-06 17:02:53 -05:00
}
}
if ! htlc . offered {
// TODO: If the HTLC has already expired, potentially merge it with the
// rest of the claim transaction, as above.
let input = TxIn {
previous_output : BitcoinOutPoint {
txid : commitment_txid ,
vout : transaction_output_index ,
} ,
script_sig : Script ::new ( ) ,
sequence : idx as u32 ,
witness : Vec ::new ( ) ,
} ;
let mut timeout_tx = Transaction {
2018-04-24 00:19:52 -04:00
version : 2 ,
2019-01-06 17:02:53 -05:00
lock_time : htlc . cltv_expiry ,
2018-04-24 00:19:52 -04:00
input : vec ! [ input ] ,
output : vec ! ( TxOut {
script_pubkey : self . destination_script . clone ( ) ,
2019-01-06 17:02:53 -05:00
value : htlc . amount_msat / 1000 ,
2018-04-24 00:19:52 -04:00
} ) ,
} ;
2019-03-30 22:12:55 -04:00
let predicted_weight = timeout_tx . get_weight ( ) + Self ::get_witnesses_weight ( & [ InputDescriptors ::ReceivedHTLC ] ) ;
2019-07-18 17:27:48 -04:00
let height_timer = Self ::get_height_timer ( height , htlc . cltv_expiry ) ;
2019-03-30 22:12:55 -04:00
let mut used_feerate ;
2019-07-02 15:52:58 -04:00
if subtract_high_prio_fee! ( self , fee_estimator , timeout_tx . output [ 0 ] . value , predicted_weight , used_feerate ) {
2019-03-30 22:12:55 -04:00
let sighash_parts = bip143 ::SighashComponents ::new ( & timeout_tx ) ;
let ( redeemscript , htlc_key ) = sign_input! ( sighash_parts , timeout_tx . input [ 0 ] , htlc . amount_msat / 1000 , vec! [ 0 ] ) ;
assert! ( predicted_weight > = timeout_tx . get_weight ( ) ) ;
//TODO: track SpendableOutputDescriptor
2019-07-04 16:09:19 -04:00
log_trace! ( self , " Outpoint {}:{} is being being claimed, if it doesn't succeed, a bumped claiming txn is going to be broadcast at height {} " , timeout_tx . input [ 0 ] . previous_output . txid , timeout_tx . input [ 0 ] . previous_output . vout , height_timer ) ;
2019-12-09 22:18:20 -05:00
let mut per_input_material = HashMap ::with_capacity ( 1 ) ;
2019-07-03 10:26:17 -04:00
per_input_material . insert ( timeout_tx . input [ 0 ] . previous_output , InputMaterial ::RemoteHTLC { script : redeemscript , key : htlc_key , preimage : None , amount : htlc . amount_msat / 1000 , locktime : htlc . cltv_expiry } ) ;
2019-12-09 16:59:08 -05:00
match self . claimable_outpoints . entry ( timeout_tx . input [ 0 ] . previous_output ) {
hash_map ::Entry ::Occupied ( _ ) = > { } ,
hash_map ::Entry ::Vacant ( entry ) = > { entry . insert ( ( timeout_tx . txid ( ) , height ) ) ; }
}
2019-12-09 22:18:20 -05:00
match self . pending_claim_requests . entry ( timeout_tx . txid ( ) ) {
2019-03-30 22:12:55 -04:00
hash_map ::Entry ::Occupied ( _ ) = > { } ,
2019-12-09 22:18:20 -05:00
hash_map ::Entry ::Vacant ( entry ) = > { entry . insert ( ClaimTxBumpMaterial { height_timer , feerate_previous : used_feerate , soonest_timelock : htlc . cltv_expiry , per_input_material } ) ; }
2019-03-30 22:12:55 -04:00
}
}
2019-01-06 17:02:53 -05:00
txn_to_broadcast . push ( timeout_tx ) ;
2017-12-25 01:05:27 -05:00
}
2018-04-24 00:19:52 -04:00
}
}
2017-12-25 01:05:27 -05:00
2019-05-30 20:54:02 -04:00
if inputs . is_empty ( ) { return ( txn_to_broadcast , ( commitment_txid , watch_outputs ) , spendable_outputs ) ; } // Nothing to be done...probably a false positive/local tx
2018-04-24 00:19:52 -04:00
let outputs = vec! ( TxOut {
script_pubkey : self . destination_script . clone ( ) ,
2019-04-08 21:11:16 -04:00
value : total_value
2018-04-24 00:19:52 -04:00
} ) ;
let mut spend_tx = Transaction {
version : 2 ,
lock_time : 0 ,
input : inputs ,
output : outputs ,
} ;
2019-03-30 22:12:55 -04:00
2019-11-15 17:19:46 -05:00
let predicted_weight = spend_tx . get_weight ( ) + Self ::get_witnesses_weight ( & inputs_desc [ .. ] ) ;
2019-03-30 22:12:55 -04:00
let mut used_feerate ;
2019-07-02 15:52:58 -04:00
if ! subtract_high_prio_fee! ( self , fee_estimator , spend_tx . output [ 0 ] . value , predicted_weight , used_feerate ) {
2019-05-30 20:54:02 -04:00
return ( txn_to_broadcast , ( commitment_txid , watch_outputs ) , spendable_outputs ) ;
2019-06-14 18:45:38 -04:00
}
2018-04-24 00:19:52 -04:00
let sighash_parts = bip143 ::SighashComponents ::new ( & spend_tx ) ;
2019-12-09 22:18:20 -05:00
let mut per_input_material = HashMap ::with_capacity ( spend_tx . input . len ( ) ) ;
let mut soonest_timelock = ::std ::u32 ::MAX ;
for info in inputs_info . iter ( ) {
if info . 2 < = soonest_timelock {
soonest_timelock = info . 2 ;
}
}
let height_timer = Self ::get_height_timer ( height , soonest_timelock ) ;
2019-12-09 16:59:08 -05:00
let spend_txid = spend_tx . txid ( ) ;
2019-03-30 22:12:55 -04:00
for ( input , info ) in spend_tx . input . iter_mut ( ) . zip ( inputs_info . iter ( ) ) {
let ( redeemscript , htlc_key ) = sign_input! ( sighash_parts , input , info . 1 , ( info . 0 ) . 0. to_vec ( ) ) ;
2019-07-04 16:09:19 -04:00
log_trace! ( self , " Outpoint {}:{} is being being claimed, if it doesn't succeed, a bumped claiming txn is going to be broadcast at height {} " , input . previous_output . txid , input . previous_output . vout , height_timer ) ;
2019-07-03 10:26:17 -04:00
per_input_material . insert ( input . previous_output , InputMaterial ::RemoteHTLC { script : redeemscript , key : htlc_key , preimage : Some ( * ( info . 0 ) ) , amount : info . 1 , locktime : 0 } ) ;
2019-12-09 16:59:08 -05:00
match self . claimable_outpoints . entry ( input . previous_output ) {
hash_map ::Entry ::Occupied ( _ ) = > { } ,
hash_map ::Entry ::Vacant ( entry ) = > { entry . insert ( ( spend_txid , height ) ) ; }
}
2019-12-09 22:18:20 -05:00
}
2019-12-09 16:59:08 -05:00
match self . pending_claim_requests . entry ( spend_txid ) {
2019-12-09 22:18:20 -05:00
hash_map ::Entry ::Occupied ( _ ) = > { } ,
hash_map ::Entry ::Vacant ( entry ) = > { entry . insert ( ClaimTxBumpMaterial { height_timer , feerate_previous : used_feerate , soonest_timelock , per_input_material } ) ; }
2017-12-25 01:05:27 -05:00
}
2019-04-14 16:13:44 -04:00
assert! ( predicted_weight > = spend_tx . get_weight ( ) ) ;
2018-10-19 02:44:40 +00:00
spendable_outputs . push ( SpendableOutputDescriptor ::StaticOutput {
outpoint : BitcoinOutPoint { txid : spend_tx . txid ( ) , vout : 0 } ,
output : spend_tx . output [ 0 ] . clone ( ) ,
} ) ;
2018-04-24 00:19:52 -04:00
txn_to_broadcast . push ( spend_tx ) ;
2017-12-25 01:05:27 -05:00
}
}
2019-08-02 16:29:12 -04:00
} else if let Some ( ( ref to_remote_rescue , ref local_key ) ) = self . to_remote_rescue {
for ( idx , outp ) in tx . output . iter ( ) . enumerate ( ) {
if to_remote_rescue = = & outp . script_pubkey {
spendable_outputs . push ( SpendableOutputDescriptor ::DynamicOutputP2WPKH {
outpoint : BitcoinOutPoint { txid : commitment_txid , vout : idx as u32 } ,
key : local_key . clone ( ) ,
output : outp . clone ( ) ,
} ) ;
}
}
2017-12-25 01:05:27 -05:00
}
2019-05-30 20:54:02 -04:00
( txn_to_broadcast , ( commitment_txid , watch_outputs ) , spendable_outputs )
2017-12-25 01:05:27 -05:00
}
2019-01-24 16:41:51 +02:00
/// Attempts to claim a remote HTLC-Success/HTLC-Timeout's outputs using the revocation key
2019-03-30 22:12:55 -04:00
fn check_spend_remote_htlc ( & mut self , tx : & Transaction , commitment_number : u64 , height : u32 , fee_estimator : & FeeEstimator ) -> ( Option < Transaction > , Option < SpendableOutputDescriptor > ) {
2019-12-06 16:01:41 -05:00
//TODO: send back new outputs to guarantee pending_claim_request consistency
2018-09-26 10:48:30 -04:00
if tx . input . len ( ) ! = 1 | | tx . output . len ( ) ! = 1 {
2018-10-19 02:44:40 +00:00
return ( None , None )
2018-09-26 10:48:30 -04:00
}
2018-09-11 01:40:53 +00:00
macro_rules ! ignore_error {
( $thing : expr ) = > {
match $thing {
Ok ( a ) = > a ,
2018-10-19 02:44:40 +00:00
Err ( _ ) = > return ( None , None )
2018-09-11 01:40:53 +00:00
}
} ;
}
2018-11-15 07:47:07 -05:00
let secret = if let Some ( secret ) = self . get_secret ( commitment_number ) { secret } else { return ( None , None ) ; } ;
2019-01-16 15:45:05 -05:00
let per_commitment_key = ignore_error! ( SecretKey ::from_slice ( & secret ) ) ;
2018-09-13 11:34:26 -04:00
let per_commitment_point = PublicKey ::from_secret_key ( & self . secp_ctx , & per_commitment_key ) ;
2018-09-11 01:40:53 +00:00
let revocation_pubkey = match self . key_storage {
2018-11-26 19:50:16 -05:00
Storage ::Local { ref revocation_base_key , .. } = > {
2018-09-11 01:40:53 +00:00
ignore_error! ( chan_utils ::derive_public_revocation_key ( & self . secp_ctx , & per_commitment_point , & PublicKey ::from_secret_key ( & self . secp_ctx , & revocation_base_key ) ) )
} ,
2018-11-26 19:50:16 -05:00
Storage ::Watchtower { ref revocation_base_key , .. } = > {
2018-09-11 01:40:53 +00:00
ignore_error! ( chan_utils ::derive_public_revocation_key ( & self . secp_ctx , & per_commitment_point , & revocation_base_key ) )
} ,
} ;
let delayed_key = match self . their_delayed_payment_base_key {
2018-10-19 02:44:40 +00:00
None = > return ( None , None ) ,
2018-09-13 11:34:26 -04:00
Some ( their_delayed_payment_base_key ) = > ignore_error! ( chan_utils ::derive_public_key ( & self . secp_ctx , & per_commitment_point , & their_delayed_payment_base_key ) ) ,
2018-09-11 01:40:53 +00:00
} ;
2019-07-19 17:57:17 -04:00
let redeemscript = chan_utils ::get_revokeable_redeemscript ( & revocation_pubkey , self . our_to_self_delay , & delayed_key ) ;
2018-09-11 01:40:53 +00:00
let revokeable_p2wsh = redeemscript . to_v0_p2wsh ( ) ;
2018-09-26 10:48:30 -04:00
let htlc_txid = tx . txid ( ) ; //TODO: This is gonna be a performance bottleneck for watchtowers!
2018-09-11 01:40:53 +00:00
let mut inputs = Vec ::new ( ) ;
let mut amount = 0 ;
if tx . output [ 0 ] . script_pubkey = = revokeable_p2wsh { //HTLC transactions have one txin, one txout
inputs . push ( TxIn {
previous_output : BitcoinOutPoint {
txid : htlc_txid ,
vout : 0 ,
} ,
script_sig : Script ::new ( ) ,
sequence : 0xfffffffd ,
witness : Vec ::new ( ) ,
} ) ;
amount = tx . output [ 0 ] . value ;
}
if ! inputs . is_empty ( ) {
let outputs = vec! ( TxOut {
script_pubkey : self . destination_script . clone ( ) ,
2019-04-09 19:43:03 -04:00
value : amount
2018-09-11 01:40:53 +00:00
} ) ;
let mut spend_tx = Transaction {
version : 2 ,
lock_time : 0 ,
input : inputs ,
output : outputs ,
} ;
2019-04-14 16:13:44 -04:00
let predicted_weight = spend_tx . get_weight ( ) + Self ::get_witnesses_weight ( & [ InputDescriptors ::RevokedOutput ] ) ;
2019-03-30 22:12:55 -04:00
let mut used_feerate ;
2019-07-02 15:52:58 -04:00
if ! subtract_high_prio_fee! ( self , fee_estimator , spend_tx . output [ 0 ] . value , predicted_weight , used_feerate ) {
2019-06-14 18:45:38 -04:00
return ( None , None ) ;
}
2018-09-11 01:40:53 +00:00
let sighash_parts = bip143 ::SighashComponents ::new ( & spend_tx ) ;
2019-03-30 22:12:55 -04:00
let ( sig , revocation_key ) = match self . key_storage {
2018-11-26 19:50:16 -05:00
Storage ::Local { ref revocation_base_key , .. } = > {
2019-01-17 17:36:49 -05:00
let sighash = hash_to_message! ( & sighash_parts . sighash_all ( & spend_tx . input [ 0 ] , & redeemscript , amount ) [ .. ] ) ;
2018-09-11 01:40:53 +00:00
let revocation_key = ignore_error! ( chan_utils ::derive_private_revocation_key ( & self . secp_ctx , & per_commitment_key , & revocation_base_key ) ) ;
2019-03-30 22:12:55 -04:00
( self . secp_ctx . sign ( & sighash , & revocation_key ) , revocation_key )
2018-09-11 01:40:53 +00:00
}
2018-11-26 19:50:16 -05:00
Storage ::Watchtower { .. } = > {
2018-09-11 01:40:53 +00:00
unimplemented! ( ) ;
}
} ;
2019-01-16 15:45:05 -05:00
spend_tx . input [ 0 ] . witness . push ( sig . serialize_der ( ) . to_vec ( ) ) ;
2018-09-11 01:40:53 +00:00
spend_tx . input [ 0 ] . witness [ 0 ] . push ( SigHashType ::All as u8 ) ;
spend_tx . input [ 0 ] . witness . push ( vec! ( 1 ) ) ;
2019-03-30 22:12:55 -04:00
spend_tx . input [ 0 ] . witness . push ( redeemscript . clone ( ) . into_bytes ( ) ) ;
2018-09-11 01:40:53 +00:00
2019-04-14 16:13:44 -04:00
assert! ( predicted_weight > = spend_tx . get_weight ( ) ) ;
2018-10-19 02:44:40 +00:00
let outpoint = BitcoinOutPoint { txid : spend_tx . txid ( ) , vout : 0 } ;
let output = spend_tx . output [ 0 ] . clone ( ) ;
2019-07-18 17:27:48 -04:00
let height_timer = Self ::get_height_timer ( height , self . their_to_self_delay . unwrap ( ) as u32 ) ; // We can safely unwrap given we are past channel opening
2019-07-04 16:09:19 -04:00
log_trace!(self, "Outpoint {}:{} is being claimed, if it doesn't succeed, a bumped claiming txn is going to be broadcast at height {}", spend_tx.input[0].previous_output.txid, spend_tx.input[0].previous_output.vout, height_timer);
2019-12-09 22:18:20 -05:00
let mut per_input_material = HashMap ::with_capacity ( 1 ) ;
per_input_material . insert ( spend_tx . input [ 0 ] . previous_output , InputMaterial ::Revoked { script : redeemscript , pubkey : None , key : revocation_key , is_htlc : false , amount : tx . output [ 0 ] . value } ) ;
2019-12-09 16:59:08 -05:00
match self . claimable_outpoints . entry ( spend_tx . input [ 0 ] . previous_output ) {
hash_map ::Entry ::Occupied ( _ ) = > { } ,
hash_map ::Entry ::Vacant ( entry ) = > { entry . insert ( ( spend_tx . txid ( ) , height ) ) ; }
}
2019-12-09 22:18:20 -05:00
match self . pending_claim_requests . entry ( spend_tx . txid ( ) ) {
2019-03-30 22:12:55 -04:00
hash_map ::Entry ::Occupied ( _ ) = > { } ,
2019-12-09 22:18:20 -05:00
hash_map ::Entry ::Vacant ( entry ) = > { entry . insert ( ClaimTxBumpMaterial { height_timer , feerate_previous : used_feerate , soonest_timelock : height + self . our_to_self_delay as u32 , per_input_material } ) ; }
2019-03-30 22:12:55 -04:00
}
2018-10-19 02:44:40 +00:00
( Some ( spend_tx ) , Some ( SpendableOutputDescriptor ::StaticOutput { outpoint , output } ) )
} else { ( None , None ) }
2018-09-11 01:40:53 +00:00
}
2019-12-13 14:56:57 -05:00
fn broadcast_by_local_state ( & self , local_tx : & LocalSignedTx , delayed_payment_base_key : & SecretKey , height : u32 ) -> ( Vec < Transaction > , Vec < SpendableOutputDescriptor > , Vec < TxOut > , Vec < ( Sha256dHash , ClaimTxBumpMaterial ) > ) {
2018-04-24 00:19:52 -04:00
let mut res = Vec ::with_capacity ( local_tx . htlc_outputs . len ( ) ) ;
2018-10-19 02:44:40 +00:00
let mut spendable_outputs = Vec ::with_capacity ( local_tx . htlc_outputs . len ( ) ) ;
2018-11-26 19:54:00 -05:00
let mut watch_outputs = Vec ::with_capacity ( local_tx . htlc_outputs . len ( ) ) ;
2019-03-30 22:12:55 -04:00
let mut pending_claims = Vec ::with_capacity ( local_tx . htlc_outputs . len ( ) ) ;
2018-04-24 00:19:52 -04:00
2018-10-31 02:45:50 +00:00
macro_rules ! add_dynamic_output {
( $father_tx : expr , $vout : expr ) = > {
2019-12-13 14:56:57 -05:00
if let Ok ( local_delayedkey ) = chan_utils ::derive_private_key ( & self . secp_ctx , & local_tx . per_commitment_point , delayed_payment_base_key ) {
spendable_outputs . push ( SpendableOutputDescriptor ::DynamicOutputP2WSH {
outpoint : BitcoinOutPoint { txid : $father_tx . txid ( ) , vout : $vout } ,
key : local_delayedkey ,
witness_script : chan_utils ::get_revokeable_redeemscript ( & local_tx . revocation_key , self . our_to_self_delay , & local_tx . delayed_payment_key ) ,
to_self_delay : self . our_to_self_delay ,
output : $father_tx . output [ $vout as usize ] . clone ( ) ,
} ) ;
2018-10-31 02:45:50 +00:00
}
}
}
let redeemscript = chan_utils ::get_revokeable_redeemscript ( & local_tx . revocation_key , self . their_to_self_delay . unwrap ( ) , & local_tx . delayed_payment_key ) ;
let revokeable_p2wsh = redeemscript . to_v0_p2wsh ( ) ;
2019-12-13 01:58:08 -05:00
for ( idx , output ) in local_tx . tx . without_valid_witness ( ) . output . iter ( ) . enumerate ( ) {
2018-10-31 02:45:50 +00:00
if output . script_pubkey = = revokeable_p2wsh {
2019-12-13 01:58:08 -05:00
add_dynamic_output! ( local_tx . tx . without_valid_witness ( ) , idx as u32 ) ;
2018-10-31 02:45:50 +00:00
break ;
}
}
2019-12-13 14:56:57 -05:00
if let & Storage ::Local { ref htlc_base_key , .. } = & self . key_storage {
for & ( ref htlc , ref sigs , _ ) in local_tx . htlc_outputs . iter ( ) {
if let Some ( transaction_output_index ) = htlc . transaction_output_index {
if let & Some ( ref their_sig ) = sigs {
if htlc . offered {
log_trace! ( self , " Broadcasting HTLC-Timeout transaction against local commitment transactions " ) ;
let mut htlc_timeout_tx = chan_utils ::build_htlc_transaction ( & local_tx . txid , local_tx . feerate_per_kw , self . their_to_self_delay . unwrap ( ) , htlc , & local_tx . delayed_payment_key , & local_tx . revocation_key ) ;
let ( our_sig , htlc_script ) = match
chan_utils ::sign_htlc_transaction ( & mut htlc_timeout_tx , their_sig , & None , htlc , & local_tx . a_htlc_key , & local_tx . b_htlc_key , & local_tx . revocation_key , & local_tx . per_commitment_point , htlc_base_key , & self . secp_ctx ) {
Ok ( res ) = > res ,
Err ( _ ) = > continue ,
} ;
2018-04-24 00:19:52 -04:00
2019-12-13 14:56:57 -05:00
add_dynamic_output! ( htlc_timeout_tx , 0 ) ;
2019-07-18 17:27:48 -04:00
let height_timer = Self ::get_height_timer ( height , htlc . cltv_expiry ) ;
2019-12-09 22:18:20 -05:00
let mut per_input_material = HashMap ::with_capacity ( 1 ) ;
2019-12-13 14:56:57 -05:00
per_input_material . insert ( htlc_timeout_tx . input [ 0 ] . previous_output , InputMaterial ::LocalHTLC { script : htlc_script , sigs : ( * their_sig , our_sig ) , preimage : None , amount : htlc . amount_msat / 1000 } ) ;
2019-12-09 16:59:08 -05:00
//TODO: with option_simplified_commitment track outpoint too
2019-12-13 14:56:57 -05:00
log_trace!(self, "Outpoint {}:{} is being claimed, if it doesn't succeed, a bumped claiming txn is going to be broadcast at height {}", htlc_timeout_tx.input[0].previous_output.txid, htlc_timeout_tx.input[0].previous_output.vout, height_timer);
pending_claims . push ( ( htlc_timeout_tx . txid ( ) , ClaimTxBumpMaterial { height_timer , feerate_previous : 0 , soonest_timelock : htlc . cltv_expiry , per_input_material } ) ) ;
res . push ( htlc_timeout_tx ) ;
} else {
if let Some ( payment_preimage ) = self . payment_preimages . get ( & htlc . payment_hash ) {
log_trace! ( self , " Broadcasting HTLC-Success transaction against local commitment transactions " ) ;
let mut htlc_success_tx = chan_utils ::build_htlc_transaction ( & local_tx . txid , local_tx . feerate_per_kw , self . their_to_self_delay . unwrap ( ) , htlc , & local_tx . delayed_payment_key , & local_tx . revocation_key ) ;
let ( our_sig , htlc_script ) = match
chan_utils ::sign_htlc_transaction ( & mut htlc_success_tx , their_sig , & Some ( * payment_preimage ) , htlc , & local_tx . a_htlc_key , & local_tx . b_htlc_key , & local_tx . revocation_key , & local_tx . per_commitment_point , htlc_base_key , & self . secp_ctx ) {
Ok ( res ) = > res ,
Err ( _ ) = > continue ,
} ;
add_dynamic_output! ( htlc_success_tx , 0 ) ;
let height_timer = Self ::get_height_timer ( height , htlc . cltv_expiry ) ;
let mut per_input_material = HashMap ::with_capacity ( 1 ) ;
per_input_material . insert ( htlc_success_tx . input [ 0 ] . previous_output , InputMaterial ::LocalHTLC { script : htlc_script , sigs : ( * their_sig , our_sig ) , preimage : Some ( * payment_preimage ) , amount : htlc . amount_msat / 1000 } ) ;
//TODO: with option_simplified_commitment track outpoint too
log_trace!(self, "Outpoint {}:{} is being claimed, if it doesn't succeed, a bumped claiming txn is going to be broadcast at height {}", htlc_success_tx.input[0].previous_output.txid, htlc_success_tx.input[0].previous_output.vout, height_timer);
pending_claims . push ( ( htlc_success_tx . txid ( ) , ClaimTxBumpMaterial { height_timer , feerate_previous : 0 , soonest_timelock : htlc . cltv_expiry , per_input_material } ) ) ;
res . push ( htlc_success_tx ) ;
}
2019-01-04 14:37:48 -05:00
}
2019-12-13 14:56:57 -05:00
watch_outputs . push ( local_tx . tx . without_valid_witness ( ) . output [ transaction_output_index as usize ] . clone ( ) ) ;
} else { panic! ( " Should have sigs for non-dust local tx outputs! " ) }
}
2017-12-25 01:05:27 -05:00
}
2018-04-24 00:19:52 -04:00
}
2019-03-30 22:12:55 -04:00
( res , spendable_outputs , watch_outputs , pending_claims )
2018-04-24 00:19:52 -04:00
}
/// Attempts to claim any claimable HTLCs in a commitment transaction which was not (yet)
/// revoked using data in local_claimable_outpoints.
/// Should not be used if check_spend_revoked_transaction succeeds.
2019-02-06 20:28:55 -05:00
fn check_spend_local_transaction ( & mut self , tx : & Transaction , height : u32 ) -> ( Vec < Transaction > , Vec < SpendableOutputDescriptor > , ( Sha256dHash , Vec < TxOut > ) ) {
2018-04-24 00:19:52 -04:00
let commitment_txid = tx . txid ( ) ;
2019-02-06 20:28:55 -05:00
let mut local_txn = Vec ::new ( ) ;
let mut spendable_outputs = Vec ::new ( ) ;
let mut watch_outputs = Vec ::new ( ) ;
macro_rules ! wait_threshold_conf {
2019-03-30 21:56:51 -04:00
( $height : expr , $source : expr , $commitment_tx : expr , $payment_hash : expr ) = > {
2019-07-18 18:50:03 -04:00
log_trace!(self, "Failing HTLC with payment_hash {} from {} local commitment tx due to broadcast of transaction, waiting for confirmation (at height {})", log_bytes!($payment_hash.0), $commitment_tx, height + ANTI_REORG_DELAY - 1);
match self . onchain_events_waiting_threshold_conf . entry ( $height + ANTI_REORG_DELAY - 1 ) {
2019-02-06 20:28:55 -05:00
hash_map ::Entry ::Occupied ( mut entry ) = > {
let e = entry . get_mut ( ) ;
2019-03-30 21:56:51 -04:00
e . retain ( | ref event | {
match * * event {
OnchainEvent ::HTLCUpdate { ref htlc_update } = > {
return htlc_update . 0 ! = $source
} ,
_ = > return true
}
} ) ;
e . push ( OnchainEvent ::HTLCUpdate { htlc_update : ( $source , $payment_hash ) } ) ;
2019-02-06 20:28:55 -05:00
}
hash_map ::Entry ::Vacant ( entry ) = > {
2019-03-30 21:56:51 -04:00
entry . insert ( vec! [ OnchainEvent ::HTLCUpdate { htlc_update : ( $source , $payment_hash ) } ] ) ;
2019-02-06 20:28:55 -05:00
}
}
}
}
macro_rules ! append_onchain_update {
( $updates : expr ) = > {
local_txn . append ( & mut $updates . 0 ) ;
spendable_outputs . append ( & mut $updates . 1 ) ;
watch_outputs . append ( & mut $updates . 2 ) ;
2019-03-30 22:12:55 -04:00
for claim in $updates . 3 {
2019-12-09 22:18:20 -05:00
match self . pending_claim_requests . entry ( claim . 0 ) {
2019-03-30 22:12:55 -04:00
hash_map ::Entry ::Occupied ( _ ) = > { } ,
hash_map ::Entry ::Vacant ( entry ) = > { entry . insert ( claim . 1 ) ; }
}
}
2019-02-06 20:28:55 -05:00
}
}
// The set of HTLCs may differ between the latest and previous local commitment txn; in case one of them hits the chain, ensure we cancel all HTLCs backward
let mut is_local_tx = false ;
2019-12-13 01:58:08 -05:00
if let & mut Some ( ref mut local_tx ) = & mut self . current_local_signed_commitment_tx {
if local_tx . txid = = commitment_txid {
match self . key_storage {
Storage ::Local { ref funding_key , .. } = > {
local_tx . tx . add_local_sig ( funding_key , self . funding_redeemscript . as_ref ( ) . unwrap ( ) , self . channel_value_satoshis . unwrap ( ) , & self . secp_ctx ) ;
} ,
_ = > { } ,
}
}
}
2018-04-24 00:19:52 -04:00
if let & Some ( ref local_tx ) = & self . current_local_signed_commitment_tx {
if local_tx . txid = = commitment_txid {
2019-02-06 20:28:55 -05:00
is_local_tx = true ;
2019-01-04 14:38:05 -05:00
log_trace! ( self , " Got latest local commitment tx broadcast, searching for available HTLCs to claim " ) ;
2019-12-13 01:58:08 -05:00
assert! ( local_tx . tx . has_local_sig ( ) ) ;
2018-10-19 02:44:40 +00:00
match self . key_storage {
2019-12-13 14:56:57 -05:00
Storage ::Local { ref delayed_payment_base_key , .. } = > {
append_onchain_update! ( self . broadcast_by_local_state ( local_tx , delayed_payment_base_key , height ) ) ;
2018-10-19 02:44:40 +00:00
} ,
2019-12-13 14:56:57 -05:00
Storage ::Watchtower { .. } = > { }
2018-10-19 02:44:40 +00:00
}
2018-04-24 00:19:52 -04:00
}
}
2019-12-13 01:58:08 -05:00
if let & mut Some ( ref mut local_tx ) = & mut self . prev_local_signed_commitment_tx {
if local_tx . txid = = commitment_txid {
match self . key_storage {
Storage ::Local { ref funding_key , .. } = > {
local_tx . tx . add_local_sig ( funding_key , self . funding_redeemscript . as_ref ( ) . unwrap ( ) , self . channel_value_satoshis . unwrap ( ) , & self . secp_ctx ) ;
} ,
_ = > { } ,
}
}
}
2018-04-24 00:19:52 -04:00
if let & Some ( ref local_tx ) = & self . prev_local_signed_commitment_tx {
if local_tx . txid = = commitment_txid {
2019-02-06 20:28:55 -05:00
is_local_tx = true ;
2019-01-04 14:38:05 -05:00
log_trace! ( self , " Got previous local commitment tx broadcast, searching for available HTLCs to claim " ) ;
2019-12-13 01:58:08 -05:00
assert! ( local_tx . tx . has_local_sig ( ) ) ;
2018-10-19 02:44:40 +00:00
match self . key_storage {
2019-12-13 14:56:57 -05:00
Storage ::Local { ref delayed_payment_base_key , .. } = > {
append_onchain_update! ( self . broadcast_by_local_state ( local_tx , delayed_payment_base_key , height ) ) ;
2018-10-19 02:44:40 +00:00
} ,
2019-12-13 14:56:57 -05:00
Storage ::Watchtower { .. } = > { }
2018-10-19 02:44:40 +00:00
}
2018-04-24 00:19:52 -04:00
}
}
2019-02-06 20:28:55 -05:00
macro_rules ! fail_dust_htlcs_after_threshold_conf {
( $local_tx : expr ) = > {
for & ( ref htlc , _ , ref source ) in & $local_tx . htlc_outputs {
if htlc . transaction_output_index . is_none ( ) {
if let & Some ( ref source ) = source {
2019-03-30 21:56:51 -04:00
wait_threshold_conf!(height, source.clone(), "latest", htlc.payment_hash.clone());
2019-02-06 20:28:55 -05:00
}
}
}
}
}
if is_local_tx {
if let & Some ( ref local_tx ) = & self . current_local_signed_commitment_tx {
fail_dust_htlcs_after_threshold_conf! ( local_tx ) ;
}
if let & Some ( ref local_tx ) = & self . prev_local_signed_commitment_tx {
fail_dust_htlcs_after_threshold_conf! ( local_tx ) ;
}
}
( local_txn , spendable_outputs , ( commitment_txid , watch_outputs ) )
2018-04-24 00:19:52 -04:00
}
2017-12-25 01:05:27 -05:00
2018-11-16 20:52:33 -05:00
/// Generate a spendable output event when closing_transaction gets registered onchain.
fn check_spend_closing_transaction ( & self , tx : & Transaction ) -> Option < SpendableOutputDescriptor > {
2018-12-02 18:22:40 -05:00
if tx . input [ 0 ] . sequence = = 0xFFFFFFFF & & ! tx . input [ 0 ] . witness . is_empty ( ) & & tx . input [ 0 ] . witness . last ( ) . unwrap ( ) . len ( ) = = 71 {
2018-11-16 20:52:33 -05:00
match self . key_storage {
2018-11-26 19:50:16 -05:00
Storage ::Local { ref shutdown_pubkey , .. } = > {
2018-12-13 16:23:22 -05:00
let our_channel_close_key_hash = Hash160 ::hash ( & shutdown_pubkey . serialize ( ) ) ;
2019-01-16 15:45:05 -05:00
let shutdown_script = Builder ::new ( ) . push_opcode ( opcodes ::all ::OP_PUSHBYTES_0 ) . push_slice ( & our_channel_close_key_hash [ .. ] ) . into_script ( ) ;
2018-11-16 20:52:33 -05:00
for ( idx , output ) in tx . output . iter ( ) . enumerate ( ) {
if shutdown_script = = output . script_pubkey {
return Some ( SpendableOutputDescriptor ::StaticOutput {
outpoint : BitcoinOutPoint { txid : tx . txid ( ) , vout : idx as u32 } ,
output : output . clone ( ) ,
} ) ;
}
}
}
2018-11-26 19:50:16 -05:00
Storage ::Watchtower { .. } = > {
2018-11-16 20:52:33 -05:00
//TODO: we need to ensure an offline client will generate the event when it
2019-01-24 16:41:51 +02:00
// comes back online after only the watchtower saw the transaction
2018-11-16 20:52:33 -05:00
}
}
}
None
}
2019-08-01 10:54:02 -04:00
/// Used by ChannelManager deserialization to broadcast the latest local state if its copy of
/// the Channel was out-of-date. You may also use it to get a broadcastable local toxic tx in
/// case of a fallen-behind state, i.e. when receiving a channel_reestablish with a proof that
/// our remote side knows a higher revocation secret than the local commitment number we are
/// aware of. Broadcasting these transactions is UNSAFE, as they allow the remote side to punish
/// you. Nevertheless, you may want to broadcast them if the remote party doesn't close the
/// channel with their higher commitment transaction after a substantial amount of time (a month
/// or even a year) in order to get your funds back. It may be best to contact the other node
/// operator out-of-band to coordinate, if that option is available to you. In any case, the
/// choice is up to the user.
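///
/// A minimal usage sketch (`monitor` and `broadcaster` are illustrative names; `broadcaster`
/// is assumed to implement BroadcasterInterface):
///
/// ```ignore
/// for tx in monitor.get_latest_local_commitment_txn() {
///     // Only broadcast if you accept the punishment risk described above.
///     broadcaster.broadcast_transaction(&tx);
/// }
/// ```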
2019-12-13 01:58:08 -05:00
pub fn get_latest_local_commitment_txn ( & mut self ) -> Vec < Transaction > {
2019-12-12 22:42:08 -05:00
log_trace! ( self , " Getting signed latest local commitment transaction! " ) ;
2019-12-13 01:58:08 -05:00
if let & mut Some ( ref mut local_tx ) = & mut self . current_local_signed_commitment_tx {
match self . key_storage {
Storage ::Local { ref funding_key , .. } = > {
local_tx . tx . add_local_sig ( funding_key , self . funding_redeemscript . as_ref ( ) . unwrap ( ) , self . channel_value_satoshis . unwrap ( ) , & self . secp_ctx ) ;
} ,
_ = > { } ,
}
}
2018-10-25 12:56:02 -04:00
if let & Some ( ref local_tx ) = & self . current_local_signed_commitment_tx {
2019-12-13 01:58:08 -05:00
let mut res = vec! [ local_tx . tx . with_valid_witness ( ) . clone ( ) ] ;
2018-10-25 12:56:02 -04:00
match self . key_storage {
2019-12-13 14:56:57 -05:00
Storage ::Local { ref delayed_payment_base_key , .. } = > {
res . append ( & mut self . broadcast_by_local_state ( local_tx , delayed_payment_base_key , 0 ) . 0 ) ;
2019-03-30 22:12:55 -04:00
// We throw away the generated waiting_first_conf data as we aren't (yet) confirmed and we don't actually know what the caller wants to do.
// The data will be re-generated and tracked in check_spend_local_transaction if we get a confirmation.
2018-10-25 12:56:02 -04:00
} ,
_ = > panic! ( " Can only broadcast by local channelmonitor " ) ,
} ;
res
} else {
Vec ::new ( )
}
}
2019-04-08 21:11:16 -04:00
fn block_connected ( & mut self , txn_matched : & [ & Transaction ] , height : u32 , block_hash : & Sha256dHash , broadcaster : & BroadcasterInterface , fee_estimator : & FeeEstimator ) -> ( Vec < ( Sha256dHash , Vec < TxOut > ) > , Vec < SpendableOutputDescriptor > , Vec < ( HTLCSource , Option < PaymentPreimage > , PaymentHash ) > ) {
2019-12-10 16:28:33 -05:00
log_trace! ( self , " Block {} at height {} connected with {} txn matched " , block_hash , height , txn_matched . len ( ) ) ;
2018-09-07 01:40:41 +00:00
let mut watch_outputs = Vec ::new ( ) ;
2018-10-19 02:44:40 +00:00
let mut spendable_outputs = Vec ::new ( ) ;
2018-12-10 23:56:02 -05:00
let mut htlc_updated = Vec ::new ( ) ;
2019-12-10 15:38:04 -05:00
let mut bump_candidates = HashSet ::new ( ) ;
2018-04-24 00:19:52 -04:00
for tx in txn_matched {
2018-09-13 11:35:23 -04:00
if tx . input . len ( ) = = 1 {
// Assuming our keys were not leaked (in which case we're screwed no matter what),
// commitment transactions and HTLC transactions will all only ever have one input,
// which is an easy way to filter out any potential non-matching txn for lazy
// filters.
let prevout = & tx . input [ 0 ] . previous_output ;
let mut txn : Vec < Transaction > = Vec ::new ( ) ;
2018-11-26 19:50:16 -05:00
let funding_txo = match self . key_storage {
Storage ::Local { ref funding_info , .. } = > {
funding_info . clone ( )
}
Storage ::Watchtower { .. } = > {
unimplemented! ( ) ;
}
} ;
if funding_txo . is_none ( ) | | ( prevout . txid = = funding_txo . as_ref ( ) . unwrap ( ) . 0. txid & & prevout . vout = = funding_txo . as_ref ( ) . unwrap ( ) . 0. index as u32 ) {
2019-08-02 16:29:12 -04:00
if ( tx . input [ 0 ] . sequence > > 8 * 3 ) as u8 = = 0x80 & & ( tx . lock_time > > 8 * 3 ) as u8 = = 0x20 {
2019-11-18 16:40:05 -05:00
let ( remote_txn , new_outputs , mut spendable_output ) = self . check_spend_remote_transaction ( & tx , height , fee_estimator ) ;
2019-08-02 16:29:12 -04:00
txn = remote_txn ;
2018-11-26 19:54:00 -05:00
spendable_outputs . append ( & mut spendable_output ) ;
if ! new_outputs . 1. is_empty ( ) {
watch_outputs . push ( new_outputs ) ;
}
2019-08-02 16:29:12 -04:00
if txn . is_empty ( ) {
2019-11-18 16:40:05 -05:00
let ( local_txn , mut spendable_output , new_outputs ) = self . check_spend_local_transaction ( & tx , height ) ;
2019-08-02 16:29:12 -04:00
spendable_outputs . append ( & mut spendable_output ) ;
txn = local_txn ;
if ! new_outputs . 1. is_empty ( ) {
watch_outputs . push ( new_outputs ) ;
}
}
2018-04-24 00:19:52 -04:00
}
2018-11-26 19:50:16 -05:00
if ! funding_txo . is_none ( ) & & txn . is_empty ( ) {
2019-11-18 16:40:05 -05:00
if let Some ( spendable_output ) = self . check_spend_closing_transaction ( & tx ) {
2018-11-16 20:52:33 -05:00
spendable_outputs . push ( spendable_output ) ;
}
}
2018-09-11 01:40:53 +00:00
} else {
2018-10-24 13:59:03 -04:00
if let Some ( & ( commitment_number , _ ) ) = self . remote_commitment_txn_on_chain . get ( & prevout . txid ) {
2019-11-18 16:40:05 -05:00
let ( tx , spendable_output ) = self . check_spend_remote_htlc ( & tx , commitment_number , height , fee_estimator ) ;
2018-10-19 02:44:40 +00:00
if let Some ( tx ) = tx {
2018-09-13 11:34:26 -04:00
txn . push ( tx ) ;
}
2018-10-19 02:44:40 +00:00
if let Some ( spendable_output ) = spendable_output {
spendable_outputs . push ( spendable_output ) ;
}
2017-12-25 01:05:27 -05:00
}
}
2018-09-11 01:40:53 +00:00
for tx in txn . iter ( ) {
2019-11-22 17:44:30 -05:00
log_trace! ( self , " Broadcast onchain {} " , log_tx! ( tx ) ) ;
2018-09-11 01:40:53 +00:00
broadcaster . broadcast_transaction ( tx ) ;
}
2019-01-06 15:14:43 -05:00
}
// While all commitment/HTLC-Success/HTLC-Timeout transactions have one input, HTLCs
// can also be resolved in a few other ways which can have more than one output. Thus,
// we call is_resolving_htlc_output here outside of the tx.input.len() == 1 check.
2019-11-18 16:40:05 -05:00
let mut updated = self . is_resolving_htlc_output ( & tx , height ) ;
2019-01-06 15:14:43 -05:00
if updated . len ( ) > 0 {
htlc_updated . append ( & mut updated ) ;
2017-12-25 01:05:27 -05:00
}
2019-12-09 22:18:20 -05:00
// Scan all inputs to check whether any of the spent outpoints is of interest to us
2019-12-09 22:14:47 -05:00
let mut claimed_outputs_material = Vec ::new ( ) ;
2019-03-30 22:12:55 -04:00
for inp in & tx . input {
2019-12-10 15:38:04 -05:00
if let Some ( first_claim_txid_height ) = self . claimable_outpoints . get ( & inp . previous_output ) {
2019-12-09 22:18:20 -05:00
// If outpoint has claim request pending on it...
2019-12-10 15:38:04 -05:00
if let Some ( claim_material ) = self . pending_claim_requests . get_mut ( & first_claim_txid_height . 0 ) {
2019-12-09 22:18:20 -05:00
//... we need to check that the transaction's outpoints match the claim request's
// outpoints to know whether this transaction is the original claim or a bumped one
// issued by us.
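// For example (illustrative): if our claim request covers outpoints {A, B} and the
// confirmed tx spends exactly {A, B}, we treat it as our claim (or an equivalent one)
// and schedule its cleanup after ANTI_REORG_DELAY; if it only spends {A}, A is moved
// out below as a contentious outpoint and the remaining request for {B} is marked for
// a fee bump.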
2019-12-09 22:14:47 -05:00
let mut set_equality = true ;
if claim_material . per_input_material . len ( ) ! = tx . input . len ( ) {
set_equality = false ;
} else {
for ( claim_inp , tx_inp ) in claim_material . per_input_material . keys ( ) . zip ( tx . input . iter ( ) ) {
if * claim_inp ! = tx_inp . previous_output {
set_equality = false ;
}
2019-12-09 22:18:20 -05:00
}
}
2019-12-09 22:14:47 -05:00
2019-12-10 15:45:30 -05:00
macro_rules ! clean_claim_request_after_safety_delay {
( ) = > {
2019-12-10 15:38:04 -05:00
let new_event = OnchainEvent ::Claim { claim_request : first_claim_txid_height . 0. clone ( ) } ;
2019-12-10 15:45:30 -05:00
match self . onchain_events_waiting_threshold_conf . entry ( height + ANTI_REORG_DELAY - 1 ) {
hash_map ::Entry ::Occupied ( mut entry ) = > {
if ! entry . get ( ) . contains ( & new_event ) {
entry . get_mut ( ) . push ( new_event ) ;
}
} ,
hash_map ::Entry ::Vacant ( entry ) = > {
entry . insert ( vec! [ new_event ] ) ;
2019-12-09 22:17:31 -05:00
}
2019-12-09 22:18:20 -05:00
}
}
2019-12-10 15:45:30 -05:00
}
// If this is our transaction (or our counterparty spent all the outputs
// before we could anyway, with the same input order as ours), wait for
// ANTI_REORG_DELAY and clean up the RBF tracking map.
if set_equality {
clean_claim_request_after_safety_delay! ( ) ;
2019-12-09 22:18:20 -05:00
} else { // Otherwise, generate a new claim request with the updated outpoint set
2019-12-09 22:14:47 -05:00
for input in tx . input . iter ( ) {
if let Some ( input_material ) = claim_material . per_input_material . remove ( & input . previous_output ) {
claimed_outputs_material . push ( ( input . previous_output , input_material ) ) ;
2019-12-09 16:59:08 -05:00
}
2019-12-10 15:45:30 -05:00
// If there are no outpoints left to claim in this request, drop it entirely after ANTI_REORG_DELAY.
if claim_material . per_input_material . is_empty ( ) {
clean_claim_request_after_safety_delay! ( ) ;
}
2019-07-02 15:52:58 -04:00
}
//TODO: recompute soonest_timelock to avoid wasting a bit on fees
2019-12-10 15:38:04 -05:00
bump_candidates . insert ( first_claim_txid_height . 0. clone ( ) ) ;
2019-03-30 22:12:55 -04:00
}
2019-12-09 16:59:08 -05:00
break; // No need to iterate further, the tx is either ours or theirs
2019-12-09 22:18:20 -05:00
} else {
panic! ( " Inconsistencies between pending_claim_requests map and claimable_outpoints map " ) ;
2019-03-30 22:12:55 -04:00
}
}
}
2019-12-09 22:14:47 -05:00
for ( outpoint , input_material ) in claimed_outputs_material . drain ( .. ) {
2019-12-09 22:17:31 -05:00
let new_event = OnchainEvent ::ContentiousOutpoint { outpoint , input_material } ;
2019-12-09 16:59:08 -05:00
match self . onchain_events_waiting_threshold_conf . entry ( height + ANTI_REORG_DELAY - 1 ) {
2019-12-09 22:17:31 -05:00
hash_map ::Entry ::Occupied ( mut entry ) = > {
if ! entry . get ( ) . contains ( & new_event ) {
entry . get_mut ( ) . push ( new_event ) ;
}
} ,
2019-12-09 16:59:08 -05:00
hash_map ::Entry ::Vacant ( entry ) = > {
2019-12-09 22:17:31 -05:00
entry . insert ( vec! [ new_event ] ) ;
2019-12-09 16:59:08 -05:00
}
}
}
2017-12-25 01:05:27 -05:00
}
2019-12-13 01:58:08 -05:00
let should_broadcast = if let Some ( _ ) = self . current_local_signed_commitment_tx {
self . would_broadcast_at_height ( height )
} else { false } ;
if let Some ( ref mut cur_local_tx ) = self . current_local_signed_commitment_tx {
if should_broadcast {
match self . key_storage {
Storage ::Local { ref funding_key , .. } = > {
cur_local_tx . tx . add_local_sig ( funding_key , self . funding_redeemscript . as_ref ( ) . unwrap ( ) , self . channel_value_satoshis . unwrap ( ) , & self . secp_ctx ) ;
} ,
_ = > { }
}
}
}
2018-04-24 00:19:52 -04:00
if let Some ( ref cur_local_tx ) = self . current_local_signed_commitment_tx {
2019-12-13 01:58:08 -05:00
if should_broadcast {
log_trace! ( self , " Broadcast onchain {} " , log_tx! ( cur_local_tx . tx . with_valid_witness ( ) ) ) ;
broadcaster . broadcast_transaction ( & cur_local_tx . tx . with_valid_witness ( ) ) ;
2018-10-19 02:44:40 +00:00
match self . key_storage {
2019-12-13 14:56:57 -05:00
Storage ::Local { ref delayed_payment_base_key , .. } = > {
let ( txs , mut spendable_output , new_outputs , _ ) = self . broadcast_by_local_state ( & cur_local_tx , delayed_payment_base_key , height ) ;
2018-11-26 19:54:00 -05:00
spendable_outputs . append ( & mut spendable_output ) ;
if ! new_outputs . is_empty ( ) {
watch_outputs . push ( ( cur_local_tx . txid . clone ( ) , new_outputs ) ) ;
}
2018-10-19 02:44:40 +00:00
for tx in txs {
2019-11-22 17:44:30 -05:00
log_trace! ( self , " Broadcast onchain {} " , log_tx! ( tx ) ) ;
2018-10-19 02:44:40 +00:00
broadcaster . broadcast_transaction ( & tx ) ;
}
} ,
2019-12-13 14:56:57 -05:00
Storage ::Watchtower { .. } = > { } ,
2018-04-24 00:19:52 -04:00
}
}
}
2019-03-30 21:56:51 -04:00
if let Some ( events ) = self . onchain_events_waiting_threshold_conf . remove ( & height ) {
for ev in events {
match ev {
2019-12-09 22:18:20 -05:00
OnchainEvent ::Claim { claim_request } = > {
2019-12-09 22:51:36 -05:00
// We may remove a whole set of claim outpoints here, as they may have
// been aggregated in a single tx and thus claimed atomically
if let Some ( bump_material ) = self . pending_claim_requests . remove ( & claim_request ) {
for outpoint in bump_material . per_input_material . keys ( ) {
self . claimable_outpoints . remove ( & outpoint ) ;
}
}
2019-03-30 21:56:51 -04:00
} ,
OnchainEvent ::HTLCUpdate { htlc_update } = > {
log_trace! ( self , " HTLC {} failure update has got enough confirmations to be passed upstream " , log_bytes! ( ( htlc_update . 1 ) . 0 ) ) ;
htlc_updated . push ( ( htlc_update . 0 , None , htlc_update . 1 ) ) ;
} ,
2019-12-09 16:59:08 -05:00
OnchainEvent ::ContentiousOutpoint { outpoint , .. } = > {
self . claimable_outpoints . remove ( & outpoint ) ;
}
2019-03-30 21:56:51 -04:00
}
2019-05-30 20:54:02 -04:00
}
}
2019-12-10 15:38:04 -05:00
for ( first_claim_txid , ref mut cached_claim_datas ) in self . pending_claim_requests . iter_mut ( ) {
2019-07-02 15:52:58 -04:00
if cached_claim_datas . height_timer = = height {
2019-12-10 15:38:04 -05:00
bump_candidates . insert ( first_claim_txid . clone ( ) ) ;
2019-07-02 15:52:58 -04:00
}
}
2019-12-10 15:38:04 -05:00
for first_claim_txid in bump_candidates . iter ( ) {
if let Some ( ( new_timer , new_feerate ) ) = {
if let Some ( claim_material ) = self . pending_claim_requests . get ( first_claim_txid ) {
if let Some ( ( new_timer , new_feerate , bump_tx ) ) = self . bump_claim_tx ( height , & claim_material , fee_estimator ) {
broadcaster . broadcast_transaction ( & bump_tx ) ;
Some ( ( new_timer , new_feerate ) )
} else { None }
} else { unreachable! ( ) ; }
} {
if let Some ( claim_material ) = self . pending_claim_requests . get_mut ( first_claim_txid ) {
claim_material . height_timer = new_timer ;
claim_material . feerate_previous = new_feerate ;
} else { unreachable! ( ) ; }
2019-07-02 15:52:58 -04:00
}
}
2018-10-24 11:14:12 -04:00
self . last_block_hash = block_hash . clone ( ) ;
2018-12-10 23:56:02 -05:00
( watch_outputs , spendable_outputs , htlc_updated )
2018-04-24 00:19:52 -04:00
}
2019-12-09 16:59:08 -05:00
fn block_disconnected ( & mut self , height : u32 , block_hash : & Sha256dHash , broadcaster : & BroadcasterInterface , fee_estimator : & FeeEstimator ) {
2020-01-17 14:20:23 -05:00
log_trace! ( self , " Block {} at height {} disconnected " , block_hash , height ) ;
2019-12-09 16:59:08 -05:00
let mut bump_candidates = HashMap ::new ( ) ;
if let Some ( events ) = self . onchain_events_waiting_threshold_conf . remove ( & ( height + ANTI_REORG_DELAY - 1 ) ) {
2019-03-30 21:56:51 -04:00
//We may discard:
//- an htlc update, as its failure-trigger tx (revoked commitment tx, non-revoked commitment tx, HTLC-timeout tx) has been disconnected
//- our claim tx on a commitment tx output
2019-12-09 16:59:08 -05:00
//- and we may resurrect an outpoint back into its claimable set and regenerate its claim tx
for ev in events {
match ev {
OnchainEvent ::ContentiousOutpoint { outpoint , input_material } = > {
if let Some ( ancestor_claimable_txid ) = self . claimable_outpoints . get ( & outpoint ) {
if let Some ( claim_material ) = self . pending_claim_requests . get_mut ( & ancestor_claimable_txid . 0 ) {
claim_material . per_input_material . insert ( outpoint , input_material ) ;
// Using a HashMap guarantees that if multiple outpoints get
// resurrected, only one bump claim tx is going to be broadcast
bump_candidates . insert ( ancestor_claimable_txid . clone ( ) , claim_material . clone ( ) ) ;
}
}
} ,
_ = > { } ,
}
}
}
for ( _ , claim_material ) in bump_candidates . iter_mut ( ) {
if let Some ( ( new_timer , new_feerate , bump_tx ) ) = self . bump_claim_tx ( height , & claim_material , fee_estimator ) {
claim_material . height_timer = new_timer ;
claim_material . feerate_previous = new_feerate ;
broadcaster . broadcast_transaction ( & bump_tx ) ;
}
}
for ( ancestor_claim_txid , claim_material ) in bump_candidates . drain ( ) {
self . pending_claim_requests . insert ( ancestor_claim_txid . 0 , claim_material ) ;
}
//TODO: if we implement cross-block aggregated claim transactions we need to refresh the set of outpoints and regenerate the tx, but
// right now, if one of the outpoints gets disconnected, we just erase the whole pending claim request.
let mut remove_request = Vec ::new ( ) ;
self . claimable_outpoints . retain ( | _ , ref v |
if v . 1 = = height {
remove_request . push ( v . 0. clone ( ) ) ;
false
} else { true } ) ;
for req in remove_request {
self . pending_claim_requests . remove ( & req ) ;
2019-02-04 21:21:11 -05:00
}
self . last_block_hash = block_hash . clone ( ) ;
}
2018-09-19 17:37:51 -04:00
pub ( super ) fn would_broadcast_at_height ( & self , height : u32 ) -> bool {
2019-01-04 14:38:05 -05:00
// We need to consider all HTLCs which are:
// * in any unrevoked remote commitment transaction, as they could broadcast said
// transactions and we'd end up in a race, or
// * are in our latest local commitment transaction, as this is the thing we will
// broadcast if we go on-chain.
2019-01-04 14:37:48 -05:00
// Note that we consider HTLCs which were below dust threshold here - while they don't
2018-12-12 14:42:09 -05:00
// strictly imply that we need to fail the channel, we need to go ahead and fail them back
// to the source, and if we don't fail the channel we will have to ensure that the next
// updates that peer sends us are update_fails, failing the channel if not. It's probably
// easier to just fail the channel as this case should be rare enough anyway.
2019-01-04 14:38:05 -05:00
macro_rules ! scan_commitment {
( $htlcs : expr , $local_tx : expr ) = > {
for ref htlc in $htlcs {
// For inbound HTLCs which we know the preimage for, we have to ensure we hit the
// chain with enough room to claim the HTLC without our counterparty being able to
// time out the HTLC first.
// For outbound HTLCs which our counterparty hasn't failed/claimed, our primary
// concern is being able to claim the corresponding inbound HTLC (on another
// channel) before it expires. In fact, we don't even really care if our
// counterparty here claims such an outbound HTLC after it expired as long as we
// can still claim the corresponding HTLC. Thus, to avoid needlessly hitting the
// chain when our counterparty is waiting for expiration to off-chain fail an HTLC
// we give ourselves a few blocks of headroom after expiration before going
// on-chain for an expired HTLC.
// Note that, to avoid a potential attack whereby a node delays claiming an HTLC
// from us until we've reached the point where we go on-chain with the
// corresponding inbound HTLC, we must ensure that outbound HTLCs go on chain at
// least CLTV_CLAIM_BUFFER blocks prior to the inbound HTLC.
2019-07-18 18:50:03 -04:00
// aka outbound_cltv + LATENCY_GRACE_PERIOD_BLOCKS == height - CLTV_CLAIM_BUFFER
2019-01-04 14:38:05 -05:00
// inbound_cltv == height + CLTV_CLAIM_BUFFER
2019-07-18 18:50:03 -04:00
// outbound_cltv + LATENCY_GRACE_PERIOD_BLOCKS + CLTV_CLAIM_BUFFER <= inbound_cltv - CLTV_CLAIM_BUFFER
// LATENCY_GRACE_PERIOD_BLOCKS + 2*CLTV_CLAIM_BUFFER <= inbound_cltv - outbound_cltv
2019-01-11 16:31:30 -05:00
// CLTV_EXPIRY_DELTA <= inbound_cltv - outbound_cltv (by check in ChannelManager::decode_update_add_htlc_onion)
2019-07-18 18:50:03 -04:00
// LATENCY_GRACE_PERIOD_BLOCKS + 2*CLTV_CLAIM_BUFFER <= CLTV_EXPIRY_DELTA
2019-01-11 16:31:30 -05:00
// The final, above, condition is checked for statically in channelmanager
// with CHECK_CLTV_EXPIRY_SANITY_2.
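// Illustrative example (hypothetical constant values): with LATENCY_GRACE_PERIOD_BLOCKS = 3
// and CLTV_CLAIM_BUFFER = 6, an outbound HTLC with cltv_expiry = 100 triggers a force-close
// once height >= 103, while an inbound HTLC with a known preimage and cltv_expiry = 100
// already triggers at height >= 94, leaving room to confirm our claim before the timeout.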
2019-01-04 14:38:05 -05:00
let htlc_outbound = $local_tx = = htlc . offered ;
2019-07-18 18:50:03 -04:00
if ( htlc_outbound & & htlc . cltv_expiry + LATENCY_GRACE_PERIOD_BLOCKS < = height ) | |
2019-01-04 14:38:05 -05:00
( ! htlc_outbound & & htlc . cltv_expiry < = height + CLTV_CLAIM_BUFFER & & self . payment_preimages . contains_key ( & htlc . payment_hash ) ) {
log_info! ( self , " Force-closing channel due to {} HTLC timeout, HTLC expiry is {} " , if htlc_outbound { " outbound " } else { " inbound " } , htlc . cltv_expiry ) ;
return true ;
}
}
}
}
2018-04-24 00:19:52 -04:00
if let Some ( ref cur_local_tx ) = self . current_local_signed_commitment_tx {
2019-01-04 14:38:05 -05:00
scan_commitment! ( cur_local_tx . htlc_outputs . iter ( ) . map ( | & ( ref a , _ , _ ) | a ) , true ) ;
}
if let Storage ::Local { ref current_remote_commitment_txid , ref prev_remote_commitment_txid , .. } = self . key_storage {
if let & Some ( ref txid ) = current_remote_commitment_txid {
if let Some ( ref htlc_outputs ) = self . remote_claimable_outpoints . get ( txid ) {
scan_commitment! ( htlc_outputs . iter ( ) . map ( | & ( ref a , _ ) | a ) , false ) ;
}
}
if let & Some ( ref txid ) = prev_remote_commitment_txid {
if let Some ( ref htlc_outputs ) = self . remote_claimable_outpoints . get ( txid ) {
scan_commitment! ( htlc_outputs . iter ( ) . map ( | & ( ref a , _ ) | a ) , false ) ;
2018-04-24 00:19:52 -04:00
}
}
}
2019-01-04 14:38:05 -05:00
2018-04-24 00:19:52 -04:00
false
2017-12-25 01:05:27 -05:00
}
2018-12-10 23:56:02 -05:00
/// Check whether a broadcast transaction resolves an HTLC output by a success or timeout on a local
/// or remote commitment tx. If so, send back the source, the preimage (if found) and the payment_hash of the resolved HTLC.
2019-02-06 20:02:38 -05:00
fn is_resolving_htlc_output ( & mut self , tx : & Transaction , height : u32 ) -> Vec < ( HTLCSource , Option < PaymentPreimage > , PaymentHash ) > {
2018-12-10 23:56:02 -05:00
let mut htlc_updated = Vec ::new ( ) ;
' outer_loop : for input in & tx . input {
let mut payment_data = None ;
2020-01-14 13:47:01 -05:00
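// The witness shapes checked below: a revocation claim of an offered/accepted HTLC output
// has 3 witness items, with a 33-byte revocation pubkey at index 1 and the HTLC script last;
// an HTLC-Success spend of an accepted HTLC has 5 witness items (the preimage is read at
// index 3 further down); a direct preimage claim of an offered HTLC has 3 witness items
// (preimage at index 1).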
let revocation_sig_claim = ( input . witness . len ( ) = = 3 & & HTLCType ::scriptlen_to_htlctype ( input . witness [ 2 ] . len ( ) ) = = Some ( HTLCType ::OfferedHTLC ) & & input . witness [ 1 ] . len ( ) = = 33 )
| | ( input . witness . len ( ) = = 3 & & HTLCType ::scriptlen_to_htlctype ( input . witness [ 2 ] . len ( ) ) = = Some ( HTLCType ::AcceptedHTLC ) & & input . witness [ 1 ] . len ( ) = = 33 ) ;
let accepted_preimage_claim = input . witness . len ( ) = = 5 & & HTLCType ::scriptlen_to_htlctype ( input . witness [ 4 ] . len ( ) ) = = Some ( HTLCType ::AcceptedHTLC ) ;
let offered_preimage_claim = input . witness . len ( ) = = 3 & & HTLCType ::scriptlen_to_htlctype ( input . witness [ 2 ] . len ( ) ) = = Some ( HTLCType ::OfferedHTLC ) ;
2019-01-06 17:01:29 -05:00
macro_rules ! log_claim {
2019-01-04 14:37:48 -05:00
( $tx_info : expr , $local_tx : expr , $htlc : expr , $source_avail : expr ) = > {
2019-01-06 17:01:29 -05:00
// We found the output in question, but aren't failing it backwards
2019-03-14 20:57:23 -04:00
// as we have no corresponding source and no valid remote commitment txid
// to try a weak source binding with same-hash, same-value still-valid offered HTLC.
// This implies either it is an inbound HTLC or an outbound HTLC on a revoked transaction.
2019-01-04 14:37:48 -05:00
let outbound_htlc = $local_tx = = $htlc . offered ;
2019-01-06 17:01:29 -05:00
if ( $local_tx & & revocation_sig_claim ) | |
2019-01-04 14:37:48 -05:00
( outbound_htlc & & ! $source_avail & & ( accepted_preimage_claim | | offered_preimage_claim ) ) {
2019-01-06 17:01:29 -05:00
log_error! ( self , " Input spending {} ({}:{}) in {} resolves {} HTLC with payment hash {} with {}! " ,
2019-01-04 14:37:48 -05:00
$tx_info , input . previous_output . txid , input . previous_output . vout , tx . txid ( ) ,
if outbound_htlc { " outbound " } else { " inbound " } , log_bytes! ( $htlc . payment_hash . 0 ) ,
2019-01-06 17:01:29 -05:00
if revocation_sig_claim { " revocation sig " } else { " preimage claim after we'd passed the HTLC resolution back " } ) ;
} else {
log_info! ( self , " Input spending {} ({}:{}) in {} resolves {} HTLC with payment hash {} with {} " ,
2019-01-04 14:37:48 -05:00
$tx_info , input . previous_output . txid , input . previous_output . vout , tx . txid ( ) ,
if outbound_htlc { " outbound " } else { " inbound " } , log_bytes! ( $htlc . payment_hash . 0 ) ,
2019-01-06 17:01:29 -05:00
if revocation_sig_claim { " revocation sig " } else if accepted_preimage_claim | | offered_preimage_claim { " preimage " } else { " timeout " } ) ;
}
}
}
2018-12-10 23:56:02 -05:00
2019-03-14 20:57:23 -04:00
macro_rules ! check_htlc_valid_remote {
( $remote_txid : expr , $htlc_output : expr ) = > {
if let & Some ( txid ) = $remote_txid {
for & ( ref pending_htlc , ref pending_source ) in self . remote_claimable_outpoints . get ( & txid ) . unwrap ( ) {
if pending_htlc . payment_hash = = $htlc_output . payment_hash & & pending_htlc . amount_msat = = $htlc_output . amount_msat {
if let & Some ( ref source ) = pending_source {
log_claim! ( " revoked remote commitment tx " , false , pending_htlc , true ) ;
payment_data = Some ( ( ( * * source ) . clone ( ) , $htlc_output . payment_hash ) ) ;
break ;
}
}
}
}
}
}
2018-12-10 23:56:02 -05:00
macro_rules ! scan_commitment {
2019-01-04 14:37:48 -05:00
( $htlcs : expr , $tx_info : expr , $local_tx : expr ) = > {
for ( ref htlc_output , source_option ) in $htlcs {
if Some ( input . previous_output . vout ) = = htlc_output . transaction_output_index {
if let Some ( ref source ) = source_option {
log_claim! ( $tx_info , $local_tx , htlc_output , true ) ;
// We have a resolution of an HTLC either from one of our latest
// local commitment transactions or an unrevoked remote commitment
// transaction. This implies we either learned a preimage, the HTLC
// has timed out, or we screwed up. In any case, we should now
// resolve the source HTLC with the original sender.
payment_data = Some ( ( ( * source ) . clone ( ) , htlc_output . payment_hash ) ) ;
2019-03-14 20:57:23 -04:00
} else if ! $local_tx {
if let Storage ::Local { ref current_remote_commitment_txid , .. } = self . key_storage {
check_htlc_valid_remote! ( current_remote_commitment_txid , htlc_output ) ;
}
if payment_data . is_none ( ) {
if let Storage ::Local { ref prev_remote_commitment_txid , .. } = self . key_storage {
check_htlc_valid_remote! ( prev_remote_commitment_txid , htlc_output ) ;
}
}
}
if payment_data . is_none ( ) {
2019-01-04 14:37:48 -05:00
log_claim! ( $tx_info , $local_tx , htlc_output , false ) ;
2018-12-30 14:21:58 -05:00
continue 'outer_loop ;
2018-12-10 23:56:02 -05:00
}
}
}
}
}
if let Some ( ref current_local_signed_commitment_tx ) = self . current_local_signed_commitment_tx {
if input . previous_output . txid = = current_local_signed_commitment_tx . txid {
2019-01-04 14:37:48 -05:00
scan_commitment! ( current_local_signed_commitment_tx . htlc_outputs . iter ( ) . map ( | & ( ref a , _ , ref b ) | ( a , b . as_ref ( ) ) ) ,
2019-01-06 17:01:29 -05:00
" our latest local commitment tx " , true ) ;
2018-12-10 23:56:02 -05:00
}
}
if let Some ( ref prev_local_signed_commitment_tx ) = self . prev_local_signed_commitment_tx {
if input . previous_output . txid = = prev_local_signed_commitment_tx . txid {
2019-01-04 14:37:48 -05:00
scan_commitment! ( prev_local_signed_commitment_tx . htlc_outputs . iter ( ) . map ( | & ( ref a , _ , ref b ) | ( a , b . as_ref ( ) ) ) ,
2019-01-06 17:01:29 -05:00
" our previous local commitment tx " , true ) ;
2018-12-10 23:56:02 -05:00
}
}
2019-01-04 14:37:48 -05:00
if let Some ( ref htlc_outputs ) = self . remote_claimable_outpoints . get ( & input . previous_output . txid ) {
scan_commitment! ( htlc_outputs . iter ( ) . map ( | & ( ref a , ref b ) | ( a , ( b . as_ref ( ) . clone ( ) ) . map ( | boxed | & * * boxed ) ) ) ,
" remote commitment tx " , false ) ;
2018-12-10 23:56:02 -05:00
}
2019-01-06 17:01:29 -05:00
// Check that scan_commitment, above, decided there is some source worth relaying an
// HTLC resolution backwards to and figure out whether we learned a preimage from it.
2018-12-10 23:56:02 -05:00
if let Some ( ( source , payment_hash ) ) = payment_data {
2018-11-22 21:18:16 -05:00
let mut payment_preimage = PaymentPreimage ( [ 0 ; 32 ] ) ;
2019-01-06 17:01:29 -05:00
if accepted_preimage_claim {
2019-01-06 15:14:43 -05:00
payment_preimage . 0. copy_from_slice ( & input . witness [ 3 ] ) ;
2018-12-10 23:56:02 -05:00
htlc_updated . push ( ( source , Some ( payment_preimage ) , payment_hash ) ) ;
2019-01-06 17:01:29 -05:00
} else if offered_preimage_claim {
2019-01-06 15:14:43 -05:00
payment_preimage . 0. copy_from_slice ( & input . witness [ 1 ] ) ;
2018-12-10 23:56:02 -05:00
htlc_updated . push ( ( source , Some ( payment_preimage ) , payment_hash ) ) ;
} else {
2019-07-18 18:50:03 -04:00
log_info!(self, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height {})", log_bytes!(payment_hash.0), height + ANTI_REORG_DELAY - 1);
match self . onchain_events_waiting_threshold_conf . entry ( height + ANTI_REORG_DELAY - 1 ) {
2019-02-06 20:02:38 -05:00
hash_map ::Entry ::Occupied ( mut entry ) = > {
let e = entry . get_mut ( ) ;
2019-03-30 21:56:51 -04:00
e . retain ( | ref event | {
match * * event {
OnchainEvent ::HTLCUpdate { ref htlc_update } = > {
return htlc_update . 0 ! = source
} ,
_ = > return true
}
} ) ;
e . push ( OnchainEvent ::HTLCUpdate { htlc_update : ( source , payment_hash ) } ) ;
2019-02-06 20:02:38 -05:00
}
hash_map ::Entry ::Vacant ( entry ) = > {
2019-03-30 21:56:51 -04:00
entry . insert ( vec! [ OnchainEvent ::HTLCUpdate { htlc_update : ( source , payment_hash ) } ] ) ;
2019-02-06 20:02:38 -05:00
}
}
2018-12-10 23:56:02 -05:00
}
}
}
htlc_updated
}
2019-07-02 15:52:58 -04:00
/// The Lightning security model (i.e. being able to redeem/timeout HTLCs or penalize a counterparty onchain) relies on the assumption that claim transactions get confirmed before timelock expiration
/// (CSV or CLTV, depending on the case). In case of high-fee spikes, a claim tx may get stuck in the mempool, so its feerate needs to be bumped quickly using Replace-By-Fee or Child-Pays-For-Parent.
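/// Rough flow (a summary of the code below): rebuild a claim tx from the cached per-input
/// material, predict its weight per input type, compute a bumped fee via RBF_bump!, then
/// re-sign every input; locally-signed HTLC txn (LocalHTLC) cannot be RBF'd and abort the bump.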
fn bump_claim_tx ( & self , height : u32 , cached_claim_datas : & ClaimTxBumpMaterial , fee_estimator : & FeeEstimator ) -> Option < ( u32 , u64 , Transaction ) > {
if cached_claim_datas . per_input_material . len ( ) = = 0 { return None } // But don't prune pending claiming request yet, we may have to resurrect HTLCs
let mut inputs = Vec ::new ( ) ;
for outp in cached_claim_datas . per_input_material . keys ( ) {
inputs . push ( TxIn {
previous_output : * outp ,
script_sig : Script ::new ( ) ,
sequence : 0xfffffffd ,
witness : Vec ::new ( ) ,
} ) ;
}
let mut bumped_tx = Transaction {
version : 2 ,
lock_time : 0 ,
input : inputs ,
output : vec ! [ TxOut {
script_pubkey : self . destination_script . clone ( ) ,
value : 0
} ] ,
} ;
macro_rules ! RBF_bump {
( $amount : expr , $old_feerate : expr , $fee_estimator : expr , $predicted_weight : expr ) = > {
{
let mut used_feerate ;
// If the old feerate is lower than the current one returned by the fee estimator, use the latter to compute the new fee...
let new_fee = if $old_feerate < $fee_estimator . get_est_sat_per_1000_weight ( ConfirmationTarget ::HighPriority ) {
let mut value = $amount ;
if subtract_high_prio_fee! ( self , $fee_estimator , value , $predicted_weight , used_feerate ) {
// Overflow check is done in subtract_high_prio_fee
$amount - value
} else {
log_trace! ( self , " Can't new-estimation bump new claiming tx, amount {} is too small " , $amount ) ;
return None ;
}
// ...else just increase the previous feerate by 25% (because that's a nice number)
} else {
let fee = $old_feerate * $predicted_weight / 750 ;
if $amount < = fee {
log_trace! ( self , " Can't 25% bump new claiming tx, amount {} is too small " , $amount ) ;
return None ;
}
fee
} ;
let previous_fee = $old_feerate * $predicted_weight / 1000 ;
let min_relay_fee = MIN_RELAY_FEE_SAT_PER_1000_WEIGHT * $predicted_weight / 1000 ;
// BIP 125 Opt-in Full Replace-by-Fee Signaling
// * 3. The replacement transaction pays an absolute fee of at least the sum paid by the original transactions.
// * 4. The replacement transaction must also pay for its own bandwidth at or above the rate set by the node's minimum relay fee setting.
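// Worked example with purely illustrative numbers: if $old_feerate = 1000 sat/kWU and
// $predicted_weight = 600 WU, previous_fee = 600 sats and the 25% bump path yields
// 1000 * 600 / 750 = 800 sats; if MIN_RELAY_FEE_SAT_PER_1000_WEIGHT were 4000, the floor
// below would raise new_fee to previous_fee + min_relay_fee = 3000 sats, i.e. a final
// feerate of 3000 * 1000 / 600 = 5000 sat/kWU.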
let new_fee = if new_fee < previous_fee + min_relay_fee {
new_fee + previous_fee + min_relay_fee - new_fee
} else {
new_fee
} ;
Some ( ( new_fee , new_fee * 1000 / $predicted_weight ) )
}
}
}
let new_timer = Self ::get_height_timer ( height , cached_claim_datas . soonest_timelock ) ;
let mut inputs_witnesses_weight = 0 ;
let mut amt = 0 ;
for per_outp_material in cached_claim_datas . per_input_material . values ( ) {
match per_outp_material {
& InputMaterial ::Revoked { ref script , ref is_htlc , ref amount , .. } = > {
2020-01-14 13:47:01 -05:00
log_trace!(self, "Is HTLC? {}", is_htlc);
inputs_witnesses_weight + = Self ::get_witnesses_weight ( if ! is_htlc { & [ InputDescriptors ::RevokedOutput ] } else if HTLCType ::scriptlen_to_htlctype ( script . len ( ) ) = = Some ( HTLCType ::OfferedHTLC ) { & [ InputDescriptors ::RevokedOfferedHTLC ] } else if HTLCType ::scriptlen_to_htlctype ( script . len ( ) ) = = Some ( HTLCType ::AcceptedHTLC ) { & [ InputDescriptors ::RevokedReceivedHTLC ] } else { unreachable! ( ) } ) ;
2019-07-02 15:52:58 -04:00
amt + = * amount ;
} ,
2019-07-03 10:26:17 -04:00
& InputMaterial ::RemoteHTLC { ref preimage , ref amount , .. } = > {
inputs_witnesses_weight + = Self ::get_witnesses_weight ( if preimage . is_some ( ) { & [ InputDescriptors ::OfferedHTLC ] } else { & [ InputDescriptors ::ReceivedHTLC ] } ) ;
amt + = * amount ;
} ,
2019-07-02 15:52:58 -04:00
& InputMaterial ::LocalHTLC { .. } = > { return None ; }
}
}
let predicted_weight = bumped_tx . get_weight ( ) + inputs_witnesses_weight ;
let new_feerate ;
if let Some ( ( new_fee , feerate ) ) = RBF_bump! ( amt , cached_claim_datas . feerate_previous , fee_estimator , predicted_weight as u64 ) {
// If the newly computed fee is greater than the whole claimable amount, burn it all in fees
if new_fee > amt {
bumped_tx . output [ 0 ] . value = 0 ;
} else {
bumped_tx . output [ 0 ] . value = amt - new_fee ;
}
new_feerate = feerate ;
} else {
return None ;
}
assert! ( new_feerate ! = 0 ) ;
for ( i , ( outp , per_outp_material ) ) in cached_claim_datas . per_input_material . iter ( ) . enumerate ( ) {
match per_outp_material {
& InputMaterial ::Revoked { ref script , ref pubkey , ref key , ref is_htlc , ref amount } = > {
let sighash_parts = bip143 ::SighashComponents ::new ( & bumped_tx ) ;
let sighash = hash_to_message! ( & sighash_parts . sighash_all ( & bumped_tx . input [ i ] , & script , * amount ) [ .. ] ) ;
let sig = self . secp_ctx . sign ( & sighash , & key ) ;
bumped_tx . input [ i ] . witness . push ( sig . serialize_der ( ) . to_vec ( ) ) ;
bumped_tx . input [ i ] . witness [ 0 ] . push ( SigHashType ::All as u8 ) ;
if * is_htlc {
bumped_tx . input [ i ] . witness . push ( pubkey . unwrap ( ) . clone ( ) . serialize ( ) . to_vec ( ) ) ;
} else {
bumped_tx . input [ i ] . witness . push ( vec! ( 1 ) ) ;
}
bumped_tx . input [ i ] . witness . push ( script . clone ( ) . into_bytes ( ) ) ;
2020-01-14 13:47:01 -05:00
log_trace! ( self , " Going to broadcast bumped Penalty Transaction {} claiming revoked {} output {} from {} with new feerate {} " , bumped_tx . txid ( ) , if ! is_htlc { " to_local " } else if HTLCType ::scriptlen_to_htlctype ( script . len ( ) ) = = Some ( HTLCType ::OfferedHTLC ) { " offered " } else if HTLCType ::scriptlen_to_htlctype ( script . len ( ) ) = = Some ( HTLCType ::AcceptedHTLC ) { " received " } else { " " } , outp . vout , outp . txid , new_feerate ) ;
2019-07-02 15:52:58 -04:00
} ,
2019-07-03 10:26:17 -04:00
& InputMaterial ::RemoteHTLC { ref script , ref key , ref preimage , ref amount , ref locktime } = > {
if ! preimage . is_some ( ) { bumped_tx . lock_time = * locktime } ;
let sighash_parts = bip143 ::SighashComponents ::new ( & bumped_tx ) ;
let sighash = hash_to_message! ( & sighash_parts . sighash_all ( & bumped_tx . input [ i ] , & script , * amount ) [ .. ] ) ;
let sig = self . secp_ctx . sign ( & sighash , & key ) ;
bumped_tx . input [ i ] . witness . push ( sig . serialize_der ( ) . to_vec ( ) ) ;
bumped_tx . input [ i ] . witness [ 0 ] . push ( SigHashType ::All as u8 ) ;
if let & Some ( preimage ) = preimage {
bumped_tx . input [ i ] . witness . push ( preimage . clone ( ) . 0. to_vec ( ) ) ;
} else {
bumped_tx . input [ i ] . witness . push ( vec! [ 0 ] ) ;
}
bumped_tx . input [ i ] . witness . push ( script . clone ( ) . into_bytes ( ) ) ;
log_trace! ( self , " Going to broadcast bumped Claim Transaction {} claiming remote {} htlc output {} from {} with new feerate {} " , bumped_tx . txid ( ) , if preimage . is_some ( ) { " offered " } else { " received " } , outp . vout , outp . txid , new_feerate ) ;
} ,
2019-07-02 15:52:58 -04:00
& InputMaterial ::LocalHTLC { .. } = > {
//TODO : Local commitment transactions and HTLC-Timeout/HTLC-Success transactions are counter-signed by our peer, so we
// can't RBF them ourselves. Doing so would need a Lightning spec change plus package relay support :
// https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-November/016518.html
return None ;
}
}
}
assert! ( predicted_weight > = bumped_tx . get_weight ( ) ) ;
Some ( ( new_timer , new_feerate , bumped_tx ) )
}
2017-12-25 01:05:27 -05:00
}
2018-10-24 10:34:16 -04:00
const MAX_ALLOC_SIZE : usize = 64 * 1024 ;
2018-10-24 11:14:12 -04:00
impl < R : ::std ::io ::Read > ReadableArgs < R , Arc < Logger > > for ( Sha256dHash , ChannelMonitor ) {
2018-10-18 15:00:12 -04:00
fn read ( reader : & mut R , logger : Arc < Logger > ) -> Result < Self , DecodeError > {
2018-09-19 13:17:16 -04:00
let secp_ctx = Secp256k1 ::new ( ) ;
macro_rules ! unwrap_obj {
( $key : expr ) = > {
match $key {
Ok ( res ) = > res ,
Err ( _ ) = > return Err ( DecodeError ::InvalidValue ) ,
}
}
}
2018-10-24 10:34:16 -04:00
let _ver : u8 = Readable ::read ( reader ) ? ;
let min_ver : u8 = Readable ::read ( reader ) ? ;
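// _ver is the version this monitor was written with; min_ver is the minimum reader version
// required to make sense of the data, checked against SERIALIZATION_VERSION below.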
2018-09-19 13:17:16 -04:00
if min_ver > SERIALIZATION_VERSION {
return Err ( DecodeError ::UnknownVersion ) ;
}
2018-10-24 10:34:16 -04:00
let commitment_transaction_number_obscure_factor = < U48 as Readable < R > > ::read ( reader ) ? . 0 ;
2018-09-19 13:17:16 -04:00
2018-10-24 10:34:16 -04:00
let key_storage = match < u8 as Readable < R > > ::read ( reader ) ? {
2018-09-19 13:17:16 -04:00
0 = > {
2019-12-13 01:58:08 -05:00
let funding_key = Readable ::read ( reader ) ? ;
2018-10-24 10:34:16 -04:00
let revocation_base_key = Readable ::read ( reader ) ? ;
let htlc_base_key = Readable ::read ( reader ) ? ;
let delayed_payment_base_key = Readable ::read ( reader ) ? ;
2018-11-01 03:22:56 +00:00
let payment_base_key = Readable ::read ( reader ) ? ;
2018-11-16 20:52:33 -05:00
let shutdown_pubkey = Readable ::read ( reader ) ? ;
2018-11-26 19:50:16 -05:00
// Technically this read can fail, making serialization fail to round-trip, but only for
// barely-init'd ChannelMonitors that we couldn't do anything useful with anyway.
let outpoint = OutPoint {
txid : Readable ::read ( reader ) ? ,
index : Readable ::read ( reader ) ? ,
} ;
let funding_info = Some ( ( outpoint , Readable ::read ( reader ) ? ) ) ;
2019-01-23 11:26:32 -05:00
let current_remote_commitment_txid = Readable ::read ( reader ) ? ;
let prev_remote_commitment_txid = Readable ::read ( reader ) ? ;
2018-11-26 19:50:16 -05:00
Storage ::Local {
2019-12-13 01:58:08 -05:00
funding_key ,
2018-10-19 02:44:40 +00:00
revocation_base_key ,
htlc_base_key ,
delayed_payment_base_key ,
2018-11-01 03:22:56 +00:00
payment_base_key ,
2018-11-16 20:52:33 -05:00
shutdown_pubkey ,
2018-11-26 19:50:16 -05:00
funding_info ,
2018-12-10 23:56:34 -05:00
current_remote_commitment_txid ,
prev_remote_commitment_txid ,
2018-09-19 13:17:16 -04:00
}
} ,
_ = > return Err ( DecodeError ::InvalidValue ) ,
} ;
2018-10-24 10:34:16 -04:00
let their_htlc_base_key = Some ( Readable ::read ( reader ) ? ) ;
let their_delayed_payment_base_key = Some ( Readable ::read ( reader ) ? ) ;
2019-12-12 14:56:28 -05:00
let funding_redeemscript = Some ( Readable ::read ( reader ) ? ) ;
let channel_value_satoshis = Some ( Readable ::read ( reader ) ? ) ;
2018-09-19 13:17:16 -04:00
let their_cur_revocation_points = {
2018-10-24 10:34:16 -04:00
let first_idx = < U48 as Readable < R > > ::read ( reader ) ? . 0 ;
2018-09-19 13:17:16 -04:00
if first_idx = = 0 {
None
} else {
2018-10-24 10:34:16 -04:00
let first_point = Readable ::read ( reader ) ? ;
let second_point_slice : [ u8 ; 33 ] = Readable ::read ( reader ) ? ;
2018-09-19 13:17:16 -04:00
if second_point_slice [ 0 .. 32 ] = = [ 0 ; 32 ] & & second_point_slice [ 32 ] = = 0 {
Some ( ( first_idx , first_point , None ) )
} else {
2019-01-16 15:45:05 -05:00
Some ( ( first_idx , first_point , Some ( unwrap_obj! ( PublicKey ::from_slice ( & second_point_slice ) ) ) ) )
2018-09-19 13:17:16 -04:00
}
}
} ;
2018-10-24 10:34:16 -04:00
let our_to_self_delay : u16 = Readable ::read ( reader ) ? ;
let their_to_self_delay : Option < u16 > = Some ( Readable ::read ( reader ) ? ) ;
2018-09-19 13:17:16 -04:00
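// Compact per-commitment secret storage (see BOLT 3): 49 (secret, commitment index) slots,
// roughly one per bit position of the 48-bit commitment index.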
let mut old_secrets = [ ( [ 0 ; 32 ] , 1 < < 48 ) ; 49 ] ;
for & mut ( ref mut secret , ref mut idx ) in old_secrets . iter_mut ( ) {
2018-10-24 10:34:16 -04:00
* secret = Readable ::read ( reader ) ? ;
* idx = Readable ::read ( reader ) ? ;
2018-09-19 13:17:16 -04:00
}
macro_rules ! read_htlc_in_commitment {
( ) = > {
{
2018-10-24 10:34:16 -04:00
let offered : bool = Readable ::read ( reader ) ? ;
let amount_msat : u64 = Readable ::read ( reader ) ? ;
let cltv_expiry : u32 = Readable ::read ( reader ) ? ;
2018-11-22 21:18:16 -05:00
let payment_hash : PaymentHash = Readable ::read ( reader ) ? ;
2019-01-23 11:26:32 -05:00
let transaction_output_index : Option < u32 > = Readable ::read ( reader ) ? ;
2018-09-19 13:17:16 -04:00
HTLCOutputInCommitment {
offered , amount_msat , cltv_expiry , payment_hash , transaction_output_index
}
}
}
}
2018-10-24 10:34:16 -04:00
let remote_claimable_outpoints_len : u64 = Readable ::read ( reader ) ? ;
let mut remote_claimable_outpoints = HashMap ::with_capacity ( cmp ::min ( remote_claimable_outpoints_len as usize , MAX_ALLOC_SIZE / 64 ) ) ;
2018-09-19 13:17:16 -04:00
for _ in 0 .. remote_claimable_outpoints_len {
2018-10-24 10:34:16 -04:00
let txid : Sha256dHash = Readable ::read ( reader ) ? ;
2019-01-04 14:37:48 -05:00
let htlcs_count : u64 = Readable ::read ( reader ) ? ;
let mut htlcs = Vec ::with_capacity ( cmp ::min ( htlcs_count as usize , MAX_ALLOC_SIZE / 32 ) ) ;
for _ in 0 .. htlcs_count {
2019-01-23 11:26:32 -05:00
htlcs . push ( ( read_htlc_in_commitment! ( ) , < Option < HTLCSource > as Readable < R > > ::read ( reader ) ? . map ( | o : HTLCSource | Box ::new ( o ) ) ) ) ;
2018-11-30 10:58:44 -05:00
}
2019-01-04 14:37:48 -05:00
if let Some ( _ ) = remote_claimable_outpoints . insert ( txid , htlcs ) {
2018-09-19 13:17:16 -04:00
return Err ( DecodeError ::InvalidValue ) ;
}
}
2018-10-24 10:34:16 -04:00
let remote_commitment_txn_on_chain_len : u64 = Readable ::read ( reader ) ? ;
let mut remote_commitment_txn_on_chain = HashMap ::with_capacity ( cmp ::min ( remote_commitment_txn_on_chain_len as usize , MAX_ALLOC_SIZE / 32 ) ) ;
2018-09-19 13:17:16 -04:00
for _ in 0 .. remote_commitment_txn_on_chain_len {
2018-10-24 10:34:16 -04:00
let txid : Sha256dHash = Readable ::read ( reader ) ? ;
let commitment_number = < U48 as Readable < R > > ::read ( reader ) ? . 0 ;
2018-10-24 13:59:03 -04:00
let outputs_count = < u64 as Readable < R > > ::read ( reader ) ? ;
let mut outputs = Vec ::with_capacity ( cmp ::min ( outputs_count as usize , MAX_ALLOC_SIZE / 8 ) ) ;
for _ in 0 .. outputs_count {
outputs . push ( Readable ::read ( reader ) ? ) ;
}
if let Some ( _ ) = remote_commitment_txn_on_chain . insert ( txid , ( commitment_number , outputs ) ) {
2018-09-19 13:17:16 -04:00
return Err ( DecodeError ::InvalidValue ) ;
}
}
2018-10-24 10:34:16 -04:00
let remote_hash_commitment_number_len : u64 = Readable ::read ( reader ) ? ;
let mut remote_hash_commitment_number = HashMap ::with_capacity ( cmp ::min ( remote_hash_commitment_number_len as usize , MAX_ALLOC_SIZE / 32 ) ) ;
2018-09-19 13:17:16 -04:00
for _ in 0 .. remote_hash_commitment_number_len {
2018-11-22 21:18:16 -05:00
let payment_hash : PaymentHash = Readable ::read ( reader ) ? ;
2018-10-24 10:34:16 -04:00
let commitment_number = < U48 as Readable < R > > ::read ( reader ) ? . 0 ;
2018-11-22 21:18:16 -05:00
if let Some ( _ ) = remote_hash_commitment_number . insert ( payment_hash , commitment_number ) {
2018-09-19 13:17:16 -04:00
return Err ( DecodeError ::InvalidValue ) ;
}
}
macro_rules ! read_local_tx {
( ) = > {
{
2019-12-13 01:58:08 -05:00
let tx = < LocalCommitmentTransaction as Readable < R > > ::read ( reader ) ? ;
2018-10-24 10:34:16 -04:00
let revocation_key = Readable ::read ( reader ) ? ;
let a_htlc_key = Readable ::read ( reader ) ? ;
let b_htlc_key = Readable ::read ( reader ) ? ;
let delayed_payment_key = Readable ::read ( reader ) ? ;
2019-12-13 14:56:57 -05:00
let per_commitment_point = Readable ::read ( reader ) ? ;
2018-10-24 10:34:16 -04:00
let feerate_per_kw : u64 = Readable ::read ( reader ) ? ;
2018-09-19 13:17:16 -04:00
2019-01-04 14:37:48 -05:00
let htlcs_len : u64 = Readable ::read ( reader ) ? ;
let mut htlcs = Vec ::with_capacity ( cmp ::min ( htlcs_len as usize , MAX_ALLOC_SIZE / 128 ) ) ;
for _ in 0 .. htlcs_len {
let htlc = read_htlc_in_commitment! ( ) ;
let sigs = match < u8 as Readable < R > > ::read ( reader ) ? {
0 = > None ,
2019-12-13 14:56:57 -05:00
1 = > Some ( Readable ::read ( reader ) ? ) ,
2019-01-04 14:37:48 -05:00
_ = > return Err ( DecodeError ::InvalidValue ) ,
} ;
2019-01-23 11:26:32 -05:00
htlcs . push ( ( htlc , sigs , Readable ::read ( reader ) ? ) ) ;
2018-09-19 13:17:16 -04:00
}
LocalSignedTx {
txid : tx . txid ( ) ,
2019-12-13 14:56:57 -05:00
tx , revocation_key , a_htlc_key , b_htlc_key , delayed_payment_key , per_commitment_point , feerate_per_kw ,
2019-01-04 14:37:48 -05:00
htlc_outputs : htlcs
2018-09-19 13:17:16 -04:00
}
}
}
}
2018-10-24 10:34:16 -04:00
let prev_local_signed_commitment_tx = match < u8 as Readable < R > > ::read ( reader ) ? {
2018-09-19 13:17:16 -04:00
0 = > None ,
1 = > {
Some ( read_local_tx! ( ) )
} ,
_ = > return Err ( DecodeError ::InvalidValue ) ,
} ;
2018-10-24 10:34:16 -04:00
let current_local_signed_commitment_tx = match < u8 as Readable < R > > ::read ( reader ) ? {
2018-09-19 13:17:16 -04:00
0 = > None ,
1 = > {
Some ( read_local_tx! ( ) )
} ,
_ = > return Err ( DecodeError ::InvalidValue ) ,
} ;
2018-10-25 12:56:02 -04:00
let current_remote_commitment_number = < U48 as Readable < R > > ::read ( reader ) ? . 0 ;
2018-10-24 10:34:16 -04:00
let payment_preimages_len : u64 = Readable ::read ( reader ) ? ;
let mut payment_preimages = HashMap ::with_capacity ( cmp ::min ( payment_preimages_len as usize , MAX_ALLOC_SIZE / 32 ) ) ;
2018-09-19 13:17:16 -04:00
for _ in 0 .. payment_preimages_len {
2018-11-22 21:18:16 -05:00
let preimage : PaymentPreimage = Readable ::read ( reader ) ? ;
2018-12-17 23:58:02 -05:00
let hash = PaymentHash ( Sha256 ::hash ( & preimage . 0 [ .. ] ) . into_inner ( ) ) ;
2018-09-19 13:17:16 -04:00
if let Some ( _ ) = payment_preimages . insert ( hash , preimage ) {
return Err ( DecodeError ::InvalidValue ) ;
}
}
2018-10-24 11:14:12 -04:00
let last_block_hash : Sha256dHash = Readable ::read ( reader ) ? ;
2018-10-24 10:34:16 -04:00
let destination_script = Readable ::read ( reader ) ? ;
2019-08-02 16:29:12 -04:00
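// to_remote_rescue, when present, presumably carries the script and private key needed to claim
// our to_remote output from a remote commitment transaction.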
let to_remote_rescue = match < u8 as Readable < R > > ::read ( reader ) ? {
0 = > None ,
1 = > {
let to_remote_script = Readable ::read ( reader ) ? ;
let local_key = Readable ::read ( reader ) ? ;
Some ( ( to_remote_script , local_key ) )
}
_ = > return Err ( DecodeError ::InvalidValue ) ,
} ;
2018-09-19 13:17:16 -04:00
2019-12-09 22:18:20 -05:00
let pending_claim_requests_len : u64 = Readable ::read ( reader ) ? ;
let mut pending_claim_requests = HashMap ::with_capacity ( cmp ::min ( pending_claim_requests_len as usize , MAX_ALLOC_SIZE / 128 ) ) ;
for _ in 0 .. pending_claim_requests_len {
pending_claim_requests . insert ( Readable ::read ( reader ) ? , Readable ::read ( reader ) ? ) ;
}
let claimable_outpoints_len : u64 = Readable ::read ( reader ) ? ;
let mut claimable_outpoints = HashMap ::with_capacity ( cmp ::min ( claimable_outpoints_len as usize , MAX_ALLOC_SIZE / 128 ) ) ;
for _ in 0 .. claimable_outpoints_len {
2019-03-30 22:12:55 -04:00
let outpoint = Readable ::read ( reader ) ? ;
2019-12-09 22:18:20 -05:00
let ancestor_claim_txid = Readable ::read ( reader ) ? ;
2019-07-18 17:27:48 -04:00
let height = Readable ::read ( reader ) ? ;
2019-12-09 22:18:20 -05:00
claimable_outpoints . insert ( outpoint , ( ancestor_claim_txid , height ) ) ;
2019-03-30 22:12:55 -04:00
}
2019-05-30 20:54:02 -04:00
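// Events (claims, HTLC updates, contentious outpoints) buffered until they reach enough
// confirmations, keyed by the block height at which they mature.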
let waiting_threshold_conf_len : u64 = Readable ::read ( reader ) ? ;
2019-03-30 21:56:51 -04:00
let mut onchain_events_waiting_threshold_conf = HashMap ::with_capacity ( cmp ::min ( waiting_threshold_conf_len as usize , MAX_ALLOC_SIZE / 128 ) ) ;
2019-05-30 20:54:02 -04:00
for _ in 0 .. waiting_threshold_conf_len {
let height_target = Readable ::read ( reader ) ? ;
2019-03-30 21:56:51 -04:00
let events_len : u64 = Readable ::read ( reader ) ? ;
let mut events = Vec ::with_capacity ( cmp ::min ( events_len as usize , MAX_ALLOC_SIZE / 128 ) ) ;
for _ in 0 .. events_len {
let ev = match < u8 as Readable < R > > ::read ( reader ) ? {
0 = > {
2019-12-09 22:18:20 -05:00
let claim_request = Readable ::read ( reader ) ? ;
2019-03-30 21:56:51 -04:00
OnchainEvent ::Claim {
2019-12-09 22:18:20 -05:00
claim_request
2019-03-30 21:56:51 -04:00
}
} ,
1 = > {
let htlc_source = Readable ::read ( reader ) ? ;
let hash = Readable ::read ( reader ) ? ;
OnchainEvent ::HTLCUpdate {
htlc_update : ( htlc_source , hash )
}
} ,
2019-12-09 16:59:08 -05:00
2 = > {
let outpoint = Readable ::read ( reader ) ? ;
let input_material = Readable ::read ( reader ) ? ;
OnchainEvent ::ContentiousOutpoint {
outpoint ,
input_material
}
}
2019-03-30 21:56:51 -04:00
_ = > return Err ( DecodeError ::InvalidValue ) ,
} ;
events . push ( ev ) ;
2019-05-30 20:54:02 -04:00
}
2019-03-30 21:56:51 -04:00
onchain_events_waiting_threshold_conf . insert ( height_target , events ) ;
2019-05-30 20:54:02 -04:00
}
2018-10-24 11:14:12 -04:00
Ok ( ( last_block_hash . clone ( ) , ChannelMonitor {
2018-09-19 13:17:16 -04:00
commitment_transaction_number_obscure_factor ,
key_storage ,
their_htlc_base_key ,
their_delayed_payment_base_key ,
2019-12-12 14:56:28 -05:00
funding_redeemscript ,
channel_value_satoshis ,
2018-09-19 13:17:16 -04:00
their_cur_revocation_points ,
our_to_self_delay ,
their_to_self_delay ,
old_secrets ,
remote_claimable_outpoints ,
2018-10-24 13:59:03 -04:00
remote_commitment_txn_on_chain ,
2018-09-19 13:17:16 -04:00
remote_hash_commitment_number ,
prev_local_signed_commitment_tx ,
current_local_signed_commitment_tx ,
2018-10-25 12:56:02 -04:00
current_remote_commitment_number ,
2018-09-19 13:17:16 -04:00
payment_preimages ,
destination_script ,
2019-08-02 16:29:12 -04:00
to_remote_rescue ,
2019-05-30 20:54:02 -04:00
2019-12-09 22:18:20 -05:00
pending_claim_requests ,
claimable_outpoints ,
2019-03-30 22:12:55 -04:00
2019-03-30 21:56:51 -04:00
onchain_events_waiting_threshold_conf ,
2019-05-30 20:54:02 -04:00
2018-10-24 11:14:12 -04:00
last_block_hash ,
2018-09-19 13:17:16 -04:00
secp_ctx ,
2018-10-18 15:00:12 -04:00
logger ,
2018-10-24 11:14:12 -04:00
} ) )
2018-09-19 13:17:16 -04:00
}
}
2017-12-25 01:05:27 -05:00
#[ cfg(test) ]
mod tests {
2019-04-10 18:56:22 -04:00
use bitcoin ::blockdata ::script ::{ Script , Builder } ;
use bitcoin ::blockdata ::opcodes ;
use bitcoin ::blockdata ::transaction ::{ Transaction , TxIn , TxOut , SigHashType } ;
use bitcoin ::blockdata ::transaction ::OutPoint as BitcoinOutPoint ;
use bitcoin ::util ::bip143 ;
2018-12-17 23:58:02 -05:00
use bitcoin_hashes ::Hash ;
use bitcoin_hashes ::sha256 ::Hash as Sha256 ;
2019-04-10 18:56:22 -04:00
use bitcoin_hashes ::sha256d ::Hash as Sha256dHash ;
use bitcoin_hashes ::hex ::FromHex ;
2018-07-27 17:06:14 -07:00
use hex ;
2018-11-22 21:18:16 -05:00
use ln ::channelmanager ::{ PaymentPreimage , PaymentHash } ;
2019-04-10 18:56:22 -04:00
use ln ::channelmonitor ::{ ChannelMonitor , InputDescriptors } ;
use ln ::chan_utils ;
2019-12-13 01:58:08 -05:00
use ln ::chan_utils ::{ HTLCOutputInCommitment , TxCreationKeys , LocalCommitmentTransaction } ;
2018-10-18 15:00:12 -04:00
use util ::test_utils ::TestLogger ;
2017-12-25 01:05:27 -05:00
use secp256k1 ::key ::{ SecretKey , PublicKey } ;
2019-01-04 14:37:48 -05:00
use secp256k1 ::Secp256k1 ;
2018-06-09 01:18:00 +02:00
use rand ::{ thread_rng , Rng } ;
2018-10-18 15:00:12 -04:00
use std ::sync ::Arc ;
2017-12-25 01:05:27 -05:00
#[ test ]
fn test_per_commitment_storage ( ) {
// Test vectors from BOLT 3:
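// These exercise the compact per-commitment secret storage via provide_secret()/get_secret().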
let mut secrets : Vec < [ u8 ; 32 ] > = Vec ::new ( ) ;
let mut monitor : ChannelMonitor ;
let secp_ctx = Secp256k1 ::new ( ) ;
2018-10-18 15:00:12 -04:00
let logger = Arc ::new ( TestLogger ::new ( ) ) ;
2017-12-25 01:05:27 -05:00
macro_rules ! test_secrets {
( ) = > {
let mut idx = 281474976710655 ;
for secret in secrets . iter ( ) {
assert_eq! ( monitor . get_secret ( idx ) . unwrap ( ) , * secret ) ;
idx - = 1 ;
}
assert_eq! ( monitor . get_min_seen_secret ( ) , idx + 1 ) ;
2018-11-15 07:47:07 -05:00
assert! ( monitor . get_secret ( idx ) . is_none ( ) ) ;
2017-12-25 01:05:27 -05:00
} ;
}
{
// insert_secret correct sequence
2019-12-13 01:58:08 -05:00
monitor = ChannelMonitor ::new ( & SecretKey ::from_slice ( & [ 41 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 42 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 43 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 44 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 44 ; 32 ] ) . unwrap ( ) , & PublicKey ::from_secret_key ( & secp_ctx , & SecretKey ::from_slice ( & [ 45 ; 32 ] ) . unwrap ( ) ) , 0 , Script ::new ( ) , logger . clone ( ) ) ;
2017-12-25 01:05:27 -05:00
secrets . clear ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710655 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710654 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710653 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710652 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710651 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710650 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710649 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710648 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
}
{
// insert_secret #1 incorrect
2019-12-13 01:58:08 -05:00
monitor = ChannelMonitor ::new ( & SecretKey ::from_slice ( & [ 41 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 42 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 43 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 44 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 44 ; 32 ] ) . unwrap ( ) , & PublicKey ::from_secret_key ( & secp_ctx , & SecretKey ::from_slice ( & [ 45 ; 32 ] ) . unwrap ( ) ) , 0 , Script ::new ( ) , logger . clone ( ) ) ;
2017-12-25 01:05:27 -05:00
secrets . clear ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710655 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964 " ) . unwrap ( ) ) ;
2018-11-15 07:47:07 -05:00
assert_eq! ( monitor . provide_secret ( 281474976710654 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap_err ( ) . 0 ,
2017-12-25 01:05:27 -05:00
" Previous secret did not match new one " ) ;
}
{
// insert_secret #2 incorrect (#1 derived from incorrect)
2019-12-13 01:58:08 -05:00
monitor = ChannelMonitor ::new ( & SecretKey ::from_slice ( & [ 41 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 42 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 43 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 44 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 44 ; 32 ] ) . unwrap ( ) , & PublicKey ::from_secret_key ( & secp_ctx , & SecretKey ::from_slice ( & [ 45 ; 32 ] ) . unwrap ( ) ) , 0 , Script ::new ( ) , logger . clone ( ) ) ;
2017-12-25 01:05:27 -05:00
secrets . clear ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710655 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " dddc3a8d14fddf2b68fa8c7fbad2748274937479dd0f8930d5ebb4ab6bd866a3 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710654 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710653 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116 " ) . unwrap ( ) ) ;
2018-11-15 07:47:07 -05:00
assert_eq! ( monitor . provide_secret ( 281474976710652 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap_err ( ) . 0 ,
2017-12-25 01:05:27 -05:00
" Previous secret did not match new one " ) ;
}
{
// insert_secret #3 incorrect
2019-12-13 01:58:08 -05:00
monitor = ChannelMonitor ::new ( & SecretKey ::from_slice ( & [ 41 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 42 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 43 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 44 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 44 ; 32 ] ) . unwrap ( ) , & PublicKey ::from_secret_key ( & secp_ctx , & SecretKey ::from_slice ( & [ 45 ; 32 ] ) . unwrap ( ) ) , 0 , Script ::new ( ) , logger . clone ( ) ) ;
2017-12-25 01:05:27 -05:00
secrets . clear ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710655 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710654 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " c51a18b13e8527e579ec56365482c62f180b7d5760b46e9477dae59e87ed423a " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710653 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116 " ) . unwrap ( ) ) ;
2018-11-15 07:47:07 -05:00
assert_eq! ( monitor . provide_secret ( 281474976710652 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap_err ( ) . 0 ,
2017-12-25 01:05:27 -05:00
" Previous secret did not match new one " ) ;
}
{
// insert_secret #4 incorrect (1,2,3 derived from incorrect)
2019-12-13 01:58:08 -05:00
monitor = ChannelMonitor ::new ( & SecretKey ::from_slice ( & [ 41 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 42 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 43 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 44 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 44 ; 32 ] ) . unwrap ( ) , & PublicKey ::from_secret_key ( & secp_ctx , & SecretKey ::from_slice ( & [ 45 ; 32 ] ) . unwrap ( ) ) , 0 , Script ::new ( ) , logger . clone ( ) ) ;
2017-12-25 01:05:27 -05:00
secrets . clear ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710655 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " dddc3a8d14fddf2b68fa8c7fbad2748274937479dd0f8930d5ebb4ab6bd866a3 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710654 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " c51a18b13e8527e579ec56365482c62f180b7d5760b46e9477dae59e87ed423a " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710653 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " ba65d7b0ef55a3ba300d4e87af29868f394f8f138d78a7011669c79b37b936f4 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710652 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710651 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710650 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710649 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17 " ) . unwrap ( ) ) ;
2018-11-15 07:47:07 -05:00
assert_eq! ( monitor . provide_secret ( 281474976710648 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap_err ( ) . 0 ,
2017-12-25 01:05:27 -05:00
" Previous secret did not match new one " ) ;
}
{
// insert_secret #5 incorrect
2019-12-13 01:58:08 -05:00
monitor = ChannelMonitor ::new ( & SecretKey ::from_slice ( & [ 41 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 42 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 43 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 44 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 44 ; 32 ] ) . unwrap ( ) , & PublicKey ::from_secret_key ( & secp_ctx , & SecretKey ::from_slice ( & [ 45 ; 32 ] ) . unwrap ( ) ) , 0 , Script ::new ( ) , logger . clone ( ) ) ;
2017-12-25 01:05:27 -05:00
secrets . clear ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710655 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710654 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710653 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710652 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 631373ad5f9ef654bb3dade742d09504c567edd24320d2fcd68e3cc47e2ff6a6 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710651 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2 " ) . unwrap ( ) ) ;
2018-11-15 07:47:07 -05:00
assert_eq! ( monitor . provide_secret ( 281474976710650 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap_err ( ) . 0 ,
2017-12-25 01:05:27 -05:00
" Previous secret did not match new one " ) ;
}
{
// insert_secret #6 incorrect (5 derived from incorrect)
2019-12-13 01:58:08 -05:00
monitor = ChannelMonitor ::new ( & SecretKey ::from_slice ( & [ 41 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 42 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 43 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 44 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 44 ; 32 ] ) . unwrap ( ) , & PublicKey ::from_secret_key ( & secp_ctx , & SecretKey ::from_slice ( & [ 45 ; 32 ] ) . unwrap ( ) ) , 0 , Script ::new ( ) , logger . clone ( ) ) ;
2017-12-25 01:05:27 -05:00
secrets . clear ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710655 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710654 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710653 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710652 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 631373ad5f9ef654bb3dade742d09504c567edd24320d2fcd68e3cc47e2ff6a6 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710651 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " b7e76a83668bde38b373970155c868a653304308f9896692f904a23731224bb1 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710650 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710649 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17 " ) . unwrap ( ) ) ;
2018-11-15 07:47:07 -05:00
assert_eq! ( monitor . provide_secret ( 281474976710648 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap_err ( ) . 0 ,
2017-12-25 01:05:27 -05:00
" Previous secret did not match new one " ) ;
}
{
// insert_secret #7 incorrect
2019-12-13 01:58:08 -05:00
monitor = ChannelMonitor ::new ( & SecretKey ::from_slice ( & [ 41 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 42 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 43 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 44 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 44 ; 32 ] ) . unwrap ( ) , & PublicKey ::from_secret_key ( & secp_ctx , & SecretKey ::from_slice ( & [ 45 ; 32 ] ) . unwrap ( ) ) , 0 , Script ::new ( ) , logger . clone ( ) ) ;
2017-12-25 01:05:27 -05:00
secrets . clear ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710655 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710654 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710653 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710652 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710651 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710650 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " e7971de736e01da8ed58b94c2fc216cb1dca9e326f3a96e7194fe8ea8af6c0a3 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710649 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17 " ) . unwrap ( ) ) ;
2018-11-15 07:47:07 -05:00
assert_eq! ( monitor . provide_secret ( 281474976710648 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap_err ( ) . 0 ,
2017-12-25 01:05:27 -05:00
" Previous secret did not match new one " ) ;
}
{
// insert_secret #8 incorrect
2019-12-13 01:58:08 -05:00
monitor = ChannelMonitor ::new ( & SecretKey ::from_slice ( & [ 41 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 42 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 43 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 44 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 44 ; 32 ] ) . unwrap ( ) , & PublicKey ::from_secret_key ( & secp_ctx , & SecretKey ::from_slice ( & [ 45 ; 32 ] ) . unwrap ( ) ) , 0 , Script ::new ( ) , logger . clone ( ) ) ;
2017-12-25 01:05:27 -05:00
secrets . clear ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710655 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710654 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710653 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710652 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710651 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710650 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710649 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
test_secrets! ( ) ;
secrets . push ( [ 0 ; 32 ] ) ;
2018-07-27 17:06:14 -07:00
secrets . last_mut ( ) . unwrap ( ) [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " a7efbc61aac46d34f77778bac22c8a20c6a46ca460addc49009bda875ec88fa4 " ) . unwrap ( ) ) ;
2018-11-15 07:47:07 -05:00
assert_eq! ( monitor . provide_secret ( 281474976710648 , secrets . last ( ) . unwrap ( ) . clone ( ) ) . unwrap_err ( ) . 0 ,
2017-12-25 01:05:27 -05:00
" Previous secret did not match new one " ) ;
}
}
2018-04-24 00:19:52 -04:00
2018-06-29 16:42:44 -04:00
#[ test ]
fn test_prune_preimages ( ) {
let secp_ctx = Secp256k1 ::new ( ) ;
2018-10-18 15:00:12 -04:00
let logger = Arc ::new ( TestLogger ::new ( ) ) ;
2018-06-09 01:18:00 +02:00
2019-01-16 15:45:05 -05:00
let dummy_key = PublicKey ::from_secret_key ( & secp_ctx , & SecretKey ::from_slice ( & [ 42 ; 32 ] ) . unwrap ( ) ) ;
2018-06-29 16:42:44 -04:00
macro_rules ! dummy_keys {
( ) = > {
2018-08-20 17:13:07 -04:00
{
TxCreationKeys {
per_commitment_point : dummy_key . clone ( ) ,
revocation_key : dummy_key . clone ( ) ,
a_htlc_key : dummy_key . clone ( ) ,
b_htlc_key : dummy_key . clone ( ) ,
a_delayed_payment_key : dummy_key . clone ( ) ,
b_payment_key : dummy_key . clone ( ) ,
}
2018-06-29 16:42:44 -04:00
}
2018-06-09 01:18:00 +02:00
}
}
2018-06-29 16:42:44 -04:00
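// A placeholder remote commitment transaction: only the HTLC metadata matters for preimage pruning.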
let dummy_tx = Transaction { version : 0 , lock_time : 0 , input : Vec ::new ( ) , output : Vec ::new ( ) } ;
2018-06-09 01:18:00 +02:00
2018-06-29 16:42:44 -04:00
let mut preimages = Vec ::new ( ) ;
{
let mut rng = thread_rng ( ) ;
for _ in 0 .. 20 {
2018-11-22 21:18:16 -05:00
let mut preimage = PaymentPreimage ( [ 0 ; 32 ] ) ;
rng . fill_bytes ( & mut preimage . 0 [ .. ] ) ;
2018-12-17 23:58:02 -05:00
let hash = PaymentHash ( Sha256 ::hash ( & preimage . 0 [ .. ] ) . into_inner ( ) ) ;
2018-06-29 16:42:44 -04:00
preimages . push ( ( preimage , hash ) ) ;
}
}
2018-06-09 01:18:00 +02:00
2018-06-29 16:42:44 -04:00
macro_rules ! preimages_slice_to_htlc_outputs {
( $preimages_slice : expr ) = > {
{
let mut res = Vec ::new ( ) ;
for ( idx , preimage ) in $preimages_slice . iter ( ) . enumerate ( ) {
2019-01-04 14:37:48 -05:00
res . push ( ( HTLCOutputInCommitment {
2018-06-29 16:42:44 -04:00
offered : true ,
amount_msat : 0 ,
cltv_expiry : 0 ,
payment_hash : preimage . 1. clone ( ) ,
2019-01-06 17:02:53 -05:00
transaction_output_index : Some ( idx as u32 ) ,
2019-01-04 14:37:48 -05:00
} , None ) ) ;
2018-06-09 01:18:00 +02:00
}
2018-06-29 16:42:44 -04:00
res
2018-06-09 01:18:00 +02:00
}
}
}
2018-06-29 16:42:44 -04:00
macro_rules ! preimages_to_local_htlcs {
( $preimages_slice : expr ) = > {
{
let mut inp = preimages_slice_to_htlc_outputs! ( $preimages_slice ) ;
2019-01-04 14:37:48 -05:00
let res : Vec < _ > = inp . drain ( .. ) . map ( | e | { ( e . 0 , None , e . 1 ) } ) . collect ( ) ;
2018-06-29 16:42:44 -04:00
res
}
2018-06-09 01:18:00 +02:00
}
}
2018-06-29 16:42:44 -04:00
macro_rules ! test_preimages_exist {
( $preimages_slice : expr , $monitor : expr ) = > {
for preimage in $preimages_slice {
assert! ( $monitor . payment_preimages . contains_key ( & preimage . 1 ) ) ;
}
2018-06-09 01:18:00 +02:00
}
}
2018-06-29 16:42:44 -04:00
// Prune with one old state and a local commitment tx whose HTLCs partially overlap with the
// old state.
2019-12-13 01:58:08 -05:00
let mut monitor = ChannelMonitor ::new ( & SecretKey ::from_slice ( & [ 41 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 42 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 43 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 44 ; 32 ] ) . unwrap ( ) , & SecretKey ::from_slice ( & [ 44 ; 32 ] ) . unwrap ( ) , & PublicKey ::from_secret_key ( & secp_ctx , & SecretKey ::from_slice ( & [ 45 ; 32 ] ) . unwrap ( ) ) , 0 , Script ::new ( ) , logger . clone ( ) ) ;
2019-12-12 14:56:28 -05:00
monitor . their_to_self_delay = Some ( 10 ) ;
2018-06-29 16:42:44 -04:00
2019-12-13 01:58:08 -05:00
monitor . provide_latest_local_commitment_tx_info ( LocalCommitmentTransaction ::dummy ( ) , dummy_keys! ( ) , 0 , preimages_to_local_htlcs! ( preimages [ 0 .. 10 ] ) ) ;
2019-01-04 14:37:48 -05:00
monitor . provide_latest_remote_commitment_tx_info ( & dummy_tx , preimages_slice_to_htlc_outputs! ( preimages [ 5 .. 15 ] ) , 281474976710655 , dummy_key ) ;
monitor . provide_latest_remote_commitment_tx_info ( & dummy_tx , preimages_slice_to_htlc_outputs! ( preimages [ 15 .. 20 ] ) , 281474976710654 , dummy_key ) ;
monitor . provide_latest_remote_commitment_tx_info ( & dummy_tx , preimages_slice_to_htlc_outputs! ( preimages [ 17 .. 20 ] ) , 281474976710653 , dummy_key ) ;
monitor . provide_latest_remote_commitment_tx_info ( & dummy_tx , preimages_slice_to_htlc_outputs! ( preimages [ 18 .. 20 ] ) , 281474976710652 , dummy_key ) ;
2018-06-29 16:42:44 -04:00
for & ( ref preimage , ref hash ) in preimages . iter ( ) {
monitor . provide_payment_preimage ( hash , preimage ) ;
2018-06-09 01:18:00 +02:00
}
2018-06-29 16:42:44 -04:00
// Now provide a secret, pruning preimages 10-15
let mut secret = [ 0 ; 32 ] ;
2018-07-27 17:06:14 -07:00
secret [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710655 , secret . clone ( ) ) . unwrap ( ) ;
2018-06-29 16:42:44 -04:00
assert_eq! ( monitor . payment_preimages . len ( ) , 15 ) ;
test_preimages_exist! ( & preimages [ 0 .. 10 ] , monitor ) ;
test_preimages_exist! ( & preimages [ 15 .. 20 ] , monitor ) ;
// Now provide a further secret, pruning preimages 15-17
2018-07-27 17:06:14 -07:00
secret [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710654 , secret . clone ( ) ) . unwrap ( ) ;
2018-06-29 16:42:44 -04:00
assert_eq! ( monitor . payment_preimages . len ( ) , 13 ) ;
test_preimages_exist! ( & preimages [ 0 .. 10 ] , monitor ) ;
test_preimages_exist! ( & preimages [ 17 .. 20 ] , monitor ) ;
// Now update local commitment tx info, pruning only element 18 as we still care about the
// previous commitment tx's preimages too
2019-12-13 01:58:08 -05:00
monitor . provide_latest_local_commitment_tx_info ( LocalCommitmentTransaction ::dummy ( ) , dummy_keys! ( ) , 0 , preimages_to_local_htlcs! ( preimages [ 0 .. 5 ] ) ) ;
2018-07-27 17:06:14 -07:00
secret [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710653 , secret . clone ( ) ) . unwrap ( ) ;
2018-06-29 16:42:44 -04:00
assert_eq! ( monitor . payment_preimages . len ( ) , 12 ) ;
test_preimages_exist! ( & preimages [ 0 .. 10 ] , monitor ) ;
test_preimages_exist! ( & preimages [ 18 .. 20 ] , monitor ) ;
// But if we do it again, we'll prune 5-10
2019-12-13 01:58:08 -05:00
monitor . provide_latest_local_commitment_tx_info ( LocalCommitmentTransaction ::dummy ( ) , dummy_keys! ( ) , 0 , preimages_to_local_htlcs! ( preimages [ 0 .. 3 ] ) ) ;
2018-07-27 17:06:14 -07:00
secret [ 0 .. 32 ] . clone_from_slice ( & hex ::decode ( " 27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116 " ) . unwrap ( ) ) ;
2018-11-01 03:19:37 +00:00
monitor . provide_secret ( 281474976710652 , secret . clone ( ) ) . unwrap ( ) ;
2018-06-29 16:42:44 -04:00
assert_eq! ( monitor . payment_preimages . len ( ) , 5 ) ;
test_preimages_exist! ( & preimages [ 0 .. 5 ] , monitor ) ;
2018-06-09 01:18:00 +02:00
}
2019-04-10 18:56:22 -04:00
#[ test ]
fn test_claim_txn_weight_computation ( ) {
// We test claim transaction weight against the predicted (worst-case) weight rather than the
// actual weight, to avoid variance from signature lengths and time-lock delays.
let secp_ctx = Secp256k1 ::new ( ) ;
let privkey = SecretKey ::from_slice ( & hex ::decode ( " 0101010101010101010101010101010101010101010101010101010101010101 " ) . unwrap ( ) [ .. ] ) . unwrap ( ) ;
let pubkey = PublicKey ::from_secret_key ( & secp_ctx , & privkey ) ;
2019-08-23 19:12:55 -04:00
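// Accumulates the actual DER signature lengths pushed below, so the assertions can normalize to
// the 73-byte maximum-length signatures assumed by get_witnesses_weight.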
let mut sum_actual_sigs = 0 ;
2019-04-10 18:56:22 -04:00
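// Builds a plausible witness for the given input type (revoked output, revoked HTLC,
// offered/received HTLC) so that the real post-signing weight can be measured.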
macro_rules ! sign_input {
( $sighash_parts : expr , $input : expr , $idx : expr , $amount : expr , $input_type : expr , $sum_actual_sigs : expr ) = > {
let htlc = HTLCOutputInCommitment {
offered : if * $input_type = = InputDescriptors ::RevokedOfferedHTLC | | * $input_type = = InputDescriptors ::OfferedHTLC { true } else { false } ,
amount_msat : 0 ,
cltv_expiry : 2 < < 16 ,
payment_hash : PaymentHash ( [ 1 ; 32 ] ) ,
transaction_output_index : Some ( $idx ) ,
} ;
let redeem_script = if * $input_type = = InputDescriptors ::RevokedOutput { chan_utils ::get_revokeable_redeemscript ( & pubkey , 256 , & pubkey ) } else { chan_utils ::get_htlc_redeemscript_with_explicit_keys ( & htlc , & pubkey , & pubkey , & pubkey ) } ;
let sighash = hash_to_message! ( & $sighash_parts . sighash_all ( & $input , & redeem_script , $amount ) [ .. ] ) ;
let sig = secp_ctx . sign ( & sighash , & privkey ) ;
$input . witness . push ( sig . serialize_der ( ) . to_vec ( ) ) ;
$input . witness [ 0 ] . push ( SigHashType ::All as u8 ) ;
2019-08-23 19:12:55 -04:00
sum_actual_sigs + = $input . witness [ 0 ] . len ( ) ;
2019-04-10 18:56:22 -04:00
if * $input_type = = InputDescriptors ::RevokedOutput {
$input . witness . push ( vec! ( 1 ) ) ;
} else if * $input_type = = InputDescriptors ::RevokedOfferedHTLC | | * $input_type = = InputDescriptors ::RevokedReceivedHTLC {
$input . witness . push ( pubkey . clone ( ) . serialize ( ) . to_vec ( ) ) ;
} else if * $input_type = = InputDescriptors ::ReceivedHTLC {
$input . witness . push ( vec! [ 0 ] ) ;
} else {
$input . witness . push ( PaymentPreimage ( [ 1 ; 32 ] ) . 0. to_vec ( ) ) ;
}
$input . witness . push ( redeem_script . into_bytes ( ) ) ;
println! ( " witness[0] {} " , $input . witness [ 0 ] . len ( ) ) ;
println! ( " witness[1] {} " , $input . witness [ 1 ] . len ( ) ) ;
println! ( " witness[2] {} " , $input . witness [ 2 ] . len ( ) ) ;
}
}
let script_pubkey = Builder ::new ( ) . push_opcode ( opcodes ::all ::OP_RETURN ) . into_script ( ) ;
let txid = Sha256dHash ::from_hex ( " 56944c5d3f98413ef45cf54545538103cc9f298e0575820ad3591376e2e0f65d " ) . unwrap ( ) ;
// Justice tx with 1 to_local, 2 revoked offered HTLCs, 1 revoked received HTLC
let mut claim_tx = Transaction { version : 0 , lock_time : 0 , input : Vec ::new ( ) , output : Vec ::new ( ) } ;
for i in 0 .. 4 {
claim_tx . input . push ( TxIn {
previous_output : BitcoinOutPoint {
txid ,
vout : i ,
} ,
script_sig : Script ::new ( ) ,
sequence : 0xfffffffd ,
witness : Vec ::new ( ) ,
} ) ;
}
claim_tx . output . push ( TxOut {
script_pubkey : script_pubkey . clone ( ) ,
value : 0 ,
} ) ;
let base_weight = claim_tx . get_weight ( ) ;
let sighash_parts = bip143 ::SighashComponents ::new ( & claim_tx ) ;
let inputs_des = vec! [ InputDescriptors ::RevokedOutput , InputDescriptors ::RevokedOfferedHTLC , InputDescriptors ::RevokedOfferedHTLC , InputDescriptors ::RevokedReceivedHTLC ] ;
for ( idx , inp ) in claim_tx . input . iter_mut ( ) . zip ( inputs_des . iter ( ) ) . enumerate ( ) {
sign_input! ( sighash_parts , inp . 0 , idx as u32 , 0 , inp . 1 , sum_actual_sigs ) ;
}
2019-08-23 19:12:55 -04:00
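// get_witnesses_weight assumes maximum-length (73-byte) signatures, so add back the per-input
// difference between 73 and each actual signature length.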
assert_eq! ( base_weight + ChannelMonitor ::get_witnesses_weight ( & inputs_des [ .. ] ) , claim_tx . get_weight ( ) + /* max_length_sig */ ( 73 * inputs_des . len ( ) - sum_actual_sigs ) ) ;
2019-04-10 18:56:22 -04:00
// Claim tx with 1 offered HTLC, 3 received HTLCs
claim_tx . input . clear ( ) ;
sum_actual_sigs = 0 ;
for i in 0 .. 4 {
claim_tx . input . push ( TxIn {
previous_output : BitcoinOutPoint {
txid ,
vout : i ,
} ,
script_sig : Script ::new ( ) ,
sequence : 0xfffffffd ,
witness : Vec ::new ( ) ,
} ) ;
}
let base_weight = claim_tx . get_weight ( ) ;
let sighash_parts = bip143 ::SighashComponents ::new ( & claim_tx ) ;
let inputs_des = vec! [ InputDescriptors ::OfferedHTLC , InputDescriptors ::ReceivedHTLC , InputDescriptors ::ReceivedHTLC , InputDescriptors ::ReceivedHTLC ] ;
for ( idx , inp ) in claim_tx . input . iter_mut ( ) . zip ( inputs_des . iter ( ) ) . enumerate ( ) {
sign_input! ( sighash_parts , inp . 0 , idx as u32 , 0 , inp . 1 , sum_actual_sigs ) ;
}
2019-08-23 19:12:55 -04:00
assert_eq! ( base_weight + ChannelMonitor ::get_witnesses_weight ( & inputs_des [ .. ] ) , claim_tx . get_weight ( ) + /* max_length_sig */ ( 73 * inputs_des . len ( ) - sum_actual_sigs ) ) ;
2019-04-10 18:56:22 -04:00
// Justice tx with 1 revoked HTLC-Success tx output
claim_tx . input . clear ( ) ;
sum_actual_sigs = 0 ;
claim_tx . input . push ( TxIn {
previous_output : BitcoinOutPoint {
txid ,
vout : 0 ,
} ,
script_sig : Script ::new ( ) ,
sequence : 0xfffffffd ,
witness : Vec ::new ( ) ,
} ) ;
let base_weight = claim_tx . get_weight ( ) ;
let sighash_parts = bip143 ::SighashComponents ::new ( & claim_tx ) ;
let inputs_des = vec! [ InputDescriptors ::RevokedOutput ] ;
for ( idx , inp ) in claim_tx . input . iter_mut ( ) . zip ( inputs_des . iter ( ) ) . enumerate ( ) {
sign_input! ( sighash_parts , inp . 0 , idx as u32 , 0 , inp . 1 , sum_actual_sigs ) ;
}
2019-08-23 19:12:55 -04:00
assert_eq! ( base_weight + ChannelMonitor ::get_witnesses_weight ( & inputs_des [ .. ] ) , claim_tx . get_weight ( ) + /* max_length_sig */ ( 73 * inputs_des . len ( ) - sum_actual_sigs ) ) ;
2019-04-10 18:56:22 -04:00
}
2018-04-24 00:19:52 -04:00
// Further testing is done in the ChannelManager integration tests.
2017-12-25 01:05:27 -05:00
}