// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
//! The logic to build claims and bump in-flight transactions until confirmations.
//!
//! OnchainTxHandler objects are fully part of ChannelMonitor and encapsulate all
//! building, tracking, bumping and notification functions.
use bitcoin::blockdata::transaction::{Transaction, TxIn, TxOut, SigHashType};
use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
use bitcoin::blockdata::script::Script;

use bitcoin::hash_types::Txid;

use bitcoin::secp256k1::{Secp256k1, Signature};
use bitcoin::secp256k1;
use ln::msgs::DecodeError;
use ln::channelmanager::PaymentPreimage;
use ln::chan_utils;
use ln::chan_utils::{TxCreationKeys, HolderCommitmentTransaction};

use chain::chaininterface::{FeeEstimator, BroadcasterInterface, ConfirmationTarget, MIN_RELAY_FEE_SAT_PER_1000_WEIGHT};
use chain::channelmonitor::{ANTI_REORG_DELAY, CLTV_SHARED_CLAIM_BUFFER, InputMaterial, ClaimRequest};
use chain::keysinterface::ChannelKeys;

use util::logger::Logger;
use util::ser::{Readable, Writer, Writeable};
use util::byte_utils;

use std::collections::{HashMap, hash_map};
use std::cmp;
use std::ops::Deref;

const MAX_ALLOC_SIZE: usize = 64 * 1024;

/// Upon discovery of some classes of onchain tx by ChannelMonitor, we may have to take actions on them
/// once they mature to enough confirmations (ANTI_REORG_DELAY)
#[derive(Clone, PartialEq)]
enum OnchainEvent {
	/// Outpoint under claim process by our own tx, once this one gets enough confirmations, we remove it from
	/// bump-txn candidate buffer.
	Claim {
		claim_request: Txid,
	},
	/// A claim tx may aggregate multiple claimable outpoints. One of the outpoints may be claimed by a counterparty tx.
	/// In this case, we need to drop the outpoint and regenerate a new claim tx. For safety, we keep tracking
	/// the outpoint to be sure to resurrect it back to the claim tx if reorgs happen.
	ContentiousOutpoint {
		outpoint: BitcoinOutPoint,
		input_material: InputMaterial,
	}
}

/// Higher-level cache structure needed to re-generate bumped claim txn if needed
#[derive(Clone, PartialEq)]
pub struct ClaimTxBumpMaterial {
	// At every block tick, used to check if pending claiming tx is taking too
	// much time for confirmation and we need to bump it.
	height_timer: Option<u32>,
	// Tracked in case of reorg to wipe out now-superfluous bump material
	feerate_previous: u32,
	// Soonest timelock among the set of outpoints claimed, used to compute
	// the claim's priority (not a feerate)
	soonest_timelock: u32,
	// Cache of script, pubkey, sig or key to solve claimable outputs scriptpubkey.
	per_input_material: HashMap<BitcoinOutPoint, InputMaterial>,
}

impl Writeable for ClaimTxBumpMaterial {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
		self.height_timer.write(writer)?;
		writer.write_all(&byte_utils::be32_to_array(self.feerate_previous))?;
		writer.write_all(&byte_utils::be32_to_array(self.soonest_timelock))?;
		writer.write_all(&byte_utils::be64_to_array(self.per_input_material.len() as u64))?;
		for (outp, tx_material) in self.per_input_material.iter() {
			outp.write(writer)?;
			tx_material.write(writer)?;
		}
		Ok(())
	}
}

impl Readable for ClaimTxBumpMaterial {
	fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
		let height_timer = Readable::read(reader)?;
		let feerate_previous = Readable::read(reader)?;
		let soonest_timelock = Readable::read(reader)?;
		let per_input_material_len: u64 = Readable::read(reader)?;
		let mut per_input_material = HashMap::with_capacity(cmp::min(per_input_material_len as usize, MAX_ALLOC_SIZE / 128));
		for _ in 0..per_input_material_len {
			let outpoint = Readable::read(reader)?;
			let input_material = Readable::read(reader)?;
			per_input_material.insert(outpoint, input_material);
		}
		Ok(Self { height_timer, feerate_previous, soonest_timelock, per_input_material })
	}
}

#[derive(PartialEq, Clone, Copy)]
pub(crate) enum InputDescriptors {
	RevokedOfferedHTLC,
	RevokedReceivedHTLC,
	OfferedHTLC,
	ReceivedHTLC,
	RevokedOutput, // either a revoked to_holder output on commitment tx, a revoked HTLC-Timeout output or a revoked HTLC-Success output
}

impl Writeable for InputDescriptors {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
		match self {
			&InputDescriptors::RevokedOfferedHTLC => {
				writer.write_all(&[0; 1])?;
			},
			&InputDescriptors::RevokedReceivedHTLC => {
				writer.write_all(&[1; 1])?;
			},
			&InputDescriptors::OfferedHTLC => {
				writer.write_all(&[2; 1])?;
			},
			&InputDescriptors::ReceivedHTLC => {
				writer.write_all(&[3; 1])?;
			}
			&InputDescriptors::RevokedOutput => {
				writer.write_all(&[4; 1])?;
			}
		}
		Ok(())
	}
}

impl Readable for InputDescriptors {
	fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
		let input_descriptor = match <u8 as Readable>::read(reader)? {
			0 => {
				InputDescriptors::RevokedOfferedHTLC
			},
			1 => {
				InputDescriptors::RevokedReceivedHTLC
			},
			2 => {
				InputDescriptors::OfferedHTLC
			},
			3 => {
				InputDescriptors::ReceivedHTLC
			},
			4 => {
				InputDescriptors::RevokedOutput
			}
			_ => return Err(DecodeError::InvalidValue),
		};
		Ok(input_descriptor)
	}
}

macro_rules! subtract_high_prio_fee {
	($logger: ident, $fee_estimator: expr, $value: expr, $predicted_weight: expr, $used_feerate: expr) => {
		{
			$used_feerate = $fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::HighPriority).into();
			let mut fee = $used_feerate as u64 * $predicted_weight / 1000;
			if $value <= fee {
				$used_feerate = $fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal).into();
				fee = $used_feerate as u64 * $predicted_weight / 1000;
				if $value <= fee.into() {
					$used_feerate = $fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background).into();
					fee = $used_feerate as u64 * $predicted_weight / 1000;
					if $value <= fee {
						log_error!($logger, "Failed to generate an on-chain punishment tx as even low priority fee ({} sat) was more than the entire claim balance ({} sat)",
							fee, $value);
						false
					} else {
						log_warn!($logger, "Used low priority fee for on-chain punishment tx as high priority fee was more than the entire claim balance ({} sat)",
							$value);
						$value -= fee;
						true
					}
				} else {
					log_warn!($logger, "Used medium priority fee for on-chain punishment tx as high priority fee was more than the entire claim balance ({} sat)",
						$value);
					$value -= fee;
					true
				}
			} else {
				$value -= fee;
				true
			}
		}
	}
}
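
// Serialization format for the HTLC-signature caches below: None is written as a single 0 byte;
// Some(vec) is written as a 1 byte followed by a u64 element count and, per element, a 0 (None) or
// 1 (Some) tag followed by the HTLC's index (as a u64) and its signature.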
impl Readable for Option<Vec<Option<(usize, Signature)>>> {
	fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
		match Readable::read(reader)? {
			0u8 => Ok(None),
			1u8 => {
				let vlen: u64 = Readable::read(reader)?;
				let mut ret = Vec::with_capacity(cmp::min(vlen as usize, MAX_ALLOC_SIZE / ::std::mem::size_of::<Option<(usize, Signature)>>()));
				for _ in 0..vlen {
					ret.push(match Readable::read(reader)? {
						0u8 => None,
						1u8 => Some((<u64 as Readable>::read(reader)? as usize, Readable::read(reader)?)),
						_ => return Err(DecodeError::InvalidValue)
					});
				}
				Ok(Some(ret))
			},
			_ => Err(DecodeError::InvalidValue),
		}
	}
}

impl Writeable for Option<Vec<Option<(usize, Signature)>>> {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
		match self {
			&Some(ref vec) => {
				1u8.write(writer)?;
				(vec.len() as u64).write(writer)?;
				for opt in vec.iter() {
					match opt {
						&Some((ref idx, ref sig)) => {
							1u8.write(writer)?;
							(*idx as u64).write(writer)?;
							sig.write(writer)?;
						},
						&None => 0u8.write(writer)?,
					}
				}
			},
			&None => 0u8.write(writer)?,
		}
		Ok(())
	}
}

/// OnchainTxHandler receives claiming requests, aggregates them if it's sound, broadcasts them and
/// does RBF bumping if possible.
pub struct OnchainTxHandler<ChanSigner: ChannelKeys> {
	destination_script: Script,
	holder_commitment: Option<HolderCommitmentTransaction>,
	// holder_htlc_sigs and prev_holder_htlc_sigs are in the order as they appear in the commitment
	// transaction outputs (hence the Option<>s inside the Vec). The first usize is the index in
	// the set of HTLCs in the HolderCommitmentTransaction (including those which do not appear in
	// the commitment transaction).
	holder_htlc_sigs: Option<Vec<Option<(usize, Signature)>>>,
	prev_holder_commitment: Option<HolderCommitmentTransaction>,
	prev_holder_htlc_sigs: Option<Vec<Option<(usize, Signature)>>>,
	on_holder_tx_csv: u16,

	key_storage: ChanSigner,

	// Used to track claiming requests. If claim tx doesn't confirm before height timer expiration we need to bump
	// it (RBF or CPFP). If an input has been part of an aggregate tx at first claim try, we need to keep it within
	// another bumped aggregate tx to comply with RBF rules. We may have multiple claiming txn in flight for the
	// same set of outpoints. One of the outpoints may be spent by a transaction not issued by us. That's why at
	// block connection we scan all inputs and if any of them is among the set of a claiming request we test for set
	// equality between the spending transaction and the claim request. If true, it means the transaction was one of
	// our claiming ones and, after a security delay of 6 blocks, we remove the pending claim request. If false, it
	// means the transaction wasn't ours and we need to regenerate a new claim request with the reduced set of
	// still-claimable outpoints.
	// Key is the identifier of the pending claim request, i.e. the txid of the initial claiming transaction generated by
	// us, and is immutable until all outpoints of the claimable set are post-anti-reorg-delay solved.
	// Entry is a cache of the elements needed to generate a bumped claiming transaction (see ClaimTxBumpMaterial).
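	// For example, if we broadcast a claim spending outpoints {A, B} and a confirmed transaction spends
	// exactly {A, B}, we treat it as our claim (or an equivalent one) and schedule removal of the request
	// after ANTI_REORG_DELAY; if it spends only {A}, we regenerate a claim spending just {B}.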
	#[cfg(test)] // Used in functional_test to verify sanitization
	pub pending_claim_requests: HashMap<Txid, ClaimTxBumpMaterial>,
	#[cfg(not(test))]
	pending_claim_requests: HashMap<Txid, ClaimTxBumpMaterial>,

	// Used to link outpoints claimed in a connected block to a pending claim request.
	// Key is the outpoint that monitor parsing has detected we have keys/scripts to claim.
	// Value is (pending claim request identifier, confirmation_block), where the identifier
	// is the txid of the initial claiming transaction and is immutable until the outpoint is
	// post-anti-reorg-delay solved; confirmation_block is used to erase the entry if the
	// block with the output gets disconnected.
	#[cfg(test)] // Used in functional_test to verify sanitization
	pub claimable_outpoints: HashMap<BitcoinOutPoint, (Txid, u32)>,
	#[cfg(not(test))]
	claimable_outpoints: HashMap<BitcoinOutPoint, (Txid, u32)>,

	onchain_events_waiting_threshold_conf: HashMap<u32, Vec<OnchainEvent>>,

	secp_ctx: Secp256k1<secp256k1::All>,
}

impl<ChanSigner: ChannelKeys + Writeable> OnchainTxHandler<ChanSigner> {
	pub(crate) fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
		self.destination_script.write(writer)?;
		self.holder_commitment.write(writer)?;
		self.holder_htlc_sigs.write(writer)?;
		self.prev_holder_commitment.write(writer)?;
		self.prev_holder_htlc_sigs.write(writer)?;

		self.on_holder_tx_csv.write(writer)?;

		self.key_storage.write(writer)?;

		writer.write_all(&byte_utils::be64_to_array(self.pending_claim_requests.len() as u64))?;
		for (ref ancestor_claim_txid, claim_tx_data) in self.pending_claim_requests.iter() {
			ancestor_claim_txid.write(writer)?;
			claim_tx_data.write(writer)?;
		}
		writer.write_all(&byte_utils::be64_to_array(self.claimable_outpoints.len() as u64))?;
		for (ref outp, ref claim_and_height) in self.claimable_outpoints.iter() {
			outp.write(writer)?;
			claim_and_height.0.write(writer)?;
			claim_and_height.1.write(writer)?;
		}
		writer.write_all(&byte_utils::be64_to_array(self.onchain_events_waiting_threshold_conf.len() as u64))?;
		for (ref target, ref events) in self.onchain_events_waiting_threshold_conf.iter() {
			writer.write_all(&byte_utils::be32_to_array(**target))?;
			writer.write_all(&byte_utils::be64_to_array(events.len() as u64))?;
			for ev in events.iter() {
				match *ev {
					OnchainEvent::Claim { ref claim_request } => {
						writer.write_all(&[0; 1])?;
						claim_request.write(writer)?;
					},
					OnchainEvent::ContentiousOutpoint { ref outpoint, ref input_material } => {
						writer.write_all(&[1; 1])?;
						outpoint.write(writer)?;
						input_material.write(writer)?;
					}
				}
			}
		}
		Ok(())
	}
}

impl<ChanSigner: ChannelKeys + Readable> Readable for OnchainTxHandler<ChanSigner> {
	fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
		let destination_script = Readable::read(reader)?;

		let holder_commitment = Readable::read(reader)?;
		let holder_htlc_sigs = Readable::read(reader)?;
		let prev_holder_commitment = Readable::read(reader)?;
		let prev_holder_htlc_sigs = Readable::read(reader)?;

		let on_holder_tx_csv = Readable::read(reader)?;

		let key_storage = Readable::read(reader)?;

		let pending_claim_requests_len: u64 = Readable::read(reader)?;
		let mut pending_claim_requests = HashMap::with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128));
		for _ in 0..pending_claim_requests_len {
			pending_claim_requests.insert(Readable::read(reader)?, Readable::read(reader)?);
		}
		let claimable_outpoints_len: u64 = Readable::read(reader)?;
		let mut claimable_outpoints = HashMap::with_capacity(cmp::min(claimable_outpoints_len as usize, MAX_ALLOC_SIZE / 128));
		for _ in 0..claimable_outpoints_len {
			let outpoint = Readable::read(reader)?;
			let ancestor_claim_txid = Readable::read(reader)?;
			let height = Readable::read(reader)?;
			claimable_outpoints.insert(outpoint, (ancestor_claim_txid, height));
		}
		let waiting_threshold_conf_len: u64 = Readable::read(reader)?;
		let mut onchain_events_waiting_threshold_conf = HashMap::with_capacity(cmp::min(waiting_threshold_conf_len as usize, MAX_ALLOC_SIZE / 128));
		for _ in 0..waiting_threshold_conf_len {
			let height_target = Readable::read(reader)?;
			let events_len: u64 = Readable::read(reader)?;
			let mut events = Vec::with_capacity(cmp::min(events_len as usize, MAX_ALLOC_SIZE / 128));
			for _ in 0..events_len {
				let ev = match <u8 as Readable>::read(reader)? {
					0 => {
						let claim_request = Readable::read(reader)?;
						OnchainEvent::Claim {
							claim_request
						}
					},
					1 => {
						let outpoint = Readable::read(reader)?;
						let input_material = Readable::read(reader)?;
						OnchainEvent::ContentiousOutpoint {
							outpoint,
							input_material
						}
					}
					_ => return Err(DecodeError::InvalidValue),
				};
				events.push(ev);
			}
			onchain_events_waiting_threshold_conf.insert(height_target, events);
		}

		Ok(OnchainTxHandler {
			destination_script,
			holder_commitment,
			holder_htlc_sigs,
			prev_holder_commitment,
			prev_holder_htlc_sigs,
			on_holder_tx_csv,
			key_storage,
			claimable_outpoints,
			pending_claim_requests,
			onchain_events_waiting_threshold_conf,
			secp_ctx: Secp256k1::new(),
		})
	}
}

impl<ChanSigner: ChannelKeys> OnchainTxHandler<ChanSigner> {
	pub(crate) fn new(destination_script: Script, keys: ChanSigner, on_holder_tx_csv: u16) -> Self {
		let key_storage = keys;

		OnchainTxHandler {
			destination_script,
			holder_commitment: None,
			holder_htlc_sigs: None,
			prev_holder_commitment: None,
			prev_holder_htlc_sigs: None,
			on_holder_tx_csv,
			key_storage,
			pending_claim_requests: HashMap::new(),
			claimable_outpoints: HashMap::new(),
			onchain_events_waiting_threshold_conf: HashMap::new(),
			secp_ctx: Secp256k1::new(),
		}
	}

	pub(crate) fn get_witnesses_weight(inputs: &[InputDescriptors]) -> usize {
		let mut tx_weight = 2; // count segwit flags
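		// Worked example: a claim spending a single RevokedOutput is estimated below at
		// 2 + (1 + 1 + 73 + 1 + 1 + 1 + 77) = 157 weight units.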
		for inp in inputs {
			// We use expected weight (and not actual) as signatures and time lock delays may vary
			tx_weight += match inp {
				// number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script
				&InputDescriptors::RevokedOfferedHTLC => {
					1 + 1 + 73 + 1 + 33 + 1 + 133
				},
				// number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script
				&InputDescriptors::RevokedReceivedHTLC => {
					1 + 1 + 73 + 1 + 33 + 1 + 139
				},
				// number_of_witness_elements + sig_length + counterpartyhtlc_sig + preimage_length + preimage + witness_script_length + witness_script
				&InputDescriptors::OfferedHTLC => {
					1 + 1 + 73 + 1 + 32 + 1 + 133
				},
				// number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script
				&InputDescriptors::ReceivedHTLC => {
					1 + 1 + 73 + 1 + 1 + 1 + 139
				},
				// number_of_witness_elements + sig_length + revocation_sig + true_length + op_true + witness_script_length + witness_script
				&InputDescriptors::RevokedOutput => {
					1 + 1 + 73 + 1 + 1 + 1 + 77
				},
			};
		}
		tx_weight
	}

	/// In LN, claimed outputs are time-sensitive, which means we have to spend them before reaching some timelock expiration. At in-channel
	/// output detection, we generate a first version of a claim tx and associate to it a height timer. A height timer is an absolute block
	/// height at which, once reached, we should generate a new bumped "version" of the claim tx to be sure we safely claim outputs before
	/// our counterparty can do so. If the timelock expires soon, the height timer is scaled down accordingly to increase the
	/// frequency of bumps and so improve our odds of success.
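	///
	/// Worked example of the schedule below: with a current height of 100, an output whose timelock
	/// expires at height 102 gets a timer of 101 (bump again next block), one expiring at height 110
	/// gets 103, and one expiring at height 200 gets 115.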
	fn get_height_timer(current_height: u32, timelock_expiration: u32) -> u32 {
		if timelock_expiration <= current_height + 3 {
			return current_height + 1
		} else if timelock_expiration - current_height <= 15 {
			return current_height + 3
		}
		current_height + 15
	}

	/// Lightning security model (i.e being able to redeem/timeout HTLC or penalize counterparty onchain) relies on the assumption of claim transactions getting confirmed before timelock expiration
	/// (CSV or CLTV following cases). In case of high-fee spikes, a claim tx may get stuck in the mempool, so you need to bump its feerate quickly using Replace-By-Fee or Child-Pay-For-Parent.
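	///
	/// Returns None if no claim can be built (e.g. no materials left, or the claimable value cannot cover
	/// the fee); otherwise returns (height timer, effective feerate, transaction). The height timer is
	/// None for pre-signed transactions (holder commitment and holder HTLC txn), which cannot be
	/// fee-bumped without anchor outputs.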
	fn generate_claim_tx<F: Deref, L: Deref>(&mut self, height: u32, cached_claim_datas: &ClaimTxBumpMaterial, fee_estimator: F, logger: L) -> Option<(Option<u32>, u32, Transaction)>
		where F::Target: FeeEstimator,
			L::Target: Logger,
	{
		if cached_claim_datas.per_input_material.len() == 0 { return None } // But don't prune pending claiming request yet, we may have to resurrect HTLCs
		let mut inputs = Vec::new();
		for outp in cached_claim_datas.per_input_material.keys() {
			log_trace!(logger, "Outpoint {}:{}", outp.txid, outp.vout);
			inputs.push(TxIn {
				previous_output: *outp,
				script_sig: Script::new(),
				sequence: 0xfffffffd,
				witness: Vec::new(),
			});
		}
		let mut bumped_tx = Transaction {
			version: 2,
			lock_time: 0,
			input: inputs,
			output: vec![TxOut {
				script_pubkey: self.destination_script.clone(),
				value: 0
			}],
		};
		macro_rules! RBF_bump {
			($amount: expr, $old_feerate: expr, $fee_estimator: expr, $predicted_weight: expr) => {
				{
					let mut used_feerate: u32;
					// If old feerate inferior to actual one given back by Fee Estimator, use it to compute new fee...
					let new_fee = if $old_feerate < $fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::HighPriority) {
						let mut value = $amount;
						if subtract_high_prio_fee!(logger, $fee_estimator, value, $predicted_weight, used_feerate) {
							// Overflow check is done in subtract_high_prio_fee
							($amount - value)
						} else {
							log_trace!(logger, "Can't new-estimation bump new claiming tx, amount {} is too small", $amount);
							return None;
						}
					// ...else just increase the previous feerate by 25% (because that's a nice number)
					} else {
						let fee = $old_feerate as u64 * ($predicted_weight as u64) / 750;
						if $amount <= fee {
							log_trace!(logger, "Can't 25% bump new claiming tx, amount {} is too small", $amount);
							return None;
						}
						fee
					};

					let previous_fee = $old_feerate as u64 * ($predicted_weight as u64) / 1000;
					let min_relay_fee = MIN_RELAY_FEE_SAT_PER_1000_WEIGHT * ($predicted_weight as u64) / 1000;
					// BIP 125 Opt-in Full Replace-by-Fee Signaling
					// * 3. The replacement transaction pays an absolute fee of at least the sum paid by the original transactions.
					// * 4. The replacement transaction must also pay for its own bandwidth at or above the rate set by the node's minimum relay fee setting.
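					// I.e. the replacement must pay at least previous_fee + min_relay_fee in total; the
					// expression below floors new_fee to exactly that sum (it simplifies to previous_fee + min_relay_fee).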
					let new_fee = if new_fee < previous_fee + min_relay_fee {
						new_fee + previous_fee + min_relay_fee - new_fee
					} else {
						new_fee
					};
					Some((new_fee, new_fee * 1000 / ($predicted_weight as u64)))
				}
			}
		}

		// Compute new height timer to decide when we need to regenerate a new bumped version of the claim tx (if we
		// didn't receive confirmation of it before, or not enough reorg-safe depth on top of it).
		let new_timer = Some(Self::get_height_timer(height, cached_claim_datas.soonest_timelock));
		let mut inputs_witnesses_weight = 0;
		let mut amt = 0;
		let mut dynamic_fee = true;
		for per_outp_material in cached_claim_datas.per_input_material.values() {
			match per_outp_material {
				&InputMaterial::Revoked { ref input_descriptor, ref amount, .. } => {
					inputs_witnesses_weight += Self::get_witnesses_weight(&[*input_descriptor]);
					amt += *amount;
				},
				&InputMaterial::CounterpartyHTLC { ref preimage, ref htlc, .. } => {
					inputs_witnesses_weight += Self::get_witnesses_weight(if preimage.is_some() { &[InputDescriptors::OfferedHTLC] } else { &[InputDescriptors::ReceivedHTLC] });
					amt += htlc.amount_msat / 1000;
				},
				&InputMaterial::HolderHTLC { .. } => {
					dynamic_fee = false;
				},
				&InputMaterial::Funding { .. } => {
					dynamic_fee = false;
				}
			}
		}
		if dynamic_fee {
			let predicted_weight = (bumped_tx.get_weight() + inputs_witnesses_weight) as u64;
			let mut new_feerate;
			// If old feerate is 0, first iteration of this claim, use normal fee calculation
			if cached_claim_datas.feerate_previous != 0 {
				if let Some((new_fee, feerate)) = RBF_bump!(amt, cached_claim_datas.feerate_previous, fee_estimator, predicted_weight) {
					// If the new computed fee is superior to the whole claimable amount, burn all in fees
					if new_fee as u64 > amt {
						bumped_tx.output[0].value = 0;
					} else {
						bumped_tx.output[0].value = amt - new_fee as u64;
					}
					new_feerate = feerate;
				} else { return None; }
			} else {
				if subtract_high_prio_fee!(logger, fee_estimator, amt, predicted_weight, new_feerate) {
					bumped_tx.output[0].value = amt;
				} else { return None; }
			}
			assert!(new_feerate != 0);
			for (i, (outp, per_outp_material)) in cached_claim_datas.per_input_material.iter().enumerate() {
				match per_outp_material {
					&InputMaterial::Revoked { ref per_commitment_point, ref counterparty_delayed_payment_base_key, ref counterparty_htlc_base_key, ref per_commitment_key, ref input_descriptor, ref amount, ref htlc, ref on_counterparty_tx_csv } => {
						if let Ok(chan_keys) = TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, counterparty_delayed_payment_base_key, counterparty_htlc_base_key, &self.key_storage.pubkeys().revocation_basepoint, &self.key_storage.pubkeys().htlc_basepoint) {

							let witness_script = if let Some(ref htlc) = *htlc {
								chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, &chan_keys.broadcaster_htlc_key, &chan_keys.countersignatory_htlc_key, &chan_keys.revocation_key)
							} else {
								chan_utils::get_revokeable_redeemscript(&chan_keys.revocation_key, *on_counterparty_tx_csv, &chan_keys.broadcaster_delayed_payment_key)
							};

							if let Ok(sig) = self.key_storage.sign_justice_transaction(&bumped_tx, i, *amount, &per_commitment_key, htlc, &self.secp_ctx) {
								bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
								bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
								if htlc.is_some() {
									bumped_tx.input[i].witness.push(chan_keys.revocation_key.clone().serialize().to_vec());
								} else {
									bumped_tx.input[i].witness.push(vec!(1));
								}
								bumped_tx.input[i].witness.push(witness_script.clone().into_bytes());
							} else { return None; }
							//TODO: panic ?

							log_trace!(logger, "Going to broadcast Penalty Transaction {} claiming revoked {} output {} from {} with new feerate {}...", bumped_tx.txid(), if *input_descriptor == InputDescriptors::RevokedOutput { "to_holder" } else if *input_descriptor == InputDescriptors::RevokedOfferedHTLC { "offered" } else if *input_descriptor == InputDescriptors::RevokedReceivedHTLC { "received" } else { "" }, outp.vout, outp.txid, new_feerate);
						}
					},
					&InputMaterial::CounterpartyHTLC { ref per_commitment_point, ref counterparty_delayed_payment_base_key, ref counterparty_htlc_base_key, ref preimage, ref htlc } => {
						if let Ok(chan_keys) = TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, counterparty_delayed_payment_base_key, counterparty_htlc_base_key, &self.key_storage.pubkeys().revocation_basepoint, &self.key_storage.pubkeys().htlc_basepoint) {
							let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, &chan_keys.broadcaster_htlc_key, &chan_keys.countersignatory_htlc_key, &chan_keys.revocation_key);

							if !preimage.is_some() { bumped_tx.lock_time = htlc.cltv_expiry }; // Right now we don't aggregate time-locked transaction, if we do we should set lock_time before to avoid breaking hash computation
							if let Ok(sig) = self.key_storage.sign_counterparty_htlc_transaction(&bumped_tx, i, &htlc.amount_msat / 1000, &per_commitment_point, htlc, &self.secp_ctx) {
								bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
								bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
								if let &Some(preimage) = preimage {
									bumped_tx.input[i].witness.push(preimage.0.to_vec());
								} else {
									// Due to BIP146 (MINIMALIF) this must be a zero-length element to relay.
									bumped_tx.input[i].witness.push(vec![]);
								}
								bumped_tx.input[i].witness.push(witness_script.clone().into_bytes());
							}

							log_trace!(logger, "Going to broadcast Claim Transaction {} claiming counterparty {} htlc output {} from {} with new feerate {}...", bumped_tx.txid(), if preimage.is_some() { "offered" } else { "received" }, outp.vout, outp.txid, new_feerate);
						}
					},
					_ => unreachable!()
				}
			}
			log_trace!(logger, "...with timer {}", new_timer.unwrap());
			assert!(predicted_weight >= bumped_tx.get_weight() as u64);
			return Some((new_timer, new_feerate as u32, bumped_tx))
		} else {
			for (_, (outp, per_outp_material)) in cached_claim_datas.per_input_material.iter().enumerate() {
				match per_outp_material {
					&InputMaterial::HolderHTLC { ref preimage, ref amount } => {
						let htlc_tx = self.get_fully_signed_htlc_tx(outp, preimage);
						if let Some(htlc_tx) = htlc_tx {
							let feerate = (amount - htlc_tx.output[0].value) * 1000 / htlc_tx.get_weight() as u64;
							// Timer set to $NEVER given we can't bump tx without anchor outputs
							log_trace!(logger, "Going to broadcast Holder HTLC-{} claiming HTLC output {} from {}...", if preimage.is_some() { "Success" } else { "Timeout" }, outp.vout, outp.txid);
							return Some((None, feerate as u32, htlc_tx));
						}
						return None;
					},
					&InputMaterial::Funding { ref funding_redeemscript } => {
						let signed_tx = self.get_fully_signed_holder_tx(funding_redeemscript).unwrap();
						// Timer set to $NEVER given we can't bump tx without anchor outputs
						log_trace!(logger, "Going to broadcast Holder Transaction {} claiming funding output {} from {}...", signed_tx.txid(), outp.vout, outp.txid);
						return Some((None, self.holder_commitment.as_ref().unwrap().feerate_per_kw, signed_tx));
					}
					_ => unreachable!()
				}
			}
		}
		None
	}

	pub(crate) fn block_connected<B: Deref, F: Deref, L: Deref>(&mut self, txn_matched: &[&Transaction], claimable_outpoints: Vec<ClaimRequest>, height: u32, broadcaster: B, fee_estimator: F, logger: L)
		where B::Target: BroadcasterInterface,
			F::Target: FeeEstimator,
			L::Target: Logger,
	{
		log_trace!(logger, "Block at height {} connected with {} claim requests", height, claimable_outpoints.len());
		let mut new_claims = Vec::new();
		let mut aggregated_claim = HashMap::new();
		let mut aggregated_soonest = ::std::u32::MAX;

		// Try to aggregate outputs if their timelock expiration isn't imminent (absolute_timelock
		// <= CLTV_SHARED_CLAIM_BUFFER) and they don't require an immediate nLockTime (aggregable).
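		// E.g. an HTLC whose timelock expires within CLTV_SHARED_CLAIM_BUFFER blocks of the current height is
		// claimed on its own, while later-expiring, aggregable outpoints are batched into a single claim tx.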
		for req in claimable_outpoints {
			// Don't claim an outpoint twice: that would be bad for privacy and may uselessly lock a CPFP input for a while
			if let Some(_) = self.claimable_outpoints.get(&req.outpoint) { log_trace!(logger, "Bouncing off outpoint {}:{}, already registered its claiming request", req.outpoint.txid, req.outpoint.vout); } else {
				log_trace!(logger, "Test if outpoint can be aggregated with expiration {} against {}", req.absolute_timelock, height + CLTV_SHARED_CLAIM_BUFFER);
				if req.absolute_timelock <= height + CLTV_SHARED_CLAIM_BUFFER || !req.aggregable { // Don't aggregate if outpoint absolute timelock is soon or marked as non-aggregable
					let mut single_input = HashMap::new();
					single_input.insert(req.outpoint, req.witness_data);
					new_claims.push((req.absolute_timelock, single_input));
				} else {
					aggregated_claim.insert(req.outpoint, req.witness_data);
					if req.absolute_timelock < aggregated_soonest {
						aggregated_soonest = req.absolute_timelock;
					}
				}
			}
		}
		new_claims.push((aggregated_soonest, aggregated_claim));

		// Generate claim transactions and track them to bump if necessary at
		// height timer expiration (i.e in how many blocks we're going to take action).
		for (soonest_timelock, claim) in new_claims.drain(..) {
			let mut claim_material = ClaimTxBumpMaterial { height_timer: None, feerate_previous: 0, soonest_timelock, per_input_material: claim };
			if let Some((new_timer, new_feerate, tx)) = self.generate_claim_tx(height, &claim_material, &*fee_estimator, &*logger) {
				claim_material.height_timer = new_timer;
				claim_material.feerate_previous = new_feerate;
				let txid = tx.txid();
				for k in claim_material.per_input_material.keys() {
					log_trace!(logger, "Registering claiming request for {}:{}", k.txid, k.vout);
					self.claimable_outpoints.insert(k.clone(), (txid, height));
				}
				self.pending_claim_requests.insert(txid, claim_material);
				log_trace!(logger, "Broadcast onchain {}", log_tx!(tx));
				broadcaster.broadcast_transaction(&tx);
			}
		}

		let mut bump_candidates = HashMap::new();
		for tx in txn_matched {
			// Scan all inputs to verify if one of the outpoints spent is of interest to us
			let mut claimed_outputs_material = Vec::new();
			for inp in &tx.input {
				if let Some(first_claim_txid_height) = self.claimable_outpoints.get(&inp.previous_output) {
					// If outpoint has claim request pending on it...
					if let Some(claim_material) = self.pending_claim_requests.get_mut(&first_claim_txid_height.0) {
						//... we need to verify equality between transaction outpoints and claim request
						// outpoints to know if transaction is the original claim or a bumped one issued
						// by us.
						let mut set_equality = true;
						if claim_material.per_input_material.len() != tx.input.len() {
							set_equality = false;
						} else {
							for (claim_inp, tx_inp) in claim_material.per_input_material.keys().zip(tx.input.iter()) {
								if *claim_inp != tx_inp.previous_output {
									set_equality = false;
								}
							}
						}

						macro_rules! clean_claim_request_after_safety_delay {
							() => {
								let new_event = OnchainEvent::Claim { claim_request: first_claim_txid_height.0.clone() };
								match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
									hash_map::Entry::Occupied(mut entry) => {
										if !entry.get().contains(&new_event) {
											entry.get_mut().push(new_event);
										}
									},
									hash_map::Entry::Vacant(entry) => {
										entry.insert(vec![new_event]);
									}
								}
							}
						}

						// If this is our transaction (or our counterparty spent all the outputs
						// before we could anyway, with the same input order as us), wait for
						// ANTI_REORG_DELAY and clean the RBF tracking map.
						if set_equality {
							clean_claim_request_after_safety_delay!();
						} else { // If false, generate new claim request with updated outpoint set
							let mut at_least_one_drop = false;
							for input in tx.input.iter() {
								if let Some(input_material) = claim_material.per_input_material.remove(&input.previous_output) {
									claimed_outputs_material.push((input.previous_output, input_material));
									at_least_one_drop = true;
								}
								// If there are no outpoints left to claim in this request, drop it entirely after ANTI_REORG_DELAY.
								if claim_material.per_input_material.is_empty() {
									clean_claim_request_after_safety_delay!();
								}
							}
							//TODO: recompute soonest_timelock to avoid wasting a bit on fees
							if at_least_one_drop {
								bump_candidates.insert(first_claim_txid_height.0.clone(), claim_material.clone());
							}
						}
						break; //No need to iterate further, either tx is ours or theirs
					} else {
						panic!("Inconsistencies between pending_claim_requests map and claimable_outpoints map");
					}
				}
			}
			for (outpoint, input_material) in claimed_outputs_material.drain(..) {
				let new_event = OnchainEvent::ContentiousOutpoint { outpoint, input_material };
				match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
					hash_map::Entry::Occupied(mut entry) => {
						if !entry.get().contains(&new_event) {
							entry.get_mut().push(new_event);
						}
					},
					hash_map::Entry::Vacant(entry) => {
						entry.insert(vec![new_event]);
					}
				}
			}
		}

		// After security delay, either our claim tx got enough confs or outpoint is definitely out of reach
		if let Some(events) = self.onchain_events_waiting_threshold_conf.remove(&height) {
			for ev in events {
				match ev {
					OnchainEvent::Claim { claim_request } => {
						// We may remove a whole set of claim outpoints here, as these may have
						// been aggregated in a single tx and claimed atomically
						if let Some(bump_material) = self.pending_claim_requests.remove(&claim_request) {
							for outpoint in bump_material.per_input_material.keys() {
								self.claimable_outpoints.remove(&outpoint);
							}
						}
					},
					OnchainEvent::ContentiousOutpoint { outpoint, .. } => {
						self.claimable_outpoints.remove(&outpoint);
					}
				}
			}
		}

		// Check if any pending claim request must be rescheduled
		for (first_claim_txid, ref claim_data) in self.pending_claim_requests.iter() {
			if let Some(h) = claim_data.height_timer {
				if h == height {
					bump_candidates.insert(*first_claim_txid, (*claim_data).clone());
				}
			}
		}

		// Build, bump and rebroadcast tx accordingly
		log_trace!(logger, "Bumping {} candidates", bump_candidates.len());
		for (first_claim_txid, claim_material) in bump_candidates.iter() {
			if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &claim_material, &*fee_estimator, &*logger) {
				log_trace!(logger, "Broadcast onchain {}", log_tx!(bump_tx));
				broadcaster.broadcast_transaction(&bump_tx);
				if let Some(claim_material) = self.pending_claim_requests.get_mut(first_claim_txid) {
					claim_material.height_timer = new_timer;
					claim_material.feerate_previous = new_feerate;
				}
			}
		}
	}

	pub(crate) fn block_disconnected<B: Deref, F: Deref, L: Deref>(&mut self, height: u32, broadcaster: B, fee_estimator: F, logger: L)
		where B::Target: BroadcasterInterface,
			F::Target: FeeEstimator,
			L::Target: Logger,
	{
		let mut bump_candidates = HashMap::new();
		if let Some(events) = self.onchain_events_waiting_threshold_conf.remove(&(height + ANTI_REORG_DELAY - 1)) {
			//- our claim tx on a commitment tx output
			//- resurrect outpoint back in its claimable set and regenerate tx
			for ev in events {
				match ev {
					OnchainEvent::ContentiousOutpoint { outpoint, input_material } => {
						if let Some(ancestor_claimable_txid) = self.claimable_outpoints.get(&outpoint) {
							if let Some(claim_material) = self.pending_claim_requests.get_mut(&ancestor_claimable_txid.0) {
								claim_material.per_input_material.insert(outpoint, input_material);
								// Using a HashMap guarantees us that if we have multiple outpoints getting
								// resurrected only one bump claim tx is going to be broadcast
								bump_candidates.insert(ancestor_claimable_txid.clone(), claim_material.clone());
							}
						}
					},
					_ => {},
				}
			}
		}
		for (_, claim_material) in bump_candidates.iter_mut() {
			if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &claim_material, &*fee_estimator, &*logger) {
				claim_material.height_timer = new_timer;
				claim_material.feerate_previous = new_feerate;
				broadcaster.broadcast_transaction(&bump_tx);
			}
		}
		for (ancestor_claim_txid, claim_material) in bump_candidates.drain() {
			self.pending_claim_requests.insert(ancestor_claim_txid.0, claim_material);
		}
		//TODO: if we implement cross-block aggregated claim transaction we need to refresh set of outpoints and regenerate tx but
		// right now if one of the outpoints gets disconnected, just erase the whole pending claim request.
		let mut remove_request = Vec::new();
		self.claimable_outpoints.retain(|_, ref v|
			if v.1 == height {
				remove_request.push(v.0.clone());
				false
			} else { true });
		for req in remove_request {
			self.pending_claim_requests.remove(&req);
		}
	}

	pub(crate) fn provide_latest_holder_tx(&mut self, tx: HolderCommitmentTransaction) {
		self.prev_holder_commitment = self.holder_commitment.take();
		self.holder_commitment = Some(tx);
	}

	fn sign_latest_holder_htlcs(&mut self) {
		if let Some(ref holder_commitment) = self.holder_commitment {
			if let Ok(sigs) = self.key_storage.sign_holder_commitment_htlc_transactions(holder_commitment, &self.secp_ctx) {
				self.holder_htlc_sigs = Some(Vec::new());
				let ret = self.holder_htlc_sigs.as_mut().unwrap();
				for (htlc_idx, (holder_sig, &(ref htlc, _))) in sigs.iter().zip(holder_commitment.per_htlc.iter()).enumerate() {
					if let Some(tx_idx) = htlc.transaction_output_index {
						if ret.len() <= tx_idx as usize { ret.resize(tx_idx as usize + 1, None); }
						ret[tx_idx as usize] = Some((htlc_idx, holder_sig.expect("Did not receive a signature for a non-dust HTLC")));
					} else {
						assert!(holder_sig.is_none(), "Received a signature for a dust HTLC");
					}
				}
			}
		}
	}

	fn sign_prev_holder_htlcs(&mut self) {
		if let Some(ref holder_commitment) = self.prev_holder_commitment {
			if let Ok(sigs) = self.key_storage.sign_holder_commitment_htlc_transactions(holder_commitment, &self.secp_ctx) {
				self.prev_holder_htlc_sigs = Some(Vec::new());
				let ret = self.prev_holder_htlc_sigs.as_mut().unwrap();
				for (htlc_idx, (holder_sig, &(ref htlc, _))) in sigs.iter().zip(holder_commitment.per_htlc.iter()).enumerate() {
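					// Key each signature by the HTLC's output position in the commitment transaction;
					// dust HTLCs have no output (and thus no signature), and non-HTLC outputs stay None.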
					if let Some(tx_idx) = htlc.transaction_output_index {
						if ret.len() <= tx_idx as usize { ret.resize(tx_idx as usize + 1, None); }
						ret[tx_idx as usize] = Some((htlc_idx, holder_sig.expect("Did not receive a signature for a non-dust HTLC")));
					} else {
						assert!(holder_sig.is_none(), "Received a signature for a dust HTLC");
					}
				}
			}
		}
	}

	//TODO: getting latest holder transactions should be infallible and result in us "force-closing the channel", but we may
	// have an empty holder commitment transaction if a ChannelMonitor is asked to force-close just after
	// Channel::get_outbound_funding_created, before providing an initial commitment transaction. For an outbound
	// channel, init ChannelMonitor at Channel::funding_signed; there is nothing to monitor before.
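	// Returns the fully-signed latest holder commitment transaction (our signature added to the
	// funding input via add_holder_sig), or None if no holder commitment is available yet or the
	// signer refused to sign.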
	pub(crate) fn get_fully_signed_holder_tx(&mut self, funding_redeemscript: &Script) -> Option<Transaction> {
		if let Some(ref mut holder_commitment) = self.holder_commitment {
			match self.key_storage.sign_holder_commitment(holder_commitment, &self.secp_ctx) {
				Ok(sig) => Some(holder_commitment.add_holder_sig(funding_redeemscript, sig)),
				Err(_) => return None,
			}
		} else {
			None
		}
	}

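	// Test-only variant of the above which signs a clone of the holder commitment, leaving the
	// cached copy untouched. Only compiled under cfg(test) or the unsafe_revoked_tx_signing feature.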
	#[cfg(any(test, feature = "unsafe_revoked_tx_signing"))]
	pub(crate) fn get_fully_signed_copy_holder_tx(&mut self, funding_redeemscript: &Script) -> Option<Transaction> {
		if let Some(ref mut holder_commitment) = self.holder_commitment {
			let holder_commitment = holder_commitment.clone();
			match self.key_storage.sign_holder_commitment(&holder_commitment, &self.secp_ctx) {
				Ok(sig) => Some(holder_commitment.add_holder_sig(funding_redeemscript, sig)),
				Err(_) => return None,
			}
		} else {
			None
		}
	}

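	// Builds a fully-signed transaction spending the given HTLC output of either the latest or the
	// previous holder commitment transaction, lazily generating and caching the HTLC signatures as
	// needed and threading the payment preimage, if any, into witness construction. Returns None if
	// the outpoint does not belong to one of our holder commitment transactions or if the HTLC
	// signatures could not be generated.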
	pub(crate) fn get_fully_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>) -> Option<Transaction> {
		let mut htlc_tx = None;
		if self.holder_commitment.is_some() {
			let commitment_txid = self.holder_commitment.as_ref().unwrap().txid();
			if commitment_txid == outp.txid {
				self.sign_latest_holder_htlcs();
				if let &Some(ref htlc_sigs) = &self.holder_htlc_sigs {
					let &(ref htlc_idx, ref htlc_sig) = htlc_sigs[outp.vout as usize].as_ref().unwrap();
					htlc_tx = Some(self.holder_commitment.as_ref().unwrap()
						.get_signed_htlc_tx(*htlc_idx, htlc_sig, preimage, self.on_holder_tx_csv));
				}
			}
		}
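		// The outpoint may instead spend the previous holder commitment transaction; check it too.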
		if self.prev_holder_commitment.is_some() {
			let commitment_txid = self.prev_holder_commitment.as_ref().unwrap().txid();
			if commitment_txid == outp.txid {
				self.sign_prev_holder_htlcs();
				if let &Some(ref htlc_sigs) = &self.prev_holder_htlc_sigs {
					let &(ref htlc_idx, ref htlc_sig) = htlc_sigs[outp.vout as usize].as_ref().unwrap();
					htlc_tx = Some(self.prev_holder_commitment.as_ref().unwrap()
						.get_signed_htlc_tx(*htlc_idx, htlc_sig, preimage, self.on_holder_tx_csv));
				}
			}
		}
		htlc_tx
	}

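	// Test-only wrapper around get_fully_signed_htlc_tx which drops any HTLC signatures it had to
	// generate on the fly, leaving the cached signature state exactly as it was before the call.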
	#[cfg(any(test, feature = "unsafe_revoked_tx_signing"))]
	pub(crate) fn unsafe_get_fully_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>) -> Option<Transaction> {
		let latest_had_sigs = self.holder_htlc_sigs.is_some();
		let prev_had_sigs = self.prev_holder_htlc_sigs.is_some();
		let ret = self.get_fully_signed_htlc_tx(outp, preimage);
		if !latest_had_sigs {
			self.holder_htlc_sigs = None;
		}
		if !prev_had_sigs {
			self.prev_holder_htlc_sigs = None;
		}
		ret
	}
}