// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! The logic to build claims and bump in-flight transactions until confirmation.
//!
//! OnchainTxHandler objects are fully part of ChannelMonitor and encapsulate all the
//! transaction building, tracking, bumping and notification logic.

use bitcoin::blockdata::transaction::Transaction;
use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
use bitcoin::blockdata::script::Script;

use bitcoin::hash_types::Txid;

use bitcoin::secp256k1::{Secp256k1, Signature};
use bitcoin::secp256k1;

use ln::msgs::DecodeError;
use ln::PaymentPreimage;
use ln::chan_utils::{ChannelTransactionParameters, HolderCommitmentTransaction};
use chain::chaininterface::{FeeEstimator, BroadcasterInterface};
use chain::channelmonitor::{ANTI_REORG_DELAY, CLTV_SHARED_CLAIM_BUFFER};
use chain::keysinterface::{Sign, KeysInterface};
use chain::package::PackageTemplate;
use util::logger::Logger;
use util::ser::{Readable, ReadableArgs, Writer, Writeable, VecWriter};
use util::byte_utils;

use prelude::*;
use alloc::collections::BTreeMap;
use core::cmp;
use core::ops::Deref;
use core::mem::replace;

const MAX_ALLOC_SIZE: usize = 64 * 1024;

/// An entry for an [`OnchainEvent`], stating the block height when the event was observed and the
/// transaction causing it.
///
/// Used to determine when the on-chain event can be considered safe from a chain reorganization.
#[derive(PartialEq)]
struct OnchainEventEntry {
	txid: Txid,
	height: u32,
	event: OnchainEvent,
}

impl OnchainEventEntry {
	fn confirmation_threshold(&self) -> u32 {
		self.height + ANTI_REORG_DELAY - 1
	}

	fn has_reached_confirmation_threshold(&self, height: u32) -> bool {
		height >= self.confirmation_threshold()
	}
}
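
// As a worked example (assuming ANTI_REORG_DELAY is 6, its value at the time of writing): an event
// first observed in a block at height 100 reaches its confirmation threshold at height
// 100 + 6 - 1 = 105, i.e. once the chain contains six blocks (heights 100 through 105 inclusive)
// including the one containing the event.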

/// Upon discovering some classes of onchain txn by ChannelMonitor, we may have to take actions on them
/// once they mature to enough confirmations (ANTI_REORG_DELAY)
#[derive(PartialEq)]
enum OnchainEvent {
	/// Outpoint under claim process by our own tx. Once it gets enough confirmations, we remove it
	/// from the bump-txn candidate buffer.
	Claim {
		claim_request: Txid,
	},
	/// A claim tx may aggregate multiple claimable outpoints. One of the outpoints may be claimed by a
	/// counterparty tx. In this case, we need to drop the outpoint and regenerate a new claim tx. For
	/// safety, we keep tracking the outpoint so we can resurrect it back into the claim tx if a reorg
	/// happens.
	ContentiousOutpoint {
		package: PackageTemplate,
	}
}

impl_writeable_tlv_based!(OnchainEventEntry, {
	(0, txid),
	(2, height),
	(4, event),
}, {}, {});

impl_writeable_tlv_based_enum!(OnchainEvent,
	(0, Claim) => {
		(0, claim_request),
	}, {}, {},
	(1, ContentiousOutpoint) => {
		(0, package),
	}, {}, {},
;);
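
// (De)serialization for the cached per-HTLC holder signatures (an Option<Vec<Option<(usize, Signature)>>>).
// The encoding is length-prefixed: a 0u8 marks None, a 1u8 is followed by the vector length as a u64 and
// then, for each element, another 0u8/1u8 tag plus the (index, signature) pair when present.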
impl Readable for Option<Vec<Option<(usize, Signature)>>> {
	fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
		match Readable::read(reader)? {
			0u8 => Ok(None),
			1u8 => {
				let vlen: u64 = Readable::read(reader)?;
				let mut ret = Vec::with_capacity(cmp::min(vlen as usize, MAX_ALLOC_SIZE / ::core::mem::size_of::<Option<(usize, Signature)>>()));
				for _ in 0..vlen {
					ret.push(match Readable::read(reader)? {
						0u8 => None,
						1u8 => Some((<u64 as Readable>::read(reader)? as usize, Readable::read(reader)?)),
						_ => return Err(DecodeError::InvalidValue)
					});
				}
				Ok(Some(ret))
			},
			_ => Err(DecodeError::InvalidValue),
		}
	}
}

impl Writeable for Option<Vec<Option<(usize, Signature)>>> {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
		match self {
			&Some(ref vec) => {
				1u8.write(writer)?;
				(vec.len() as u64).write(writer)?;
				for opt in vec.iter() {
					match opt {
						&Some((ref idx, ref sig)) => {
							1u8.write(writer)?;
							(*idx as u64).write(writer)?;
							sig.write(writer)?;
						},
						&None => 0u8.write(writer)?,
					}
				}
			},
			&None => 0u8.write(writer)?,
		}
		Ok(())
	}
}

/// OnchainTxHandler receives claiming requests, aggregates them if it's sound, broadcasts them and
/// does RBF bumping if possible.
pub struct OnchainTxHandler<ChannelSigner: Sign> {
	destination_script: Script,
	holder_commitment: HolderCommitmentTransaction,
	// holder_htlc_sigs and prev_holder_htlc_sigs are in the order in which they appear in the commitment
	// transaction outputs (hence the Option<>s inside the Vec). The first usize is the index in
	// the set of HTLCs in the HolderCommitmentTransaction.
	holder_htlc_sigs: Option<Vec<Option<(usize, Signature)>>>,
	prev_holder_commitment: Option<HolderCommitmentTransaction>,
	prev_holder_htlc_sigs: Option<Vec<Option<(usize, Signature)>>>,

	pub(super) signer: ChannelSigner,
	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,

	// Used to track claiming requests. If a claim tx doesn't confirm before the height timer expires we need
	// to bump it (RBF or CPFP). If an input has been part of an aggregate tx at the first claim attempt, we
	// need to keep it within another bumped aggregate tx to comply with RBF rules. We may have multiple
	// claiming txn in flight for the same set of outpoints. One of the outpoints may be spent by a transaction
	// not issued by us. That's why at block connection we scan all inputs and, if any of them belongs to the
	// set of a claiming request, we test for set equality between the spending transaction and the claim
	// request. If they are equal, the transaction was one of our claiming transactions and, after a security
	// delay of 6 blocks, we remove the pending claim request. If not, the transaction wasn't ours and we need
	// to regenerate a new claim request with the reduced set of still-claimable outpoints.
	// Key is the identifier of the pending claim request, i.e. the txid of the initial claiming transaction
	// generated by us, and is immutable until all outpoints of the claimable set are post-anti-reorg-delay solved.
	// Entry is a cache of the elements needed to generate a bumped claiming transaction (see ClaimTxBumpMaterial)
	#[cfg(test)] // Used in functional_test to verify sanitization
	pub(crate) pending_claim_requests: HashMap<Txid, PackageTemplate>,
	#[cfg(not(test))]
	pending_claim_requests: HashMap<Txid, PackageTemplate>,

	// Used to link outpoints claimed in a connected block to a pending claim request.
	// Key is an outpoint for which monitor parsing has detected we have keys/scripts to claim.
	// Value is (pending claim request identifier, confirmation_block); the identifier
	// is the txid of the initial claiming transaction and is immutable until the outpoint is
	// post-anti-reorg-delay solved, and confirmation_block is used to erase the entry if the
	// block containing the output gets disconnected.
	#[cfg(test)] // Used in functional_test to verify sanitization
	pub claimable_outpoints: HashMap<BitcoinOutPoint, (Txid, u32)>,
	#[cfg(not(test))]
	claimable_outpoints: HashMap<BitcoinOutPoint, (Txid, u32)>,

	locktimed_packages: BTreeMap<u32, Vec<PackageTemplate>>,

	onchain_events_awaiting_threshold_conf: Vec<OnchainEventEntry>,

	pub(super) secp_ctx: Secp256k1<secp256k1::All>,
}
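
// As an illustration of how the two maps above relate (hypothetical values): if we broadcast a
// single claim tx with txid T spending outpoints A and B, then pending_claim_requests holds
// T -> PackageTemplate{A, B} while claimable_outpoints holds both A -> (T, height) and
// B -> (T, height). If a counterparty tx later spends only A, A's package is split out of T's
// request and tracked as a ContentiousOutpoint until ANTI_REORG_DELAY.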

const SERIALIZATION_VERSION: u8 = 1;
const MIN_SERIALIZATION_VERSION: u8 = 1;

impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
	pub(crate) fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
		write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);

		self.destination_script.write(writer)?;
		self.holder_commitment.write(writer)?;
		self.holder_htlc_sigs.write(writer)?;
		self.prev_holder_commitment.write(writer)?;
		self.prev_holder_htlc_sigs.write(writer)?;

		self.channel_transaction_parameters.write(writer)?;

		let mut key_data = VecWriter(Vec::new());
		self.signer.write(&mut key_data)?;
		assert!(key_data.0.len() < core::usize::MAX);
		assert!(key_data.0.len() < core::u32::MAX as usize);
		(key_data.0.len() as u32).write(writer)?;
		writer.write_all(&key_data.0[..])?;

		writer.write_all(&byte_utils::be64_to_array(self.pending_claim_requests.len() as u64))?;
		for (ref ancestor_claim_txid, request) in self.pending_claim_requests.iter() {
			ancestor_claim_txid.write(writer)?;
			request.write(writer)?;
		}

		writer.write_all(&byte_utils::be64_to_array(self.claimable_outpoints.len() as u64))?;
		for (ref outp, ref claim_and_height) in self.claimable_outpoints.iter() {
			outp.write(writer)?;
			claim_and_height.0.write(writer)?;
			claim_and_height.1.write(writer)?;
		}

		writer.write_all(&byte_utils::be64_to_array(self.locktimed_packages.len() as u64))?;
		for (ref locktime, ref packages) in self.locktimed_packages.iter() {
			locktime.write(writer)?;
			writer.write_all(&byte_utils::be64_to_array(packages.len() as u64))?;
			for ref package in packages.iter() {
				package.write(writer)?;
			}
		}

		writer.write_all(&byte_utils::be64_to_array(self.onchain_events_awaiting_threshold_conf.len() as u64))?;
		for ref entry in self.onchain_events_awaiting_threshold_conf.iter() {
			entry.write(writer)?;
		}

		write_tlv_fields!(writer, {}, {});
		Ok(())
	}
}

impl<'a, K: KeysInterface> ReadableArgs<&'a K> for OnchainTxHandler<K::Signer> {
	fn read<R: ::std::io::Read>(reader: &mut R, keys_manager: &'a K) -> Result<Self, DecodeError> {
		let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);

		let destination_script = Readable::read(reader)?;

		let holder_commitment = Readable::read(reader)?;
		let holder_htlc_sigs = Readable::read(reader)?;
		let prev_holder_commitment = Readable::read(reader)?;
		let prev_holder_htlc_sigs = Readable::read(reader)?;

		let channel_parameters = Readable::read(reader)?;

		let keys_len: u32 = Readable::read(reader)?;
		let mut keys_data = Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE));
		while keys_data.len() != keys_len as usize {
			// Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
			let mut data = [0; 1024];
			let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.len())];
			reader.read_exact(read_slice)?;
			keys_data.extend_from_slice(read_slice);
		}
		let signer = keys_manager.read_chan_signer(&keys_data)?;

		let pending_claim_requests_len: u64 = Readable::read(reader)?;
		let mut pending_claim_requests = HashMap::with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128));
		for _ in 0..pending_claim_requests_len {
			pending_claim_requests.insert(Readable::read(reader)?, Readable::read(reader)?);
		}

		let claimable_outpoints_len: u64 = Readable::read(reader)?;
		let mut claimable_outpoints = HashMap::with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128));
		for _ in 0..claimable_outpoints_len {
			let outpoint = Readable::read(reader)?;
			let ancestor_claim_txid = Readable::read(reader)?;
			let height = Readable::read(reader)?;
			claimable_outpoints.insert(outpoint, (ancestor_claim_txid, height));
		}

		let locktimed_packages_len: u64 = Readable::read(reader)?;
		let mut locktimed_packages = BTreeMap::new();
		for _ in 0..locktimed_packages_len {
			let locktime = Readable::read(reader)?;
			let packages_len: u64 = Readable::read(reader)?;
			let mut packages = Vec::with_capacity(cmp::min(packages_len as usize, MAX_ALLOC_SIZE / std::mem::size_of::<PackageTemplate>()));
			for _ in 0..packages_len {
				packages.push(Readable::read(reader)?);
			}
			locktimed_packages.insert(locktime, packages);
		}

		let waiting_threshold_conf_len: u64 = Readable::read(reader)?;
		let mut onchain_events_awaiting_threshold_conf = Vec::with_capacity(cmp::min(waiting_threshold_conf_len as usize, MAX_ALLOC_SIZE / 128));
		for _ in 0..waiting_threshold_conf_len {
			onchain_events_awaiting_threshold_conf.push(Readable::read(reader)?);
		}

		read_tlv_fields!(reader, {}, {});

		let mut secp_ctx = Secp256k1::new();
		secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes());

		Ok(OnchainTxHandler {
			destination_script,
			holder_commitment,
			holder_htlc_sigs,
			prev_holder_commitment,
			prev_holder_htlc_sigs,
			signer,
			channel_transaction_parameters: channel_parameters,
			claimable_outpoints,
			locktimed_packages,
			pending_claim_requests,
			onchain_events_awaiting_threshold_conf,
			secp_ctx,
		})
	}
}

impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
	pub(crate) fn new(destination_script: Script, signer: ChannelSigner, channel_parameters: ChannelTransactionParameters, holder_commitment: HolderCommitmentTransaction, secp_ctx: Secp256k1<secp256k1::All>) -> Self {
		OnchainTxHandler {
			destination_script,
			holder_commitment,
			holder_htlc_sigs: None,
			prev_holder_commitment: None,
			prev_holder_htlc_sigs: None,
			signer,
			channel_transaction_parameters: channel_parameters,
			pending_claim_requests: HashMap::new(),
			claimable_outpoints: HashMap::new(),
			locktimed_packages: BTreeMap::new(),
			onchain_events_awaiting_threshold_conf: Vec::new(),

			secp_ctx,
		}
	}

	/// The Lightning security model (i.e. being able to redeem/timeout HTLCs or penalize a counterparty
	/// onchain) relies on the assumption that claim transactions get confirmed before timelock expiration
	/// (CSV or CLTV, depending on the case). In case of high-fee spikes, a claim tx may get stuck in the
	/// mempool, so you need to bump its feerate quickly using Replace-By-Fee or Child-Pays-For-Parent.
	///
	/// Panics if there are signing errors, because signing operations in reaction to on-chain events
	/// are not expected to fail, and if they do, we may lose funds.
	fn generate_claim_tx<F: Deref, L: Deref>(&mut self, height: u32, cached_request: &PackageTemplate, fee_estimator: &F, logger: &L) -> Option<(Option<u32>, u64, Transaction)>
		where F::Target: FeeEstimator,
		      L::Target: Logger,
	{
		if cached_request.outpoints().len() == 0 { return None } // But don't prune pending claiming request yet, we may have to resurrect HTLCs

		// Compute new height timer to decide when we need to regenerate a new bumped version of the claim tx (if we
		// didn't receive confirmation of it before, or not enough reorg-safe depth on top of it).
		let new_timer = Some(cached_request.get_height_timer(height));
		if cached_request.is_malleable() {
			let predicted_weight = cached_request.package_weight(&self.destination_script);
			if let Some((output_value, new_feerate)) = cached_request.compute_package_output(predicted_weight, fee_estimator, logger) {
				assert!(new_feerate != 0);

				let transaction = cached_request.finalize_package(self, output_value, self.destination_script.clone(), logger).unwrap();
				log_trace!(logger, "...with timer {} and feerate {}", new_timer.unwrap(), new_feerate);
				assert!(predicted_weight >= transaction.get_weight());
				return Some((new_timer, new_feerate, transaction))
			}
		} else {
			// Note: Currently, amounts of holder outputs spending witnesses aren't used
			// as we can't malleate spending package to increase their feerate. This
			// should change with the remaining anchor output patchset.
			if let Some(transaction) = cached_request.finalize_package(self, 0, self.destination_script.clone(), logger) {
				return Some((None, 0, transaction));
			}
		}
		None
	}
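
	// A note on the value returned by generate_claim_tx above: the tuple is (optional height timer
	// for the next bump attempt, effective feerate, fully signed transaction). For non-malleable
	// packages the timer is None and the feerate is 0, since we cannot RBF-bump a pre-signed
	// transaction to increase its feerate.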

	/// Upon channelmonitor.block_connected(..) or upon provision of a preimage on the forward link
	/// for this channel, provide new relevant on-chain transactions and/or new claim requests.
	/// Formerly this was named `block_connected`, but it is now also used for claiming an HTLC output
	/// if we receive a preimage after force-close.
	pub(crate) fn update_claims_view<B: Deref, F: Deref, L: Deref>(&mut self, txn_matched: &[&Transaction], requests: Vec<PackageTemplate>, height: u32, broadcaster: &B, fee_estimator: &F, logger: &L)
		where B::Target: BroadcasterInterface,
		      F::Target: FeeEstimator,
		      L::Target: Logger,
	{
		log_trace!(logger, "Updating claims view at height {} with {} matched transactions and {} claim requests", height, txn_matched.len(), requests.len());
		let mut preprocessed_requests = Vec::with_capacity(requests.len());
		let mut aggregated_request = None;

		// Try to aggregate outputs if their timelock expiration isn't imminent (package timelock
		// <= CLTV_SHARED_CLAIM_BUFFER) and they don't require an immediate nLockTime (aggregable).
		for req in requests {
			// Don't claim an outpoint twice; that would be bad for privacy and may uselessly lock up a CPFP input for a while
			if let Some(_) = self.claimable_outpoints.get(req.outpoints()[0]) {
				log_trace!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request", req.outpoints()[0].txid, req.outpoints()[0].vout);
			} else {
				let timelocked_equivalent_package = self.locktimed_packages.iter().map(|v| v.1.iter()).flatten()
					.find(|locked_package| locked_package.outpoints() == req.outpoints());
				if let Some(package) = timelocked_equivalent_package {
					log_trace!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.",
						req.outpoints()[0].txid, req.outpoints()[0].vout, package.package_timelock());
					continue;
				}

				if req.package_timelock() > height + 1 {
					log_debug!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", req.package_timelock(), height);
					for outpoint in req.outpoints() {
						log_debug!(logger, "  Outpoint {}", outpoint);
					}
					self.locktimed_packages.entry(req.package_timelock()).or_insert(Vec::new()).push(req);
					continue;
				}

				log_trace!(logger, "Test if outpoint can be aggregated with expiration {} against {}", req.timelock(), height + CLTV_SHARED_CLAIM_BUFFER);
				if req.timelock() <= height + CLTV_SHARED_CLAIM_BUFFER || !req.aggregable() {
					// Don't aggregate if the outpoint package timelock is soon or it is marked as non-aggregable
					preprocessed_requests.push(req);
				} else if aggregated_request.is_none() {
					aggregated_request = Some(req);
				} else {
					aggregated_request.as_mut().unwrap().merge_package(req);
				}
			}
		}
		if let Some(req) = aggregated_request {
			preprocessed_requests.push(req);
		}
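
		// As a worked example of the aggregation rule above (hypothetical numbers, and assuming
		// CLTV_SHARED_CLAIM_BUFFER is 12, its value at the time of writing): at height 100, a
		// request with timelock 110 is pushed on its own since 110 <= 100 + 12, while two
		// aggregable requests with timelocks 150 and 160 are merged and broadcast as a single
		// aggregated package.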

		// Claim everything up to and including height + 1
		let remaining_locked_packages = self.locktimed_packages.split_off(&(height + 2));
		for (pop_height, mut entry) in self.locktimed_packages.iter_mut() {
			log_trace!(logger, "Restoring delayed claim of package(s) at their timelock at {}.", pop_height);
			preprocessed_requests.append(&mut entry);
		}
		self.locktimed_packages = remaining_locked_packages;

		// Generate claim transactions and track them to bump if necessary at
		// height timer expiration (i.e. in how many blocks we're going to take action).
		for mut req in preprocessed_requests {
			if let Some((new_timer, new_feerate, tx)) = self.generate_claim_tx(height, &req, &*fee_estimator, &*logger) {
				req.set_timer(new_timer);
				req.set_feerate(new_feerate);
				let txid = tx.txid();
				for k in req.outpoints() {
					log_trace!(logger, "Registering claiming request for {}:{}", k.txid, k.vout);
					self.claimable_outpoints.insert(k.clone(), (txid, height));
				}
				self.pending_claim_requests.insert(txid, req);
				log_trace!(logger, "Broadcasting onchain {}", log_tx!(tx));
				broadcaster.broadcast_transaction(&tx);
			}
		}

		let mut bump_candidates = HashMap::new();
		for tx in txn_matched {
			// Scan all inputs to check whether one of the spent outpoints is of interest to us
			let mut claimed_outputs_material = Vec::new();
			for inp in &tx.input {
				if let Some(first_claim_txid_height) = self.claimable_outpoints.get(&inp.previous_output) {
					// If the outpoint has a claim request pending on it...
					if let Some(request) = self.pending_claim_requests.get_mut(&first_claim_txid_height.0) {
						//... we need to verify equality between transaction outpoints and claim request
						// outpoints to know if the transaction is the original claim or a bumped one issued
						// by us.
						let mut set_equality = true;
						if request.outpoints().len() != tx.input.len() {
							set_equality = false;
						} else {
							for (claim_inp, tx_inp) in request.outpoints().iter().zip(tx.input.iter()) {
								if **claim_inp != tx_inp.previous_output {
									set_equality = false;
								}
							}
						}

						macro_rules! clean_claim_request_after_safety_delay {
							() => {
								let entry = OnchainEventEntry {
									txid: tx.txid(),
									height,
									event: OnchainEvent::Claim { claim_request: first_claim_txid_height.0.clone() }
								};
								if !self.onchain_events_awaiting_threshold_conf.contains(&entry) {
									self.onchain_events_awaiting_threshold_conf.push(entry);
								}
							}
						}

						// If this is our transaction (or our counterparty spent all the outputs
						// before we could anyway, with the same input order as ours), wait for
						// ANTI_REORG_DELAY and clean the RBF tracking map.
						if set_equality {
							clean_claim_request_after_safety_delay!();
						} else { // If false, generate a new claim request with the updated outpoint set
							let mut at_least_one_drop = false;
							for input in tx.input.iter() {
								if let Some(package) = request.split_package(&input.previous_output) {
									claimed_outputs_material.push(package);
									at_least_one_drop = true;
								}
								// If there are no outpoints left to claim in this request, drop it entirely after ANTI_REORG_DELAY.
								if request.outpoints().is_empty() {
									clean_claim_request_after_safety_delay!();
								}
							}
							//TODO: recompute soonest_timelock to avoid wasting a bit on fees
							if at_least_one_drop {
								bump_candidates.insert(first_claim_txid_height.0.clone(), request.clone());
							}
						}
						break; //No need to iterate further, the tx is either ours or theirs
					} else {
						panic!("Inconsistencies between pending_claim_requests map and claimable_outpoints map");
					}
				}
			}
			for package in claimed_outputs_material.drain(..) {
				let entry = OnchainEventEntry {
					txid: tx.txid(),
					height,
					event: OnchainEvent::ContentiousOutpoint { package },
				};
				if !self.onchain_events_awaiting_threshold_conf.contains(&entry) {
					self.onchain_events_awaiting_threshold_conf.push(entry);
				}
			}
		}

		// After the security delay, either our claim tx got enough confirmations or the outpoint is definitely out of reach
		let onchain_events_awaiting_threshold_conf =
			self.onchain_events_awaiting_threshold_conf.drain(..).collect::<Vec<_>>();
		for entry in onchain_events_awaiting_threshold_conf {
			if entry.has_reached_confirmation_threshold(height) {
				match entry.event {
					OnchainEvent::Claim { claim_request } => {
						// We may remove a whole set of claim outpoints here, as these ones may have
						// been aggregated in a single tx and thus claimed atomically
						if let Some(request) = self.pending_claim_requests.remove(&claim_request) {
							for outpoint in request.outpoints() {
								self.claimable_outpoints.remove(&outpoint);
							}
						}
					},
					OnchainEvent::ContentiousOutpoint { package } => {
						self.claimable_outpoints.remove(&package.outpoints()[0]);
					}
				}
			} else {
				self.onchain_events_awaiting_threshold_conf.push(entry);
			}
		}

		// Check if any pending claim request must be rescheduled
		for (first_claim_txid, ref request) in self.pending_claim_requests.iter() {
			if let Some(h) = request.timer() {
				if height >= h {
					bump_candidates.insert(*first_claim_txid, (*request).clone());
				}
			}
		}

		// Build, bump and rebroadcast txn accordingly
		log_trace!(logger, "Bumping {} candidates", bump_candidates.len());
		for (first_claim_txid, request) in bump_candidates.iter() {
			if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &request, &*fee_estimator, &*logger) {
				log_trace!(logger, "Broadcasting onchain {}", log_tx!(bump_tx));
				broadcaster.broadcast_transaction(&bump_tx);
				if let Some(request) = self.pending_claim_requests.get_mut(first_claim_txid) {
					request.set_timer(new_timer);
					request.set_feerate(new_feerate);
				}
			}
		}
	}

	pub(crate) fn transaction_unconfirmed<B: Deref, F: Deref, L: Deref>(
		&mut self,
		txid: &Txid,
		broadcaster: B,
		fee_estimator: F,
		logger: L,
	) where
		B::Target: BroadcasterInterface,
		F::Target: FeeEstimator,
		L::Target: Logger,
	{
		let mut height = None;
		for entry in self.onchain_events_awaiting_threshold_conf.iter() {
			if entry.txid == *txid {
				height = Some(entry.height);
				break;
			}
		}

		if let Some(height) = height {
			self.block_disconnected(height, broadcaster, fee_estimator, logger);
		}
	}

	pub(crate) fn block_disconnected<B: Deref, F: Deref, L: Deref>(&mut self, height: u32, broadcaster: B, fee_estimator: F, logger: L)
		where B::Target: BroadcasterInterface,
		      F::Target: FeeEstimator,
		      L::Target: Logger,
	{
		let mut bump_candidates = HashMap::new();
		let onchain_events_awaiting_threshold_conf =
			self.onchain_events_awaiting_threshold_conf.drain(..).collect::<Vec<_>>();
		for entry in onchain_events_awaiting_threshold_conf {
			if entry.height >= height {
				//- our claim tx on a commitment tx output
				//- resurrect the outpoint back in its claimable set and regenerate the tx
				match entry.event {
					OnchainEvent::ContentiousOutpoint { package } => {
						if let Some(ancestor_claimable_txid) = self.claimable_outpoints.get(&package.outpoints()[0]) {
							if let Some(request) = self.pending_claim_requests.get_mut(&ancestor_claimable_txid.0) {
								request.merge_package(package);
								// Using a HashMap guarantees us that if we have multiple outpoints getting
								// resurrected only one bump claim tx is going to be broadcast
								bump_candidates.insert(ancestor_claimable_txid.clone(), request.clone());
							}
						}
					},
					_ => {},
				}
			} else {
				self.onchain_events_awaiting_threshold_conf.push(entry);
			}
		}
		for (_, request) in bump_candidates.iter_mut() {
			if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &request, &&*fee_estimator, &&*logger) {
				request.set_timer(new_timer);
				request.set_feerate(new_feerate);
				log_info!(logger, "Broadcasting onchain {}", log_tx!(bump_tx));
				broadcaster.broadcast_transaction(&bump_tx);
			}
		}
		for (ancestor_claim_txid, request) in bump_candidates.drain() {
			self.pending_claim_requests.insert(ancestor_claim_txid.0, request);
		}
		//TODO: if we implement cross-block aggregated claim transactions we need to refresh the set of outpoints and regenerate the tx, but
		// right now if one of the outpoints gets disconnected, just erase the whole pending claim request.
		let mut remove_request = Vec::new();
		self.claimable_outpoints.retain(|_, ref v|
			if v.1 >= height {
				remove_request.push(v.0.clone());
				false
			} else { true });
		for req in remove_request {
			self.pending_claim_requests.remove(&req);
		}
	}

	pub(crate) fn get_relevant_txids(&self) -> Vec<Txid> {
		let mut txids: Vec<Txid> = self.onchain_events_awaiting_threshold_conf
			.iter()
			.map(|entry| entry.txid)
			.collect();
		txids.sort_unstable();
		txids.dedup();
		txids
	}

	pub(crate) fn provide_latest_holder_tx(&mut self, tx: HolderCommitmentTransaction) {
		self.prev_holder_commitment = Some(replace(&mut self.holder_commitment, tx));
		self.holder_htlc_sigs = None;
	}

	// Normally holder HTLCs are signed at the same time as the holder commitment tx. However,
	// in some configurations, the holder commitment tx has been signed and broadcast by a
	// ChannelMonitor replica, so we handle that case here.
	fn sign_latest_holder_htlcs(&mut self) {
		if self.holder_htlc_sigs.is_none() {
			let (_sig, sigs) = self.signer.sign_holder_commitment_and_htlcs(&self.holder_commitment, &self.secp_ctx).expect("sign holder commitment");
			self.holder_htlc_sigs = Some(Self::extract_holder_sigs(&self.holder_commitment, sigs));
		}
	}

	// Normally only the latest commitment tx and HTLCs need to be signed. However, in some
	// configurations we may have updated our holder commitment but a replica of the ChannelMonitor
	// broadcast the previous one before we sync with it. We handle that case here.
	fn sign_prev_holder_htlcs(&mut self) {
		if self.prev_holder_htlc_sigs.is_none() {
			if let Some(ref holder_commitment) = self.prev_holder_commitment {
				let (_sig, sigs) = self.signer.sign_holder_commitment_and_htlcs(holder_commitment, &self.secp_ctx).expect("sign previous holder commitment");
				self.prev_holder_htlc_sigs = Some(Self::extract_holder_sigs(holder_commitment, sigs));
			}
		}
	}
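
	// For illustration (hypothetical values): a holder commitment whose HTLC outputs sit at
	// transaction output indices 2 and 4 yields a Vec of length 5 from extract_holder_sigs below,
	// where positions 0, 1 and 3 are None and positions 2 and 4 hold the (htlc_idx, signature) pairs.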
	fn extract_holder_sigs(holder_commitment: &HolderCommitmentTransaction, sigs: Vec<Signature>) -> Vec<Option<(usize, Signature)>> {
		let mut ret = Vec::new();
		for (htlc_idx, (holder_sig, htlc)) in sigs.iter().zip(holder_commitment.htlcs().iter()).enumerate() {
			let tx_idx = htlc.transaction_output_index.unwrap();
			if ret.len() <= tx_idx as usize { ret.resize(tx_idx as usize + 1, None); }
			ret[tx_idx as usize] = Some((htlc_idx, holder_sig.clone()));
		}
		ret
	}

	//TODO: getting the latest holder transactions should be infallible and result in us "force-closing the channel", but we may
	// have an empty holder commitment transaction if a ChannelMonitor is asked to force-close just after
	// Channel::get_outbound_funding_created, before an initial commitment transaction has been provided.
	// For an outbound channel, the ChannelMonitor is initialized at Channel::funding_signed, so there is
	// nothing to monitor before that.
	pub(crate) fn get_fully_signed_holder_tx(&mut self, funding_redeemscript: &Script) -> Transaction {
		let (sig, htlc_sigs) = self.signer.sign_holder_commitment_and_htlcs(&self.holder_commitment, &self.secp_ctx).expect("signing holder commitment");
		self.holder_htlc_sigs = Some(Self::extract_holder_sigs(&self.holder_commitment, htlc_sigs));
		self.holder_commitment.add_holder_sig(funding_redeemscript, sig)
	}

	#[cfg(any(test, feature = "unsafe_revoked_tx_signing"))]
	pub(crate) fn get_fully_signed_copy_holder_tx(&mut self, funding_redeemscript: &Script) -> Transaction {
		let (sig, htlc_sigs) = self.signer.unsafe_sign_holder_commitment_and_htlcs(&self.holder_commitment, &self.secp_ctx).expect("sign holder commitment");
		self.holder_htlc_sigs = Some(Self::extract_holder_sigs(&self.holder_commitment, htlc_sigs));
		self.holder_commitment.add_holder_sig(funding_redeemscript, sig)
	}

	pub(crate) fn get_fully_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>) -> Option<Transaction> {
		let mut htlc_tx = None;
		let commitment_txid = self.holder_commitment.trust().txid();
		// Check if the HTLC spends from the current holder commitment
		if commitment_txid == outp.txid {
			self.sign_latest_holder_htlcs();
			if let &Some(ref htlc_sigs) = &self.holder_htlc_sigs {
				let &(ref htlc_idx, ref htlc_sig) = htlc_sigs[outp.vout as usize].as_ref().unwrap();
				let trusted_tx = self.holder_commitment.trust();
				let counterparty_htlc_sig = self.holder_commitment.counterparty_htlc_sigs[*htlc_idx];
				htlc_tx = Some(trusted_tx
					.get_signed_htlc_tx(&self.channel_transaction_parameters.as_holder_broadcastable(), *htlc_idx, &counterparty_htlc_sig, htlc_sig, preimage));
			}
		}

		// If the HTLC doesn't spend the current holder commitment, check if it spends the previous one
		if htlc_tx.is_none() && self.prev_holder_commitment.is_some() {
			let commitment_txid = self.prev_holder_commitment.as_ref().unwrap().trust().txid();
			if commitment_txid == outp.txid {
				self.sign_prev_holder_htlcs();
				if let &Some(ref htlc_sigs) = &self.prev_holder_htlc_sigs {
					let &(ref htlc_idx, ref htlc_sig) = htlc_sigs[outp.vout as usize].as_ref().unwrap();
					let holder_commitment = self.prev_holder_commitment.as_ref().unwrap();
					let trusted_tx = holder_commitment.trust();
					let counterparty_htlc_sig = holder_commitment.counterparty_htlc_sigs[*htlc_idx];
					htlc_tx = Some(trusted_tx
						.get_signed_htlc_tx(&self.channel_transaction_parameters.as_holder_broadcastable(), *htlc_idx, &counterparty_htlc_sig, htlc_sig, preimage));
				}
			}
		}
		htlc_tx
	}
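
	// Note: get_fully_signed_htlc_tx above indexes the cached signatures by outp.vout and unwrap()s
	// the result, so callers are expected to pass an outpoint which actually corresponds to an HTLC
	// output of one of our holder commitment transactions.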

	#[cfg(any(test, feature = "unsafe_revoked_tx_signing"))]
	pub(crate) fn unsafe_get_fully_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>) -> Option<Transaction> {
		let latest_had_sigs = self.holder_htlc_sigs.is_some();
		let prev_had_sigs = self.prev_holder_htlc_sigs.is_some();
		let ret = self.get_fully_signed_htlc_tx(outp, preimage);
		if !latest_had_sigs {
			self.holder_htlc_sigs = None;
		}
		if !prev_had_sigs {
			self.prev_holder_htlc_sigs = None;
		}
		ret
	}
}