// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
use bitcoin::blockdata::script::{Script, Builder};
use bitcoin::blockdata::transaction::{Transaction, EcdsaSighashType};
use bitcoin::util::sighash;
use bitcoin::consensus::encode;

use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
use bitcoin::hash_types::{Txid, BlockHash};

use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::{PublicKey, SecretKey};
use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
use bitcoin::secp256k1;

use ln::{PaymentPreimage, PaymentHash};
use ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures};
use ln::msgs;
use ln::msgs::{DecodeError, OptionalField, DataLossProtect};
use ln::script::{self, ShutdownScript};
use ln::channelmanager::{CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, HTLCFailReason, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
use ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use ln::chan_utils;
use chain::BestBlock;
use chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS};
use chain::transaction::{OutPoint, TransactionData};
use chain::keysinterface::{Sign, KeysInterface};
use util::events::ClosureReason;
use util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
use util::logger::Logger;
use util::errors::APIError;
use util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits};
use util::scid_utils::scid_from_parts;

use io;
use prelude::*;
use core::{cmp, mem, fmt};
use core::ops::Deref;
#[cfg(any(test, fuzzing, debug_assertions))]
use sync::Mutex;
use bitcoin::hashes::hex::ToHex;

#[cfg(test)]
pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}

pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC.
	pub next_outbound_htlc_limit_msat: u64,
}

#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	RemoteAnnounced,
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have an AwaitingAnnouncedRemoteRevoke variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to AwaitingAnnouncedRemoteRevoke.

	// Outbound state can only be `LocalAnnounced` or `Committed`
	Outbound,
}

2018-10-15 14:38:19 -04:00
enum InboundHTLCRemovalReason {
FailRelay ( msgs ::OnionErrorPacket ) ,
FailMalformed ( ( [ u8 ; 32 ] , u16 ) ) ,
2018-11-22 21:18:16 -05:00
Fulfill ( PaymentPreimage ) ,
2018-10-15 14:38:19 -04:00
}
enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which are provided one-at-a-time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc) --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc) --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}

struct InboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: InboundHTLCState,
}

enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	/// * they've revoked, so worst case we can announce an old state and get our (option on)
	///   money back (though we won't), and,
	/// * we'll send them a revoke when they send a commitment_signed, and since only they're
	///   allowed to remove it, the "can only be removed once committed on both sides" requirement
	///   doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///   we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}

#[derive(Clone)]
enum OutboundHTLCOutcome {
	Success(Option<PaymentPreimage>),
	Failure(HTLCFailReason),
}

impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
	fn from(o: Option<HTLCFailReason>) -> Self {
		match o {
			None => OutboundHTLCOutcome::Success(None),
			Some(r) => OutboundHTLCOutcome::Failure(r)
		}
	}
}

impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
	fn into(self) -> Option<&'a HTLCFailReason> {
		match self {
			OutboundHTLCOutcome::Success(_) => None,
			OutboundHTLCOutcome::Failure(ref r) => Some(r)
		}
	}
}
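
// A usage sketch (illustrative only, not part of the original file): these conversions map an
// outcome to and from an `Option<HTLCFailReason>` view, where `None` stands for success (with
// no preimage recorded) and `Some(reason)` for failure:
//
//   let outcome: OutboundHTLCOutcome = None::<HTLCFailReason>.into();
//   // `outcome` is now `OutboundHTLCOutcome::Success(None)`.
//   let as_fail_reason: Option<&HTLCFailReason> = (&outcome).into();
//   assert!(as_fail_reason.is_none());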

struct OutboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: OutboundHTLCState,
	source: HTLCSource,
}

/// See AwaitingRemoteRevoke ChannelState for more info
enum HTLCUpdateAwaitingACK {
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		// always outbound
		amount_msat: u64,
		cltv_expiry: u32,
		payment_hash: PaymentHash,
		source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket,
	},
	ClaimHTLC {
		payment_preimage: PaymentPreimage,
		htlc_id: u64,
	},
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
}

/// There are a few "states" and then a number of flags which can be applied:
/// We first move through init with OurInitSent -> TheirInitSent -> FundingCreated -> FundingSent.
/// TheirChannelReady and OurChannelReady then get set on FundingSent, and when both are set we
/// move on to ChannelFunded.
/// Note that PeerDisconnected can be set on both ChannelFunded and FundingSent.
/// ChannelFunded can then get all remaining flags set on it, until we finish shutdown, then we
/// move on to ShutdownComplete, at which point most calls into this channel are disallowed.
enum ChannelState {
	/// Implies we have (or are prepared to) send our open_channel/accept_channel message
	OurInitSent = 1 << 0,
	/// Implies we have received their open_channel/accept_channel message
	TheirInitSent = 1 << 1,
	/// We have sent funding_created and are awaiting a funding_signed to advance to FundingSent.
	/// Note that this is nonsense for an inbound channel as we immediately generate funding_signed
	/// upon receipt of funding_created, so simply skip this state.
	FundingCreated = 4,
	/// Set when we have received/sent funding_created and funding_signed and are thus now waiting
	/// on the funding transaction to confirm. The ChannelReady flags are set to indicate when we
	/// and our counterparty consider the funding transaction confirmed.
	FundingSent = 8,
	/// Flag which can be set on FundingSent to indicate they sent us a channel_ready message.
	/// Once both TheirChannelReady and OurChannelReady are set, state moves on to ChannelFunded.
	TheirChannelReady = 1 << 4,
	/// Flag which can be set on FundingSent to indicate we sent them a channel_ready message.
	/// Once both TheirChannelReady and OurChannelReady are set, state moves on to ChannelFunded.
	OurChannelReady = 1 << 5,
	ChannelFunded = 64,
	/// Flag which is set on ChannelFunded and FundingSent indicating remote side is considered
	/// "disconnected" and no updates are allowed until after we've done a channel_reestablish
	/// dance.
	PeerDisconnected = 1 << 7,
	/// Flag which is set on ChannelFunded, FundingCreated, and FundingSent indicating the user has
	/// told us they failed to update our ChannelMonitor somewhere and we should pause sending any
	/// outbound messages until they've managed to do so.
	MonitorUpdateFailed = 1 << 8,
	/// Flag which implies that we have sent a commitment_signed but are awaiting the responding
	/// revoke_and_ack message. During this time period, we can't generate new commitment_signed
	/// messages as then we will be unable to determine which HTLCs they included in their
	/// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent
	/// later.
	/// Flag is set on ChannelFunded.
	AwaitingRemoteRevoke = 1 << 9,
	/// Flag which is set on ChannelFunded or FundingSent after receiving a shutdown message from
	/// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected
	/// to respond with our own shutdown message when possible.
	RemoteShutdownSent = 1 << 10,
	/// Flag which is set on ChannelFunded or FundingSent after sending a shutdown message. At this
	/// point, we may not add any new HTLCs to the channel.
	LocalShutdownSent = 1 << 11,
	/// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
	/// to drop us, but we store this anyway.
	ShutdownComplete = 4096,
}

const BOTH_SIDES_SHUTDOWN_MASK: u32 = ChannelState::LocalShutdownSent as u32 | ChannelState::RemoteShutdownSent as u32;
const MULTI_STATE_FLAGS: u32 = BOTH_SIDES_SHUTDOWN_MASK | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32;
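
// A minimal sketch (not part of the original file) of how these flags compose: a funded channel
// where both sides have sent `shutdown` carries the `ChannelFunded` state with both shutdown
// flags set, and masking with `BOTH_SIDES_SHUTDOWN_MASK` recovers exactly those flags.
#[cfg(test)]
#[test]
fn both_sides_shutdown_mask_sketch() {
	let state = ChannelState::ChannelFunded as u32
		| ChannelState::LocalShutdownSent as u32
		| ChannelState::RemoteShutdownSent as u32;
	assert_eq!(state & BOTH_SIDES_SHUTDOWN_MASK, BOTH_SIDES_SHUTDOWN_MASK);
	// Clearing all multi-state flags leaves only the base state.
	assert_eq!(state & !MULTI_STATE_FLAGS, ChannelState::ChannelFunded as u32);
}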

pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
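// Worked through (illustrative note): (1 << 48) - 1 = 281,474,976,710,655, the largest 48-bit
// commitment number; per BOLT #3 commitment numbers are 48 bits wide and, as noted on the
// `Channel` struct below, we count down from this value toward zero.
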
/// The "channel disabled" bit in channel_update must be set based on whether we are connected to
/// our counterparty or not. However, we don't want to announce updates right away to avoid
/// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
/// our channel_update message and track the current state here.
2021-05-07 20:56:10 +00:00
/// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
#[ derive(Clone, Copy, PartialEq) ]
2021-05-13 15:33:54 +00:00
pub ( super ) enum ChannelUpdateStatus {
2021-05-07 20:56:10 +00:00
/// We've announced the channel as enabled and are connected to our peer.
Enabled ,
/// Our channel is no longer live, but we haven't announced the channel as disabled yet.
2019-11-29 01:39:33 -05:00
DisabledStaged ,
2021-05-07 20:56:10 +00:00
/// Our channel is live again, but we haven't announced the channel as enabled yet.
EnabledStaged ,
/// We've announced the channel as disabled.
Disabled ,
2019-11-29 01:39:33 -05:00
}
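
// An illustrative lifecycle (not from the original file), driven by the timer in
// `ChannelManager::timer_tick_occurred`: when the peer disconnects we move
// Enabled -> DisabledStaged, and only if the channel is still down at a later tick do we
// broadcast a disabling channel_update and move to Disabled; a reconnect before then returns
// us to Enabled without announcing anything. EnabledStaged mirrors this in the other direction.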
/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
#[derive(PartialEq)]
pub enum AnnouncementSigsState {
	/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
	/// we sent the last `AnnouncementSignatures`.
	NotSent,
	/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
	/// This state never appears on disk - instead we write `NotSent`.
	MessageSent,
	/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
	/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
	/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
	/// they send back a `RevokeAndACK`.
	/// This state never appears on disk - instead we write `NotSent`.
	Committed,
	/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
	/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
	PeerReceived,
}
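
// An illustrative lifecycle (not from the original file):
//   NotSent --(we send announcement_signatures)--> MessageSent
//   MessageSent --(we send a commitment_signed)--> Committed
//   Committed --(peer sends revoke_and_ack)--> PeerReceived
// with any state collapsing back to NotSent on peer disconnection, and with MessageSent and
// Committed written to disk as NotSent, as noted above.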

/// An enum indicating whether the local or remote side offered a given HTLC.
enum HTLCInitiator {
	LocalOffered,
	RemoteOffered,
}

/// A struct gathering stats on pending HTLCs, either inbound or outbound side.
struct HTLCStats {
	pending_htlcs: u32,
	pending_htlcs_value_msat: u64,
	on_counterparty_tx_dust_exposure_msat: u64,
	on_holder_tx_dust_exposure_msat: u64,
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *not* included
}

/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *not* included)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees but considering dust limits
	remote_balance_msat: u64, // remote balance before fees but considering dust limits
	preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
}

/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
	amount_msat: u64,
	origin: HTLCInitiator,
}

impl HTLCCandidate {
	fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
		Self {
			amount_msat,
			origin,
		}
	}
}
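
// A usage sketch (illustrative only): when checking whether one additional HTLC is affordable,
// the prospective HTLC is described by its amount and which side would add it, e.g.
// `HTLCCandidate::new(amount_msat, HTLCInitiator::LocalOffered)` for one we would offer.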

/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
/// description
enum UpdateFulfillFetch {
	NewClaim {
		monitor_update: ChannelMonitorUpdate,
		htlc_value_msat: u64,
		msg: Option<msgs::UpdateFulfillHTLC>,
	},
	DuplicateClaim {},
}

/// The return type of get_update_fulfill_htlc_and_commit.
pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
	NewClaim {
		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
		monitor_update: ChannelMonitorUpdate,
		/// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
		/// The update_fulfill message and commitment_signed message (if the claim was not placed
		/// in the holding cell).
		msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>,
	},
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
	DuplicateClaim {},
}

/// The return value of `revoke_and_ack` on success, primarily updates to other channels or HTLC
/// state.
pub(super) struct RAAUpdates {
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	pub monitor_update: ChannelMonitorUpdate,
	pub holding_cell_failed_htlcs: Vec<(HTLCSource, PaymentHash)>,
}

/// The return value of `monitor_updating_restored`
pub(super) struct MonitorRestoreUpdates {
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	pub funding_broadcastable: Option<Transaction>,
	pub channel_ready: Option<msgs::ChannelReady>,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}

/// The return value of `channel_reestablish`
pub(super) struct ReestablishResponses {
	pub channel_ready: Option<msgs::ChannelReady>,
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub mon_update: Option<ChannelMonitorUpdate>,
	pub holding_cell_failed_htlcs: Vec<(HTLCSource, PaymentHash)>,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	pub shutdown_msg: Option<msgs::Shutdown>,
}

/// If the majority of the channel's funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance but the fundee is unable to send a payment as the increase in fee more than drains
/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
/// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
/// by this multiple without hitting this case, before sending.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
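// For example (illustrative numbers only): at a current feerate of 1,000 sat/kW, the
// fee-spike-buffer check is performed as though the feerate were
// 1,000 * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE = 2,000 sat/kW, so we only send an HTLC if the
// channel could still afford it at double the current feerate.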

/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
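// At an average of one block every ten minutes, 2016 blocks is roughly two weeks (it is also
// the length of a difficulty-adjustment period).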

/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;

/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;

/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
/// * The expected interval between ticks (1 minute).
/// * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///   for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
/// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
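// Worked through: with ~300 seconds of convergence delay and one-minute ticks,
// 300 sec / 60 sec-per-tick = 5 ticks.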

// TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
// has been completed, and then turn into a Channel to get compiler-time enforcement of things like
// calling channel_id() before we're set up or things like get_outbound_funding_signed on an
// inbound channel.
//
// Holder designates channel data owned for the benefit of the user client.
// Counterparty designates channel data owned by the other channel participant entity.
pub(super) struct Channel<Signer: Sign> {
	config: LegacyChannelConfig,

	// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
	// constructed using it. The second element in the tuple corresponds to the number of ticks that
	// have elapsed since the update occurred.
	prev_config: Option<(ChannelConfig, usize)>,

	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,

	user_id: u64,

	channel_id: [u8; 32],
	channel_state: u32,

	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
	// next connect.
	// We do so here, see `AnnouncementSigsSent` for more details on the state(s).
	// Note that a number of our tests were written prior to the behavior here which retransmits
	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
	// many tests.
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) announcement_sigs_state: AnnouncementSigsState,
	#[cfg(not(any(test, feature = "_test_utils")))]
	announcement_sigs_state: AnnouncementSigsState,

	secp_ctx: Secp256k1<secp256k1::All>,
	channel_value_satoshis: u64,

	latest_monitor_update_id: u64,

	holder_signer: Signer,
	shutdown_scriptpubkey: Option<ShutdownScript>,
	destination_script: Script,

	// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
	// generation start at 0 and count up...this simplifies some parts of implementation at the
	// cost of others, but should really just be changed.
	cur_holder_commitment_transaction_number: u64,
	cur_counterparty_commitment_transaction_number: u64,
	value_to_self_msat: u64, // Excluding all pending_htlcs, excluding fees
	pending_inbound_htlcs: Vec<InboundHTLCOutput>,
	pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
	holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,

	/// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
	/// need to ensure we resend them in the order we originally generated them. Note that because
	/// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
	/// sufficient to simply set this to the opposite of any message we are generating as we
	/// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
	/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
	/// send it first.
	resend_order: RAACommitmentOrder,

	monitor_pending_channel_ready: bool,
	monitor_pending_revoke_and_ack: bool,
	monitor_pending_commitment_signed: bool,
	monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	monitor_pending_finalized_fulfills: Vec<HTLCSource>,

	// pending_update_fee is filled when sending and receiving update_fee.
	//
	// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
	// or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
	// generating new commitment transactions with exactly the same criteria as inbound/outbound
	// HTLCs with similar state.
	pending_update_fee: Option<(u32, FeeUpdateState)>,
	// If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
	// it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
	// `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
	// `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
	// further `send_update_fee` calls, dropping the previous holding cell update entirely.
	holding_cell_update_fee: Option<u32>,
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,
	feerate_per_kw: u32,

	/// The timestamp set on our latest `channel_update` message for this channel. It is updated
	/// when the channel is updated in ways which may impact the `channel_update` message or when a
	/// new block is received, ensuring it's always at least moderately close to the current real
	/// time.
	update_time_counter: u32,

	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a locally-generated commitment transaction
	holder_max_commitment_tx_output: Mutex<(u64, u64)>,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a remote-generated commitment transaction
	counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,

	last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
	target_closing_feerate_sats_per_kw: Option<u32>,

	/// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
	/// update, we need to delay processing it until later. We do that here by simply storing the
	/// closing_signed message and handling it in `maybe_propose_closing_signed`.
	pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,

	/// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
	/// transaction. These are set once we reach `closing_negotiation_ready`.
	#[cfg(test)]
	pub(crate) closing_fee_limits: Option<(u64, u64)>,
	#[cfg(not(test))]
	closing_fee_limits: Option<(u64, u64)>,

	/// Flag that ensures that `accept_inbound_channel` must be called before `funding_created`
	/// is executed successfully. The reason for this flag is that when the
	/// `UserConfig::manually_accept_inbound_channels` config flag is set to true, inbound channels
	/// are required to be manually accepted by the node operator before the `msgs::AcceptChannel`
	/// message is created and sent out. During the manual accept process, `accept_inbound_channel`
	/// is called by `ChannelManager::accept_inbound_channel`.
	///
	/// The flag counteracts that a counterparty node could theoretically send a
	/// `msgs::FundingCreated` message before the node operator has manually accepted an inbound
	/// channel request made by the counterparty node. That would execute `funding_created` before
	/// `accept_inbound_channel`, and `funding_created` should therefore not execute successfully.
	inbound_awaiting_accept: bool,

	/// The hash of the block in which the funding transaction was included.
	funding_tx_confirmed_in: Option<BlockHash>,
	funding_tx_confirmation_height: u32,
	short_channel_id: Option<u64>,
	/// Either the height at which this channel was created or the height at which it was last
	/// serialized if it was serialized by versions prior to 0.0.103.
	/// We use this to close if funding is never broadcasted.
	channel_creation_height: u32,

	counterparty_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) holder_dust_limit_satoshis: u64,
	#[cfg(not(test))]
	holder_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	counterparty_max_htlc_value_in_flight_msat: u64,

	#[cfg(test)]
	pub(super) holder_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	holder_max_htlc_value_in_flight_msat: u64,

	/// minimum channel reserve for self to maintain - set by them.
	counterparty_selected_channel_reserve_satoshis: Option<u64>,

	#[cfg(test)]
	pub(super) holder_selected_channel_reserve_satoshis: u64,
	#[cfg(not(test))]
	holder_selected_channel_reserve_satoshis: u64,

	counterparty_htlc_minimum_msat: u64,
	holder_htlc_minimum_msat: u64,
	#[cfg(test)]
	pub counterparty_max_accepted_htlcs: u16,
	#[cfg(not(test))]
	counterparty_max_accepted_htlcs: u16,
	//implied by OUR_MAX_HTLCS: max_accepted_htlcs: u16,

	minimum_depth: Option<u32>,

	counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,

	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
	funding_transaction: Option<Transaction>,

	counterparty_cur_commitment_point: Option<PublicKey>,
	counterparty_prev_commitment_point: Option<PublicKey>,
	counterparty_node_id: PublicKey,

	counterparty_shutdown_scriptpubkey: Option<Script>,

	commitment_secrets: CounterpartyCommitmentSecrets,

	channel_update_status: ChannelUpdateStatus,
	/// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
	/// not complete within a single timer tick (one minute), we should force-close the channel.
	/// This prevents us from keeping unusable channels around forever if our counterparty wishes
	/// to DoS us.
	/// Note that this field is reset to false on deserialization to give us a chance to connect to
	/// our peer and start the closing_signed negotiation fresh.
	closing_signed_in_flight: bool,

	/// Our counterparty's channel_announcement signatures provided in announcement_signatures.
	/// This can be used to rebroadcast the channel_announcement message later.
	announcement_sigs: Option<(Signature, Signature)>,

	// We save these values so we can make sure `next_local_commit_tx_fee_msat` and
	// `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
	// be, by comparing the cached values to the fee of the transaction generated by
	// `build_commitment_transaction`.
	#[cfg(any(test, fuzzing))]
	next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	#[cfg(any(test, fuzzing))]
	next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,

	/// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
	/// they will not send a channel_reestablish until the channel locks in. Then, they will send a
	/// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
	/// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
	/// message until we receive a channel_reestablish.
	///
	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
	pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,

	#[cfg(any(test, fuzzing))]
	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
	// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
	// is fine, but as a sanity check in our failure to generate the second claim, we check here
	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
	historical_inbound_htlc_fulfills: HashSet<u64>,

	/// This channel's type, as negotiated during channel open
	channel_type: ChannelTypeFeatures,

// Our counterparty can offer us SCID aliases which they will map to this channel when routing
// outbound payments. These can be used in invoice route hints to avoid explicitly revealing
// the channel's funding UTXO.
2022-02-01 21:57:01 +00:00
//
// We also use this when sending our peer a channel_update that isn't to be broadcasted
// publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
// associated channel mapping.
//
2022-02-01 17:37:16 +00:00
// We only bother storing the most recent SCID alias at any time, though our counterparty has
// to store all of them.
latest_inbound_scid_alias : Option < u64 > ,
	// We always offer our counterparty a static SCID alias, which we recognize as for this channel
	// if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
	// don't currently support node id aliases and eventually privacy should be provided with
	// blinded paths instead of simple scid+node_id aliases.
	outbound_scid_alias: u64,
}

#[cfg(any(test, fuzzing))]
struct CommitmentTxInfoCached {
	fee: u64,
	total_pending_htlcs: usize,
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,
	feerate: u32,
}

pub const OUR_MAX_HTLCS: u16 = 50; //TODO

pub(crate) fn commitment_tx_base_weight(opt_anchors: bool) -> u64 {
	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
	const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
	if opt_anchors { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
}
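
// Per BOLT #3, a non-anchors commitment transaction has a base weight of 724 weight units; the
// anchors variant is larger because it adds the two anchor outputs. Each non-dust HTLC then adds
// COMMITMENT_TX_WEIGHT_PER_HTLC weight units on top of the base weight.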

#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;

pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;

/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;

/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
/// It's 2^24 - 1.
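/// (That is, 16_777_215 sats, roughly 0.168 BTC.)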
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;

/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;

/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;

/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;

// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;

/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	Ignore(String),
	Warn(String),
	Close(String),
}

2020-02-08 17:22:58 -05:00
impl fmt ::Debug for ChannelError {
2019-07-10 16:39:10 -04:00
fn fmt ( & self , f : & mut fmt ::Formatter ) -> fmt ::Result {
match self {
2020-07-13 13:16:32 +09:00
& ChannelError ::Ignore ( ref e ) = > write! ( f , " Ignore : {} " , e ) ,
2021-07-26 20:01:36 +00:00
& ChannelError ::Warn ( ref e ) = > write! ( f , " Warn : {} " , e ) ,
2020-07-13 13:16:32 +09:00
& ChannelError ::Close ( ref e ) = > write! ( f , " Close : {} " , e ) ,
2019-07-10 16:39:10 -04:00
}
}
2018-09-30 18:19:59 -04:00
}
macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}

impl<Signer: Sign> Channel<Signer> {
	/// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
	/// `channel_value_satoshis` in msat, set through
	/// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
	///
	/// The effective percentage is lower bounded by 1% and upper bounded by 100%.
	///
	/// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
	fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
		let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
			1
		} else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
			100
		} else {
			config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
		};
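		// value_sats * 10 * percent == value_sats * 1000 (msat per sat) * percent / 100,
		// i.e. the configured percentage of the channel value, expressed in msat.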
		channel_value_satoshis * 10 * configured_percent
	}

	/// Returns a minimum channel reserve value the remote needs to maintain,
	/// required by us according to the configured or default
	/// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
	///
	/// Guaranteed to return a value no larger than channel_value_satoshis
	///
	/// This is used both for outbound and inbound channels and has lower bound
	/// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
	pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
		let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
		cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
	}
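
	// Illustrative (not from upstream docs): with channel_value_satoshis = 1_000_000 and
	// their_channel_reserve_proportional_millionths = 10_000 (i.e. 1%), the calculated reserve
	// is 10_000 sats; smaller results are floored at MIN_THEIR_CHAN_RESERVE_SATOSHIS and the
	// final value is always capped at the channel value itself.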

	/// This is for legacy reasons, present for forward-compatibility.
	/// LDK versions older than 0.0.104 don't know how to read/handle values other than the
	/// default. Hence, we use this function so as not to persist default values of
	/// `holder_selected_channel_reserve_satoshis` for channels into storage.
	pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
		let (q, _) = channel_value_satoshis.overflowing_div(100);
		cmp::min(channel_value_satoshis, cmp::max(q, 1000))
	}
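
	// Illustrative: the legacy default was 1% of the channel value (channel_value_satoshis / 100),
	// floored at 1000 sats and capped at the channel value itself.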

	pub(crate) fn opt_anchors(&self) -> bool {
		self.channel_transaction_parameters.opt_anchors.is_some()
	}

	fn get_initial_channel_type(config: &UserConfig) -> ChannelTypeFeatures {
		// The default channel type (ie the first one we try) depends on whether the channel is
		// public - if it is, we just go with `only_static_remotekey` as it's the only option
		// available. If it's private, we first try `scid_privacy` as it provides better privacy
		// with no other changes, and fall back to `only_static_remotekey`.
		let mut ret = ChannelTypeFeatures::only_static_remote_key();
		if !config.channel_handshake_config.announced_channel && config.channel_handshake_config.negotiate_scid_privacy {
			ret.set_scid_privacy_required();
		}
		ret
	}

	/// If we receive an error message, it may only be a rejection of the channel type we tried,
	/// not of our ability to open any channel at all. Thus, on error, we should first call this
	/// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
	pub(crate) fn maybe_handle_error_without_close(&mut self, chain_hash: BlockHash) -> Result<msgs::OpenChannel, ()> {
		if !self.is_outbound() || self.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
		if self.channel_type == ChannelTypeFeatures::only_static_remote_key() {
			// We've exhausted our options
			return Err(());
		}
		self.channel_type = ChannelTypeFeatures::only_static_remote_key(); // We only currently support two types
		Ok(self.get_open_channel(chain_hash))
	}
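
	// Illustrative flow (not from upstream docs): if we first offered
	// `static_remote_key | scid_privacy` and the peer rejected it with an error, the retry above
	// downgrades us to plain `only_static_remote_key`; a second error then fails the channel for
	// good.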

	// Constructors:
	pub fn new_outbound<K: Deref, F: Deref>(
		fee_estimator: &LowerBoundedFeeEstimator<F>, keys_provider: &K, counterparty_node_id: PublicKey, their_features: &InitFeatures,
		channel_value_satoshis: u64, push_msat: u64, user_id: u64, config: &UserConfig, current_chain_height: u32,
		outbound_scid_alias: u64
	) -> Result<Channel<Signer>, APIError>
	where K::Target: KeysInterface<Signer = Signer>,
	      F::Target: FeeEstimator,
	{
		let opt_anchors = false; // TODO - should be based on features

		let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
		let holder_signer = keys_provider.get_channel_signer(false, channel_value_satoshis);
		let pubkeys = holder_signer.pubkeys().clone();

		if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
			return Err(APIError::APIMisuseError { err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis) });
		}
		if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
			return Err(APIError::APIMisuseError { err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis) });
		}
		let channel_value_msat = channel_value_satoshis * 1000;
		if push_msat > channel_value_msat {
			return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
		}
		if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
			return Err(APIError::APIMisuseError { err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay) });
		}

		let holder_selected_channel_reserve_satoshis = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
		if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			// Protocol-level safety check; this should never happen given the lower bound
			// applied by `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
			return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
		}

		let feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);

		let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
		let commitment_tx_fee = Self::commit_tx_fee_msat(feerate, MIN_AFFORDABLE_HTLC_COUNT, opt_anchors);
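		// Illustrative (not from upstream docs): our own starting balance must at least cover the
		// initial commitment transaction fee at the current feerate, computed with headroom for
		// MIN_AFFORDABLE_HTLC_COUNT HTLC outputs, so a small channel with a large push_msat can be
		// rejected here even though the raw amounts look valid.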
		if value_to_self_msat < commitment_tx_fee {
			return Err(APIError::APIMisuseError { err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
		}

		let mut secp_ctx = Secp256k1::new();
		secp_ctx.seeded_randomize(&keys_provider.get_secure_random_bytes());

		let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
			Some(keys_provider.get_shutdown_scriptpubkey())
		} else { None };
		if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
			if !shutdown_scriptpubkey.is_compatible(&their_features) {
				return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
			}
		}

		Ok(Channel {
			user_id,

			config: LegacyChannelConfig {
				options: config.channel_config.clone(),
				announced_channel: config.channel_handshake_config.announced_channel,
				commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
			},

			prev_config: None,

			inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),

			channel_id: keys_provider.get_secure_random_bytes(),
			channel_state: ChannelState::OurInitSent as u32,
			announcement_sigs_state: AnnouncementSigsState::NotSent,
			secp_ctx,
			channel_value_satoshis,

			latest_monitor_update_id: 0,

			holder_signer,
			shutdown_scriptpubkey,
			destination_script: keys_provider.get_destination_script(),

			cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
			cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
			value_to_self_msat,

			pending_inbound_htlcs: Vec::new(),
			pending_outbound_htlcs: Vec::new(),
			holding_cell_htlc_updates: Vec::new(),
			pending_update_fee: None,
			holding_cell_update_fee: None,
			next_holder_htlc_id: 0,
			next_counterparty_htlc_id: 0,
			update_time_counter: 1,

			resend_order: RAACommitmentOrder::CommitmentFirst,

			monitor_pending_channel_ready: false,
			monitor_pending_revoke_and_ack: false,
			monitor_pending_commitment_signed: false,
			monitor_pending_forwards: Vec::new(),
			monitor_pending_failures: Vec::new(),
			monitor_pending_finalized_fulfills: Vec::new(),

			#[cfg(debug_assertions)]
			holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
			#[cfg(debug_assertions)]
			counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),

			last_sent_closing_fee: None,
			pending_counterparty_closing_signed: None,
			closing_fee_limits: None,
			target_closing_feerate_sats_per_kw: None,

			inbound_awaiting_accept: false,

			funding_tx_confirmed_in: None,
			funding_tx_confirmation_height: 0,
			short_channel_id: None,
			channel_creation_height: current_chain_height,

			feerate_per_kw: feerate,
			counterparty_dust_limit_satoshis: 0,
			holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
			counterparty_max_htlc_value_in_flight_msat: 0,
			holder_max_htlc_value_in_flight_msat: Self::get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
			counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
			holder_selected_channel_reserve_satoshis,
			counterparty_htlc_minimum_msat: 0,
			holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
			counterparty_max_accepted_htlcs: 0,
			minimum_depth: None, // Filled in in accept_channel

			counterparty_forwarding_info: None,

			channel_transaction_parameters: ChannelTransactionParameters {
				holder_pubkeys: pubkeys,
				holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
				is_outbound_from_holder: true,
				counterparty_parameters: None,
				funding_outpoint: None,
				opt_anchors: if opt_anchors { Some(()) } else { None },
			},
			funding_transaction: None,

			counterparty_cur_commitment_point: None,
			counterparty_prev_commitment_point: None,
			counterparty_node_id,

			counterparty_shutdown_scriptpubkey: None,

			commitment_secrets: CounterpartyCommitmentSecrets::new(),

			channel_update_status: ChannelUpdateStatus::Enabled,
			closing_signed_in_flight: false,

			announcement_sigs: None,

			#[cfg(any(test, fuzzing))]
			next_local_commitment_tx_fee_info_cached: Mutex::new(None),
			#[cfg(any(test, fuzzing))]
			next_remote_commitment_tx_fee_info_cached: Mutex::new(None),

			workaround_lnd_bug_4006: None,

			latest_inbound_scid_alias: None,
			outbound_scid_alias,

			#[cfg(any(test, fuzzing))]
			historical_inbound_htlc_fulfills: HashSet::new(),

			channel_type: Self::get_initial_channel_type(&config),
		})
	}

	fn check_remote_fee<F: Deref>(fee_estimator: &LowerBoundedFeeEstimator<F>, feerate_per_kw: u32) -> Result<(), ChannelError>
		where F::Target: FeeEstimator
	{
		// We only bound the fee updates on the upper side to prevent completely absurd feerates,
		// always accepting up to 25 sat/vByte or 10x our fee estimator's "High Priority" fee.
		// We generally don't care too much if they set the feerate to something very high, but it
		// could result in the channel being useless due to everything being dust.
		let upper_limit = cmp::max(250 * 25,
			fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64 * 10);
		if feerate_per_kw as u64 > upper_limit {
			return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit)));
		}
		let lower_limit = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background);
		// Some fee estimators round up to the next full sat/vbyte (ie 250 sats per kw), causing
		// occasional issues with feerate disagreements between an initiator that wants a feerate
		// of 1.1 sat/vbyte and a receiver that wants 1.1 rounded up to 2. Thus, we always add 250
		// sat/kw before the comparison here.
		if feerate_per_kw + 250 < lower_limit {
			return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {} (- 250)", feerate_per_kw, lower_limit)));
		}
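		// Illustrative numbers (not from upstream docs): with a HighPriority estimate of
		// 2_000 sat/kw the upper limit is max(250 * 25, 2_000 * 10) = 20_000 sat/kw; with a
		// Background estimate of 253 sat/kw, a remote feerate of 3 sat/kw still passes since
		// 3 + 250 = 253 is not strictly below the lower limit.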
		Ok(())
	}

	/// Creates a new channel from a remote side's request for one.
	/// Assumes chain_hash has already been checked and corresponds with what we expect!
	pub fn new_from_req<K: Deref, F: Deref, L: Deref>(
		fee_estimator: &LowerBoundedFeeEstimator<F>, keys_provider: &K, counterparty_node_id: PublicKey, their_features: &InitFeatures,
		msg: &msgs::OpenChannel, user_id: u64, config: &UserConfig, current_chain_height: u32, logger: &L,
		outbound_scid_alias: u64
	) -> Result<Channel<Signer>, ChannelError>
	where K::Target: KeysInterface<Signer = Signer>,
	      F::Target: FeeEstimator,
	      L::Target: Logger,
	{
		let opt_anchors = false; // TODO - should be based on features

		let announced_channel = (msg.channel_flags & 1) == 1;

		// First check the channel type is known, failing before we do anything else if we don't
		// support this channel type.
		let channel_type = if let Some(channel_type) = &msg.channel_type {
			if channel_type.supports_any_optional_bits() {
				return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
			}
			if channel_type.requires_unknown_bits() {
				return Err(ChannelError::Close("Channel Type field contains unknown bits".to_owned()));
			}
			// We currently only allow four channel types, so write it all out here - we allow
			// `only_static_remote_key` or `static_remote_key | zero_conf` in all contexts, and
			// further allow `static_remote_key | scid_privacy` or
			// `static_remote_key | scid_privacy | zero_conf`, if the channel is not
			// publicly announced.
			if *channel_type != ChannelTypeFeatures::only_static_remote_key() {
				if !channel_type.requires_scid_privacy() && !channel_type.requires_zero_conf() {
					return Err(ChannelError::Close("Channel Type was not understood".to_owned()));
				}
				if channel_type.requires_scid_privacy() && announced_channel {
					return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
				}
			}
			channel_type.clone()
		} else {
			ChannelTypeFeatures::from_counterparty_init(&their_features)
		};
		if !channel_type.supports_static_remote_key() {
			return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
		}

		let holder_signer = keys_provider.get_channel_signer(true, msg.funding_satoshis);
		let pubkeys = holder_signer.pubkeys().clone();
		let counterparty_pubkeys = ChannelPublicKeys {
			funding_pubkey: msg.funding_pubkey,
			revocation_basepoint: msg.revocation_basepoint,
			payment_point: msg.payment_point,
			delayed_payment_basepoint: msg.delayed_payment_basepoint,
			htlc_basepoint: msg.htlc_basepoint
		};

		if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
			return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
		}

		// Check sanity of message fields:
		if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
			return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
		}
		if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
			return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
		}
		if msg.channel_reserve_satoshis > msg.funding_satoshis {
			return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
		}
		let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
		if msg.push_msat > full_channel_value_msat {
			return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
		}
		if msg.dust_limit_satoshis > msg.funding_satoshis {
			return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
		}
		if msg.htlc_minimum_msat >= full_channel_value_msat {
			return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
		}
		Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw)?;

		let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
		if msg.to_self_delay > max_counterparty_selected_contest_delay {
			return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
		}
		if msg.max_accepted_htlcs < 1 {
			return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
		}
		if msg.max_accepted_htlcs > MAX_HTLCS {
			return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
		}

		// Now check against optional parameters as set by config...
		if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
			return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
		}
		if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
		}
		if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
		}
		if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
		}
		if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
			return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
		}
		if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
		}

		// Convert things into internal flags and prep our state:
		if config.channel_handshake_limits.force_announced_channel_preference {
			if config.channel_handshake_config.announced_channel != announced_channel {
				return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
			}
		}

		let holder_selected_channel_reserve_satoshis = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
		if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			// Protocol-level safety check; this should never happen given the lower bound
			// applied by `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
			return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
			return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
		}
		if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
				msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
		}
		if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
			return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
		}

		// check if the funder's amount for the initial commitment tx is sufficient
		// for full fee payment plus a few HTLCs to ensure the channel will be useful.
		let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
		let commitment_tx_fee = Self::commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, opt_anchors) / 1000;
		if funders_amount_msat / 1000 < commitment_tx_fee {
			return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", funders_amount_msat / 1000, commitment_tx_fee)));
		}

		let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee;
		// While it's reasonable for us to not meet the channel reserve initially (if they don't
		// want to push much to us), our counterparty should always have more than our reserve.
		if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
			return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
		}

		let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
			match &msg.shutdown_scriptpubkey {
				&OptionalField::Present(ref script) => {
					// Peer is signaling upfront_shutdown and has opted out with a zero-length script. We don't enforce anything
					if script.len() == 0 {
						None
					} else {
						if !script::is_bolt2_compliant(&script, their_features) {
							return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
						}
						Some(script.clone())
					}
				},
				// Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (a.k.a. a zero-length script). Peer looks buggy, we fail the channel
				&OptionalField::Absent => {
					return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
				}
			}
		} else { None };

		let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
			Some(keys_provider.get_shutdown_scriptpubkey())
		} else { None };
		if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
			if !shutdown_scriptpubkey.is_compatible(&their_features) {
				return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
			}
		}

		let mut secp_ctx = Secp256k1::new();
		secp_ctx.seeded_randomize(&keys_provider.get_secure_random_bytes());

		let chan = Channel {
			user_id,

			config: LegacyChannelConfig {
				options: config.channel_config.clone(),
				announced_channel,
				commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
			},

			prev_config: None,

			inbound_handshake_limits_override: None,

			channel_id: msg.temporary_channel_id,
			channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
			announcement_sigs_state: AnnouncementSigsState::NotSent,
			secp_ctx,

			latest_monitor_update_id: 0,

			holder_signer,
			shutdown_scriptpubkey,
			destination_script: keys_provider.get_destination_script(),

			cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
			cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
			value_to_self_msat: msg.push_msat,

			pending_inbound_htlcs: Vec::new(),
			pending_outbound_htlcs: Vec::new(),
			holding_cell_htlc_updates: Vec::new(),
			pending_update_fee: None,
			holding_cell_update_fee: None,
			next_holder_htlc_id: 0,
			next_counterparty_htlc_id: 0,
			update_time_counter: 1,

			resend_order: RAACommitmentOrder::CommitmentFirst,

			monitor_pending_channel_ready: false,
			monitor_pending_revoke_and_ack: false,
			monitor_pending_commitment_signed: false,
			monitor_pending_forwards: Vec::new(),
			monitor_pending_failures: Vec::new(),
			monitor_pending_finalized_fulfills: Vec::new(),

			#[cfg(debug_assertions)]
			holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
			#[cfg(debug_assertions)]
			counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),

			last_sent_closing_fee: None,
			pending_counterparty_closing_signed: None,
			closing_fee_limits: None,
			target_closing_feerate_sats_per_kw: None,

			inbound_awaiting_accept: true,

			funding_tx_confirmed_in: None,
			funding_tx_confirmation_height: 0,
			short_channel_id: None,
			channel_creation_height: current_chain_height,

			feerate_per_kw: msg.feerate_per_kw,
			channel_value_satoshis: msg.funding_satoshis,
			counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
			holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
			counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
			holder_max_htlc_value_in_flight_msat: Self::get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
			counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
			holder_selected_channel_reserve_satoshis,
			counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
			holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
			counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
			minimum_depth: Some(cmp::max(config.channel_handshake_config.minimum_depth, 1)),

			counterparty_forwarding_info: None,

			channel_transaction_parameters: ChannelTransactionParameters {
				holder_pubkeys: pubkeys,
				holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
				is_outbound_from_holder: false,
				counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
					selected_contest_delay: msg.to_self_delay,
					pubkeys: counterparty_pubkeys,
				}),
				funding_outpoint: None,
				opt_anchors: if opt_anchors { Some(()) } else { None },
			},
			funding_transaction: None,

			counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
			counterparty_prev_commitment_point: None,
			counterparty_node_id,

			counterparty_shutdown_scriptpubkey,

			commitment_secrets: CounterpartyCommitmentSecrets::new(),

			channel_update_status: ChannelUpdateStatus::Enabled,
			closing_signed_in_flight: false,

			announcement_sigs: None,

			#[cfg(any(test, fuzzing))]
			next_local_commitment_tx_fee_info_cached: Mutex::new(None),
			#[cfg(any(test, fuzzing))]
			next_remote_commitment_tx_fee_info_cached: Mutex::new(None),

			workaround_lnd_bug_4006: None,

			latest_inbound_scid_alias: None,
			outbound_scid_alias,

			#[cfg(any(test, fuzzing))]
			historical_inbound_htlc_fulfills: HashSet::new(),

			channel_type,
		};

		Ok(chan)
	}

	/// Transaction nomenclature is somewhat confusing here as there are many different cases - a
	/// transaction is referred to as "a's transaction" implying that a will be able to broadcast
	/// the transaction. Thus, b will generally be sending a signature over such a transaction to
	/// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
	/// such, a transaction is generally the result of b increasing the amount paid to a (or adding
	/// an HTLC to a).
	/// @local is used only to convert relevant internal structures which refer to remote vs local
	/// to decide the value of outputs and the direction of HTLCs.
	/// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
	/// state may indicate that one peer has informed the other that they'd like to add an HTLC but
	/// have not yet committed it. Such HTLCs will only be included in transactions which are being
	/// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
	/// which peer generated this transaction and "to whom" this transaction flows.
	#[inline]
	fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
		where L::Target: Logger
	{
2019-01-06 17:02:53 -05:00
let mut included_dust_htlcs : Vec < ( HTLCOutputInCommitment , Option < & HTLCSource > ) > = Vec ::new ( ) ;
2020-10-15 13:45:18 +02:00
let num_htlcs = self . pending_inbound_htlcs . len ( ) + self . pending_outbound_htlcs . len ( ) ;
let mut included_non_dust_htlcs : Vec < ( HTLCOutputInCommitment , Option < & HTLCSource > ) > = Vec ::with_capacity ( num_htlcs ) ;
2017-12-25 01:05:27 -05:00
2020-06-08 20:47:55 -04:00
let broadcaster_dust_limit_satoshis = if local { self . holder_dust_limit_satoshis } else { self . counterparty_dust_limit_satoshis } ;
2017-12-25 01:05:27 -05:00
let mut remote_htlc_total_msat = 0 ;
let mut local_htlc_total_msat = 0 ;
2018-04-04 11:56:54 -04:00
let mut value_to_self_msat_offset = 0 ;
2017-12-25 01:05:27 -05:00
2021-07-12 15:39:27 +00:00
let mut feerate_per_kw = self . feerate_per_kw ;
if let Some ( ( feerate , update_state ) ) = self . pending_update_fee {
if match update_state {
// Note that these match the inclusion criteria when scanning
// pending_inbound_htlcs below.
FeeUpdateState ::RemoteAnnounced = > { debug_assert! ( ! self . is_outbound ( ) ) ; ! generated_by_local } ,
FeeUpdateState ::AwaitingRemoteRevokeToAnnounce = > { debug_assert! ( ! self . is_outbound ( ) ) ; ! generated_by_local } ,
FeeUpdateState ::Outbound = > { assert! ( self . is_outbound ( ) ) ; generated_by_local } ,
} {
feerate_per_kw = feerate ;
}
}
2021-06-22 03:35:52 +00:00
log_trace! ( logger , " Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}... " ,
commitment_number , ( INITIAL_COMMITMENT_NUMBER - commitment_number ) ,
get_commitment_transaction_number_obscure_factor ( & self . get_holder_pubkeys ( ) . payment_point , & self . get_counterparty_pubkeys ( ) . payment_point , self . is_outbound ( ) ) ,
log_bytes! ( self . channel_id ) , if local { " us " } else { " remote " } , if generated_by_local { " us " } else { " remote " } , feerate_per_kw ) ;
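// Per BOLT 3 (noted here for context), the number committed to on-chain is not
// `commitment_number` itself: (INITIAL_COMMITMENT_NUMBER - commitment_number) is
// XORed with the lower 48 bits of SHA256 of the concatenated payment basepoints
// (open_channel's first, accept_channel's second) -- the obscure factor logged
// as "xor" above -- and split across the transaction's locktime and input
// sequence fields.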
2019-01-08 16:20:24 -05:00
2019-01-06 17:02:53 -05:00
macro_rules ! get_htlc_in_commitment {
( $htlc : expr , $offered : expr ) = > {
HTLCOutputInCommitment {
offered : $offered ,
amount_msat : $htlc . amount_msat ,
cltv_expiry : $htlc . cltv_expiry ,
payment_hash : $htlc . payment_hash ,
transaction_output_index : None
}
}
}
2018-09-09 12:53:57 -04:00
macro_rules ! add_htlc_output {
2019-01-08 16:20:24 -05:00
( $htlc : expr , $outbound : expr , $source : expr , $state_name : expr ) = > {
2018-09-09 12:53:57 -04:00
if $outbound = = local { // "offered HTLC output"
2019-01-06 17:02:53 -05:00
let htlc_in_tx = get_htlc_in_commitment! ( $htlc , true ) ;
2022-01-05 13:40:08 -08:00
if $htlc . amount_msat / 1000 > = broadcaster_dust_limit_satoshis + ( feerate_per_kw as u64 * htlc_timeout_tx_weight ( self . opt_anchors ( ) ) / 1000 ) {
2020-03-02 12:55:53 -05:00
log_trace! ( logger , " ...including {} {} HTLC {} (hash {}) with value {} " , if $outbound { " outbound " } else { " inbound " } , $state_name , $htlc . htlc_id , log_bytes! ( $htlc . payment_hash . 0 ) , $htlc . amount_msat ) ;
2020-10-15 13:45:18 +02:00
included_non_dust_htlcs . push ( ( htlc_in_tx , $source ) ) ;
2018-11-30 10:58:44 -05:00
} else {
2020-03-02 12:55:53 -05:00
log_trace! ( logger , " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit " , if $outbound { " outbound " } else { " inbound " } , $state_name , $htlc . htlc_id , log_bytes! ( $htlc . payment_hash . 0 ) , $htlc . amount_msat ) ;
2019-01-06 17:02:53 -05:00
included_dust_htlcs . push ( ( htlc_in_tx , $source ) ) ;
2017-12-25 01:05:27 -05:00
}
} else {
2019-01-06 17:02:53 -05:00
let htlc_in_tx = get_htlc_in_commitment! ( $htlc , false ) ;
2022-01-05 13:40:08 -08:00
if $htlc . amount_msat / 1000 > = broadcaster_dust_limit_satoshis + ( feerate_per_kw as u64 * htlc_success_tx_weight ( self . opt_anchors ( ) ) / 1000 ) {
2020-03-02 12:55:53 -05:00
log_trace! ( logger , " ...including {} {} HTLC {} (hash {}) with value {} " , if $outbound { " outbound " } else { " inbound " } , $state_name , $htlc . htlc_id , log_bytes! ( $htlc . payment_hash . 0 ) , $htlc . amount_msat ) ;
2020-10-15 13:45:18 +02:00
included_non_dust_htlcs . push ( ( htlc_in_tx , $source ) ) ;
2018-11-30 10:58:44 -05:00
} else {
2020-03-02 12:55:53 -05:00
log_trace! ( logger , " ...including {} {} dust HTLC {} (hash {}) with value {} " , if $outbound { " outbound " } else { " inbound " } , $state_name , $htlc . htlc_id , log_bytes! ( $htlc . payment_hash . 0 ) , $htlc . amount_msat ) ;
2019-01-06 17:02:53 -05:00
included_dust_htlcs . push ( ( htlc_in_tx , $source ) ) ;
2017-12-25 01:05:27 -05:00
}
}
2018-09-09 12:53:57 -04:00
}
}
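// Worked example of the dust checks in add_htlc_output! above (the weights are
// the pre-anchors BOLT 3 constants and the feerate is illustrative; with
// anchors the weights differ slightly): at feerate_per_kw = 1_000, an offered
// HTLC needs at least broadcaster_dust_limit_satoshis + 1_000 * 663 / 1_000 sat
// (htlc_timeout fee) and a received HTLC at least
// broadcaster_dust_limit_satoshis + 703 sat (htlc_success fee) to get its own
// output; anything smaller lands in `included_dust_htlcs` and its value is
// swept into fees on-chain.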
for ref htlc in self . pending_inbound_htlcs . iter ( ) {
2019-01-08 16:20:24 -05:00
let ( include , state_name ) = match htlc . state {
InboundHTLCState ::RemoteAnnounced ( _ ) = > ( ! generated_by_local , " RemoteAnnounced " ) ,
InboundHTLCState ::AwaitingRemoteRevokeToAnnounce ( _ ) = > ( ! generated_by_local , " AwaitingRemoteRevokeToAnnounce " ) ,
InboundHTLCState ::AwaitingAnnouncedRemoteRevoke ( _ ) = > ( true , " AwaitingAnnouncedRemoteRevoke " ) ,
InboundHTLCState ::Committed = > ( true , " Committed " ) ,
InboundHTLCState ::LocalRemoved ( _ ) = > ( ! generated_by_local , " LocalRemoved " ) ,
2018-09-09 12:53:57 -04:00
} ;
if include {
2019-01-08 16:20:24 -05:00
add_htlc_output! ( htlc , false , None , state_name ) ;
2018-09-09 12:53:57 -04:00
remote_htlc_total_msat + = htlc . amount_msat ;
} else {
2020-03-02 12:55:53 -05:00
log_trace! ( logger , " ...not including inbound HTLC {} (hash {}) with value {} due to state ({}) " , htlc . htlc_id , log_bytes! ( htlc . payment_hash . 0 ) , htlc . amount_msat , state_name ) ;
2018-10-15 14:38:19 -04:00
match & htlc . state {
& InboundHTLCState ::LocalRemoved ( ref reason ) = > {
if generated_by_local {
if let & InboundHTLCRemovalReason ::Fulfill ( _ ) = reason {
value_to_self_msat_offset + = htlc . amount_msat as i64 ;
}
2018-09-09 12:53:57 -04:00
}
} ,
_ = > { } ,
}
}
}
2022-01-19 12:19:27 +01:00
let mut preimages : Vec < PaymentPreimage > = Vec ::new ( ) ;
2018-09-09 12:53:57 -04:00
for ref htlc in self . pending_outbound_htlcs . iter ( ) {
2019-01-08 16:20:24 -05:00
let ( include , state_name ) = match htlc . state {
OutboundHTLCState ::LocalAnnounced ( _ ) = > ( generated_by_local , " LocalAnnounced " ) ,
OutboundHTLCState ::Committed = > ( true , " Committed " ) ,
2019-03-03 14:02:51 -05:00
OutboundHTLCState ::RemoteRemoved ( _ ) = > ( generated_by_local , " RemoteRemoved " ) ,
OutboundHTLCState ::AwaitingRemoteRevokeToRemove ( _ ) = > ( generated_by_local , " AwaitingRemoteRevokeToRemove " ) ,
OutboundHTLCState ::AwaitingRemovedRemoteRevoke ( _ ) = > ( false , " AwaitingRemovedRemoteRevoke " ) ,
2018-09-09 12:53:57 -04:00
} ;
2022-01-19 12:19:27 +01:00
let preimage_opt = match htlc . state {
OutboundHTLCState ::RemoteRemoved ( OutboundHTLCOutcome ::Success ( p ) ) = > p ,
OutboundHTLCState ::AwaitingRemoteRevokeToRemove ( OutboundHTLCOutcome ::Success ( p ) ) = > p ,
OutboundHTLCState ::AwaitingRemovedRemoteRevoke ( OutboundHTLCOutcome ::Success ( p ) ) = > p ,
_ = > None ,
} ;
if let Some ( preimage ) = preimage_opt {
preimages . push ( preimage ) ;
}
2018-09-09 12:53:57 -04:00
if include {
2019-01-08 16:20:24 -05:00
add_htlc_output! ( htlc , true , Some ( & htlc . source ) , state_name ) ;
2018-09-09 12:53:57 -04:00
local_htlc_total_msat + = htlc . amount_msat ;
2018-04-04 11:56:54 -04:00
} else {
2020-03-02 12:55:53 -05:00
log_trace! ( logger , " ...not including outbound HTLC {} (hash {}) with value {} due to state ({}) " , htlc . htlc_id , log_bytes! ( htlc . payment_hash . 0 ) , htlc . amount_msat , state_name ) ;
2018-04-04 11:56:54 -04:00
match htlc . state {
2022-01-18 14:17:52 +01:00
OutboundHTLCState ::AwaitingRemoteRevokeToRemove ( OutboundHTLCOutcome ::Success ( _ ) ) | OutboundHTLCState ::AwaitingRemovedRemoteRevoke ( OutboundHTLCOutcome ::Success ( _ ) ) = > {
2019-03-03 14:02:51 -05:00
value_to_self_msat_offset - = htlc . amount_msat as i64 ;
2018-04-04 11:56:54 -04:00
} ,
2022-01-18 14:17:52 +01:00
OutboundHTLCState ::RemoteRemoved ( OutboundHTLCOutcome ::Success ( _ ) ) = > {
2019-03-03 14:02:51 -05:00
if ! generated_by_local {
2018-07-24 15:01:56 -04:00
value_to_self_msat_offset - = htlc . amount_msat as i64 ;
}
} ,
2018-04-04 11:56:54 -04:00
_ = > { } ,
}
2017-12-25 01:05:27 -05:00
}
}
2021-11-18 21:23:41 -05:00
let mut value_to_self_msat : i64 = ( self . value_to_self_msat - local_htlc_total_msat ) as i64 + value_to_self_msat_offset ;
2019-03-02 21:45:30 -05:00
assert! ( value_to_self_msat > = 0 ) ;
// Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
// AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
// "violate" their reserve value by couting those against it. Thus, we have to convert
// everything to i64 before subtracting as otherwise we can overflow.
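// E.g. (illustrative): with several in-flight fulfills counted via
// value_to_self_msat_offset, an intermediate term here can exceed what a naive
// u64 subtraction could represent and would wrap around; in i64 the arithmetic
// stays well-defined, and the assertion below still requires the final balance
// to be non-negative.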
2021-11-18 21:23:41 -05:00
let mut value_to_remote_msat : i64 = ( self . channel_value_satoshis * 1000 ) as i64 - ( self . value_to_self_msat as i64 ) - ( remote_htlc_total_msat as i64 ) - value_to_self_msat_offset ;
2019-03-02 21:45:30 -05:00
assert! ( value_to_remote_msat > = 0 ) ;
2018-10-01 17:48:22 -04:00
#[ cfg(debug_assertions) ]
{
// Make sure that the to_self/to_remote amounts are always either past the
// appropriate channel_reserve *or* making progress towards it.
2020-06-08 20:47:55 -04:00
let mut broadcaster_max_commitment_tx_output = if generated_by_local {
self . holder_max_commitment_tx_output . lock ( ) . unwrap ( )
2018-10-01 17:48:22 -04:00
} else {
2020-06-08 20:47:55 -04:00
self . counterparty_max_commitment_tx_output . lock ( ) . unwrap ( )
2018-10-01 17:48:22 -04:00
} ;
2021-07-03 15:27:12 +00:00
debug_assert! ( broadcaster_max_commitment_tx_output . 0 < = value_to_self_msat as u64 | | value_to_self_msat / 1000 > = self . counterparty_selected_channel_reserve_satoshis . unwrap ( ) as i64 ) ;
2020-06-08 20:47:55 -04:00
broadcaster_max_commitment_tx_output . 0 = cmp ::max ( broadcaster_max_commitment_tx_output . 0 , value_to_self_msat as u64 ) ;
2021-11-09 21:12:30 +00:00
debug_assert! ( broadcaster_max_commitment_tx_output . 1 < = value_to_remote_msat as u64 | | value_to_remote_msat / 1000 > = self . holder_selected_channel_reserve_satoshis as i64 ) ;
2020-06-08 20:47:55 -04:00
broadcaster_max_commitment_tx_output . 1 = cmp ::max ( broadcaster_max_commitment_tx_output . 1 , value_to_remote_msat as u64 ) ;
2018-10-01 17:48:22 -04:00
}
2022-01-04 15:54:54 -08:00
let total_fee_sat = Channel ::< Signer > ::commit_tx_fee_sat ( feerate_per_kw , included_non_dust_htlcs . len ( ) , self . channel_transaction_parameters . opt_anchors . is_some ( ) ) ;
2022-01-04 16:05:28 -08:00
let anchors_val = if self . channel_transaction_parameters . opt_anchors . is_some ( ) { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64 ;
2020-10-15 13:45:18 +02:00
let ( value_to_self , value_to_remote ) = if self . is_outbound ( ) {
2022-01-04 16:05:28 -08:00
( value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64 , value_to_remote_msat / 1000 )
2018-10-01 17:48:22 -04:00
} else {
2022-01-04 16:05:28 -08:00
( value_to_self_msat / 1000 , value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64 )
2018-10-01 17:48:22 -04:00
} ;
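// Worked example (constants taken from BOLT 3; treat the numbers as
// illustrative): at feerate_per_kw = 1_000 with two non-dust HTLCs and no
// anchors, total_fee_sat = 1_000 * (724 + 2 * 172) / 1_000 = 1_068 sat. With
// anchors the base weight rises to 1_124 and the funder additionally carries
// 2 * ANCHOR_OUTPUT_VALUE_SATOSHI = 660 sat of anchor outputs; both amounts
// come out of the funder's balance above.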
2017-12-25 01:05:27 -05:00
2020-10-15 13:45:18 +02:00
let mut value_to_a = if local { value_to_self } else { value_to_remote } ;
let mut value_to_b = if local { value_to_remote } else { value_to_self } ;
2021-08-22 11:08:28 +02:00
let ( funding_pubkey_a , funding_pubkey_b ) = if local {
( self . get_holder_pubkeys ( ) . funding_pubkey , self . get_counterparty_pubkeys ( ) . funding_pubkey )
} else {
( self . get_counterparty_pubkeys ( ) . funding_pubkey , self . get_holder_pubkeys ( ) . funding_pubkey )
} ;
2017-12-25 01:05:27 -05:00
2020-06-08 20:47:55 -04:00
if value_to_a > = ( broadcaster_dust_limit_satoshis as i64 ) {
2020-03-02 12:55:53 -05:00
log_trace! ( logger , " ...including {} output with value {} " , if local { " to_local " } else { " to_remote " } , value_to_a ) ;
2020-10-15 13:45:18 +02:00
} else {
value_to_a = 0 ;
2017-12-25 01:05:27 -05:00
}
2020-06-08 20:47:55 -04:00
if value_to_b > = ( broadcaster_dust_limit_satoshis as i64 ) {
2020-03-02 12:55:53 -05:00
log_trace! ( logger , " ...including {} output with value {} " , if local { " to_remote " } else { " to_local " } , value_to_b ) ;
2020-10-15 13:45:18 +02:00
} else {
value_to_b = 0 ;
2019-01-06 17:02:53 -05:00
}
2020-10-15 13:45:18 +02:00
let num_nondust_htlcs = included_non_dust_htlcs . len ( ) ;
let channel_parameters =
if local { self . channel_transaction_parameters . as_holder_broadcastable ( ) }
else { self . channel_transaction_parameters . as_counterparty_broadcastable ( ) } ;
let tx = CommitmentTransaction ::new_with_auxiliary_htlc_data ( commitment_number ,
value_to_a as u64 ,
value_to_b as u64 ,
2021-11-15 18:03:46 -08:00
self . channel_transaction_parameters . opt_anchors . is_some ( ) ,
2021-08-22 11:08:28 +02:00
funding_pubkey_a ,
funding_pubkey_b ,
2020-10-15 13:45:18 +02:00
keys . clone ( ) ,
feerate_per_kw ,
& mut included_non_dust_htlcs ,
& channel_parameters
) ;
let mut htlcs_included = included_non_dust_htlcs ;
// The unwrap is safe, because all non-dust HTLCs have been assigned an output index
htlcs_included . sort_unstable_by_key ( | h | h . 0. transaction_output_index . unwrap ( ) ) ;
2019-01-04 14:37:48 -05:00
htlcs_included . append ( & mut included_dust_htlcs ) ;
2017-12-25 01:05:27 -05:00
2021-11-18 21:23:41 -05:00
// For the stats, trim the values to 0 msat accordingly
value_to_self_msat = if ( value_to_self_msat * 1000 ) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat } ;
value_to_remote_msat = if ( value_to_remote_msat * 1000 ) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat } ;
CommitmentStats {
tx ,
feerate_per_kw ,
total_fee_sat ,
num_nondust_htlcs ,
htlcs_included ,
local_balance_msat : value_to_self_msat as u64 ,
remote_balance_msat : value_to_remote_msat as u64 ,
2022-01-19 12:19:27 +01:00
preimages
2021-11-18 21:23:41 -05:00
}
2017-12-25 01:05:27 -05:00
}
2018-03-26 16:48:18 -04:00
#[ inline ]
fn get_closing_scriptpubkey ( & self ) -> Script {
2021-07-26 12:31:24 -04:00
// The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
// is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
// outside of those situations will fail.
self . shutdown_scriptpubkey . clone ( ) . unwrap ( ) . into_inner ( )
2018-03-26 16:48:18 -04:00
}
#[ inline ]
2020-09-18 18:26:12 -04:00
fn get_closing_transaction_weight ( & self , a_scriptpubkey : Option < & Script > , b_scriptpubkey : Option < & Script > ) -> u64 {
let mut ret =
( 4 + // version
1 + // input count
36 + // prevout
1 + // script length (0)
4 + // sequence
1 + // output count
4 // lock time
) * 4 + // * 4 for non-witness parts
2 + // witness marker and flag
1 + // witness element count
4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
self . get_funding_redeemscript ( ) . len ( ) as u64 + // funding witness script
2 * ( 1 + 71 ) ; // two signatures + sighash type flags
if let Some ( spk ) = a_scriptpubkey {
ret + = ( ( 8 + 1 ) + // output values and script length
spk . len ( ) as u64 ) * 4 ; // scriptpubkey and witness multiplier
}
if let Some ( spk ) = b_scriptpubkey {
ret + = ( ( 8 + 1 ) + // output values and script length
spk . len ( ) as u64 ) * 4 ; // scriptpubkey and witness multiplier
}
ret
2018-03-26 16:48:18 -04:00
}
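// Worked example (illustrative): with the 71-byte 2-of-2 funding redeemscript
// and two P2WPKH outputs (22-byte scriptpubkeys), the formula above gives
// 51 * 4 + (2 + 1 + 4 + 71 + 2 * (1 + 71)) + 2 * ((8 + 1) + 22) * 4
//   = 204 + 222 + 248 = 674 weight units.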
#[ inline ]
2021-09-01 14:56:50 +02:00
fn build_closing_transaction ( & self , proposed_total_fee_satoshis : u64 , skip_remote_output : bool ) -> ( ClosingTransaction , u64 ) {
2018-09-09 12:53:57 -04:00
assert! ( self . pending_inbound_htlcs . is_empty ( ) ) ;
assert! ( self . pending_outbound_htlcs . is_empty ( ) ) ;
2021-07-12 15:39:27 +00:00
assert! ( self . pending_update_fee . is_none ( ) ) ;
2018-03-26 16:48:18 -04:00
let mut total_fee_satoshis = proposed_total_fee_satoshis ;
2021-08-31 15:23:48 +02:00
let mut value_to_holder : i64 = ( self . value_to_self_msat as i64 ) / 1000 - if self . is_outbound ( ) { total_fee_satoshis as i64 } else { 0 } ;
let mut value_to_counterparty : i64 = ( ( self . channel_value_satoshis * 1000 - self . value_to_self_msat ) as i64 / 1000 ) - if self . is_outbound ( ) { 0 } else { total_fee_satoshis as i64 } ;
2018-03-26 16:48:18 -04:00
2021-08-31 15:23:48 +02:00
if value_to_holder < 0 {
2020-10-15 13:45:18 +02:00
assert! ( self . is_outbound ( ) ) ;
2021-08-31 15:23:48 +02:00
total_fee_satoshis + = ( - value_to_holder ) as u64 ;
} else if value_to_counterparty < 0 {
2020-10-15 13:45:18 +02:00
assert! ( ! self . is_outbound ( ) ) ;
2021-08-31 15:23:48 +02:00
total_fee_satoshis + = ( - value_to_counterparty ) as u64 ;
2018-03-26 16:48:18 -04:00
}
2021-08-31 15:23:48 +02:00
if skip_remote_output | | value_to_counterparty as u64 < = self . holder_dust_limit_satoshis {
value_to_counterparty = 0 ;
2018-03-26 16:48:18 -04:00
}
2021-08-31 15:23:48 +02:00
if value_to_holder as u64 < = self . holder_dust_limit_satoshis {
value_to_holder = 0 ;
2018-03-26 16:48:18 -04:00
}
2021-08-31 15:23:48 +02:00
assert! ( self . shutdown_scriptpubkey . is_some ( ) ) ;
let holder_shutdown_script = self . get_closing_scriptpubkey ( ) ;
let counterparty_shutdown_script = self . counterparty_shutdown_scriptpubkey . clone ( ) . unwrap ( ) ;
let funding_outpoint = self . funding_outpoint ( ) . into_bitcoin_outpoint ( ) ;
2018-03-26 16:48:18 -04:00
2021-09-01 14:56:50 +02:00
let closing_transaction = ClosingTransaction ::new ( value_to_holder as u64 , value_to_counterparty as u64 , holder_shutdown_script , counterparty_shutdown_script , funding_outpoint ) ;
( closing_transaction , total_fee_satoshis )
2018-03-26 16:48:18 -04:00
}
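// Worked example (illustrative): in a 100_000 sat channel where we hold
// 60_000_000 msat and are the funder, proposing 1_000 sat of fee pays 59_000
// sat to us and 40_000 sat to the counterparty; any output at or below
// holder_dust_limit_satoshis is zeroed above and omitted from the transaction.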
2020-10-15 13:45:18 +02:00
fn funding_outpoint ( & self ) -> OutPoint {
self . channel_transaction_parameters . funding_outpoint . unwrap ( )
}
2017-12-25 01:05:27 -05:00
#[ inline ]
/// Creates a set of keys for build_commitment_transaction to generate a transaction which our
/// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
/// our counterparty!)
2020-05-28 20:32:46 -04:00
/// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
2017-12-25 01:05:27 -05:00
/// TODO Some magic rust shit to compile-time check this?
2020-06-08 20:47:55 -04:00
fn build_holder_transaction_keys ( & self , commitment_number : u64 ) -> Result < TxCreationKeys , ChannelError > {
2021-02-20 10:05:55 -05:00
let per_commitment_point = self . holder_signer . get_per_commitment_point ( commitment_number , & self . secp_ctx ) ;
2020-10-15 13:45:18 +02:00
let delayed_payment_base = & self . get_holder_pubkeys ( ) . delayed_payment_basepoint ;
let htlc_basepoint = & self . get_holder_pubkeys ( ) . htlc_basepoint ;
let counterparty_pubkeys = self . get_counterparty_pubkeys ( ) ;
2017-12-25 01:05:27 -05:00
2020-06-08 20:47:55 -04:00
Ok ( secp_check! ( TxCreationKeys ::derive_new ( & self . secp_ctx , & per_commitment_point , delayed_payment_base , htlc_basepoint , & counterparty_pubkeys . revocation_basepoint , & counterparty_pubkeys . htlc_basepoint ) , " Local tx keys generation got bogus keys " . to_owned ( ) ) )
2017-12-25 01:05:27 -05:00
}
#[ inline ]
/// Creates a set of keys for build_commitment_transaction to generate a transaction which we
/// will sign and send to our counterparty.
2018-11-22 19:38:28 -05:00
/// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
2020-02-08 17:22:58 -05:00
fn build_remote_transaction_keys ( & self ) -> Result < TxCreationKeys , ChannelError > {
2017-12-25 01:05:27 -05:00
//TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
//may see payments to it!
2020-10-15 13:45:18 +02:00
let revocation_basepoint = & self . get_holder_pubkeys ( ) . revocation_basepoint ;
let htlc_basepoint = & self . get_holder_pubkeys ( ) . htlc_basepoint ;
let counterparty_pubkeys = self . get_counterparty_pubkeys ( ) ;
2017-12-25 01:05:27 -05:00
2020-06-08 20:47:55 -04:00
Ok ( secp_check! ( TxCreationKeys ::derive_new ( & self . secp_ctx , & self . counterparty_cur_commitment_point . unwrap ( ) , & counterparty_pubkeys . delayed_payment_basepoint , & counterparty_pubkeys . htlc_basepoint , revocation_basepoint , htlc_basepoint ) , " Remote tx keys generation got bogus keys " . to_owned ( ) ) )
2017-12-25 01:05:27 -05:00
}
/// Gets the redeemscript for the funding transaction output (ie the funding transaction output
/// pays to get_funding_redeemscript().to_v0_p2wsh()).
2018-09-06 17:13:41 -04:00
/// Panics if called before accept_channel/new_from_req
2017-12-25 01:05:27 -05:00
pub fn get_funding_redeemscript ( & self ) -> Script {
2020-10-15 13:45:18 +02:00
make_funding_redeemscript ( & self . get_holder_pubkeys ( ) . funding_pubkey , self . counterparty_funding_pubkey ( ) )
2017-12-25 01:05:27 -05:00
}
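// For reference (per BOLT 3; illustrative, not a restatement of the helper's
// internals): the redeemscript built above is the standard 2-of-2
// `OP_2 <pubkey1> <pubkey2> OP_2 OP_CHECKMULTISIG`, with the two 33-byte
// funding pubkeys ordered lexicographically, 71 bytes in total.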
2022-04-22 17:58:19 +00:00
/// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
2022-04-18 15:42:11 +00:00
/// entirely.
///
2022-04-22 17:58:19 +00:00
/// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
/// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
2022-04-18 15:42:11 +00:00
///
/// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
/// disconnected).
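///
/// An illustrative call-site (an assumption, not a guarantee of the only use):
/// replaying a payment preimage found in a [`ChannelMonitor`] back into its
/// channel during startup/deserialization, when the peer is necessarily still
/// disconnected.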
pub fn claim_htlc_while_disconnected_dropping_mon_update < L : Deref >
( & mut self , htlc_id_arg : u64 , payment_preimage_arg : PaymentPreimage , logger : & L )
where L ::Target : Logger {
// Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
// (see equivalent if condition there).
assert! ( self . channel_state & ( ChannelState ::AwaitingRemoteRevoke as u32 | ChannelState ::PeerDisconnected as u32 | ChannelState ::MonitorUpdateFailed as u32 ) ! = 0 ) ;
let mon_update_id = self . latest_monitor_update_id ; // Forget the ChannelMonitor update
let fulfill_resp = self . get_update_fulfill_htlc ( htlc_id_arg , payment_preimage_arg , logger ) ;
self . latest_monitor_update_id = mon_update_id ;
if let UpdateFulfillFetch ::NewClaim { msg , .. } = fulfill_resp {
assert! ( msg . is_none ( ) ) ; // The HTLC must have ended up in the holding cell.
}
}
2021-07-15 21:56:42 +00:00
fn get_update_fulfill_htlc < L : Deref > ( & mut self , htlc_id_arg : u64 , payment_preimage_arg : PaymentPreimage , logger : & L ) -> UpdateFulfillFetch where L ::Target : Logger {
2019-01-24 16:41:51 +02:00
// Either ChannelFunded got set (which means it won't be unset) or there is no way any
2018-04-01 19:54:14 -04:00
// caller thought we could have something claimed (because we wouldn't have accepted an
// incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
// either.
2017-12-25 01:05:27 -05:00
if ( self . channel_state & ( ChannelState ::ChannelFunded as u32 ) ) ! = ( ChannelState ::ChannelFunded as u32 ) {
2018-04-01 19:54:14 -04:00
panic! ( " Was asked to fulfill an HTLC when channel was not in an operational state " ) ;
2017-12-25 01:05:27 -05:00
}
2018-03-26 16:48:18 -04:00
assert_eq! ( self . channel_state & ChannelState ::ShutdownComplete as u32 , 0 ) ;
2017-12-25 01:05:27 -05:00
2018-12-17 23:58:02 -05:00
let payment_hash_calc = PaymentHash ( Sha256 ::hash ( & payment_preimage_arg . 0 [ .. ] ) . into_inner ( ) ) ;
2018-04-04 11:56:54 -04:00
2018-12-10 23:56:02 -05:00
// ChannelManager may generate duplicate claims/fails due to HTLC update events from
// on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
// these, but for now we just have to treat them as normal.
2021-05-23 23:22:46 +00:00
let mut pending_idx = core ::usize ::MAX ;
2021-07-16 02:16:50 +00:00
let mut htlc_value_msat = 0 ;
2018-09-09 12:53:57 -04:00
for ( idx , htlc ) in self . pending_inbound_htlcs . iter ( ) . enumerate ( ) {
2018-09-11 14:20:40 -04:00
if htlc . htlc_id = = htlc_id_arg {
assert_eq! ( htlc . payment_hash , payment_hash_calc ) ;
2018-12-10 23:56:02 -05:00
match htlc . state {
InboundHTLCState ::Committed = > { } ,
InboundHTLCState ::LocalRemoved ( ref reason ) = > {
if let & InboundHTLCRemovalReason ::Fulfill ( _ ) = reason {
} else {
2020-03-02 12:55:53 -05:00
log_warn! ( logger , " Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {} " , log_bytes! ( htlc . payment_hash . 0 ) , log_bytes! ( self . channel_id ( ) ) ) ;
2021-06-29 21:05:45 +00:00
debug_assert! ( false , " Tried to fulfill an HTLC that was already failed " ) ;
2018-12-10 23:56:02 -05:00
}
2021-07-15 21:56:42 +00:00
return UpdateFulfillFetch ::DuplicateClaim { } ;
2018-12-10 23:56:02 -05:00
} ,
_ = > {
debug_assert! ( false , " Have an inbound HTLC we tried to claim before it was fully committed to " ) ;
// Don't return in release mode here so that we can update channel_monitor
}
2018-07-26 17:53:10 -04:00
}
2018-09-11 14:27:17 -04:00
pending_idx = idx ;
2021-07-16 02:16:50 +00:00
htlc_value_msat = htlc . amount_msat ;
2018-09-11 14:27:17 -04:00
break ;
2018-07-26 17:53:10 -04:00
}
}
2021-05-23 23:22:46 +00:00
if pending_idx = = core ::usize ::MAX {
2022-02-17 19:29:59 +00:00
#[ cfg(any(test, fuzzing)) ]
2021-06-29 21:05:45 +00:00
// If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
// this is simply a duplicate claim, not previously failed and we lost funds.
debug_assert! ( self . historical_inbound_htlc_fulfills . contains ( & htlc_id_arg ) ) ;
2021-07-15 21:56:42 +00:00
return UpdateFulfillFetch ::DuplicateClaim { } ;
2018-07-26 17:53:10 -04:00
}
2018-04-04 11:56:54 -04:00
// Now update local state:
2018-07-26 17:53:10 -04:00
//
// We have to put the payment_preimage in the channel_monitor right away here to ensure we
// can claim it even if the channel hits the chain before we see their next commitment.
2020-02-07 20:08:31 -05:00
self . latest_monitor_update_id + = 1 ;
let monitor_update = ChannelMonitorUpdate {
update_id : self . latest_monitor_update_id ,
updates : vec ! [ ChannelMonitorUpdateStep ::PaymentPreimage {
payment_preimage : payment_preimage_arg . clone ( ) ,
} ] ,
} ;
2018-07-26 17:53:10 -04:00
2018-10-17 18:19:55 -04:00
if ( self . channel_state & ( ChannelState ::AwaitingRemoteRevoke as u32 | ChannelState ::PeerDisconnected as u32 | ChannelState ::MonitorUpdateFailed as u32 ) ) ! = 0 {
2022-04-18 15:42:11 +00:00
// Note that this condition is the same as the assertion in
// `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
// `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
// do not get into this branch.
2018-04-04 11:56:54 -04:00
for pending_update in self . holding_cell_htlc_updates . iter ( ) {
match pending_update {
2018-09-11 14:20:40 -04:00
& HTLCUpdateAwaitingACK ::ClaimHTLC { htlc_id , .. } = > {
if htlc_id_arg = = htlc_id {
2020-03-18 21:10:22 -04:00
// Make sure we don't leave latest_monitor_update_id incremented here:
self . latest_monitor_update_id - = 1 ;
2022-02-17 19:29:59 +00:00
#[ cfg(any(test, fuzzing)) ]
2021-06-29 21:05:45 +00:00
debug_assert! ( self . historical_inbound_htlc_fulfills . contains ( & htlc_id_arg ) ) ;
2021-07-15 21:56:42 +00:00
return UpdateFulfillFetch ::DuplicateClaim { } ;
2018-04-04 11:56:54 -04:00
}
} ,
2018-09-11 14:20:40 -04:00
& HTLCUpdateAwaitingACK ::FailHTLC { htlc_id , .. } = > {
if htlc_id_arg = = htlc_id {
2020-03-02 12:55:53 -05:00
log_warn! ( logger , " Have preimage and want to fulfill HTLC with pending failure against channel {} " , log_bytes! ( self . channel_id ( ) ) ) ;
2018-12-10 23:56:02 -05:00
// TODO: We may actually be able to switch to a fulfill here, though it's
// rare enough it may not be worth the complexity burden.
2020-03-19 18:16:07 -04:00
debug_assert! ( false , " Tried to fulfill an HTLC that was already failed " ) ;
2021-07-16 02:16:50 +00:00
return UpdateFulfillFetch ::NewClaim { monitor_update , htlc_value_msat , msg : None } ;
2018-04-04 11:56:54 -04:00
}
} ,
_ = > { }
}
}
2021-06-22 03:35:52 +00:00
log_trace! ( logger , " Adding HTLC claim to holding_cell in channel {}! Current state: {} " , log_bytes! ( self . channel_id ( ) ) , self . channel_state ) ;
2018-04-04 11:56:54 -04:00
self . holding_cell_htlc_updates . push ( HTLCUpdateAwaitingACK ::ClaimHTLC {
2018-09-11 14:20:40 -04:00
payment_preimage : payment_preimage_arg , htlc_id : htlc_id_arg ,
2018-04-04 11:56:54 -04:00
} ) ;
2022-02-17 19:29:59 +00:00
#[ cfg(any(test, fuzzing)) ]
2021-06-29 21:05:45 +00:00
self . historical_inbound_htlc_fulfills . insert ( htlc_id_arg ) ;
2021-07-16 02:16:50 +00:00
return UpdateFulfillFetch ::NewClaim { monitor_update , htlc_value_msat , msg : None } ;
2018-07-26 17:53:10 -04:00
}
2022-02-17 19:29:59 +00:00
#[ cfg(any(test, fuzzing)) ]
2021-06-29 21:05:45 +00:00
self . historical_inbound_htlc_fulfills . insert ( htlc_id_arg ) ;
2018-07-26 17:53:10 -04:00
2018-09-11 14:20:40 -04:00
{
2018-09-09 12:53:57 -04:00
let htlc = & mut self . pending_inbound_htlcs [ pending_idx ] ;
2018-10-15 14:38:19 -04:00
if let InboundHTLCState ::Committed = htlc . state {
} else {
2018-09-11 14:27:17 -04:00
debug_assert! ( false , " Have an inbound HTLC we tried to claim before it was fully committed to " ) ;
2021-07-16 02:16:50 +00:00
return UpdateFulfillFetch ::NewClaim { monitor_update , htlc_value_msat , msg : None } ;
2018-04-04 11:56:54 -04:00
}
2021-06-22 03:35:52 +00:00
log_trace! ( logger , " Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}! " , log_bytes! ( htlc . payment_hash . 0 ) , log_bytes! ( self . channel_id ) ) ;
2018-10-15 14:38:19 -04:00
htlc . state = InboundHTLCState ::LocalRemoved ( InboundHTLCRemovalReason ::Fulfill ( payment_preimage_arg . clone ( ) ) ) ;
2018-09-11 14:20:40 -04:00
}
2017-12-25 01:05:27 -05:00
2021-07-15 21:56:42 +00:00
UpdateFulfillFetch ::NewClaim {
monitor_update ,
2021-07-16 02:16:50 +00:00
htlc_value_msat ,
2021-07-15 21:56:42 +00:00
msg : Some ( msgs ::UpdateFulfillHTLC {
channel_id : self . channel_id ( ) ,
htlc_id : htlc_id_arg ,
payment_preimage : payment_preimage_arg ,
} ) ,
}
2017-12-25 01:05:27 -05:00
}
2021-07-15 22:26:51 +00:00
pub fn get_update_fulfill_htlc_and_commit < L : Deref > ( & mut self , htlc_id : u64 , payment_preimage : PaymentPreimage , logger : & L ) -> Result < UpdateFulfillCommitFetch , ( ChannelError , ChannelMonitorUpdate ) > where L ::Target : Logger {
2021-07-15 21:56:42 +00:00
match self . get_update_fulfill_htlc ( htlc_id , payment_preimage , logger ) {
2021-07-16 02:16:50 +00:00
UpdateFulfillFetch ::NewClaim { mut monitor_update , htlc_value_msat , msg : Some ( update_fulfill_htlc ) } = > {
2021-07-15 22:26:51 +00:00
let ( commitment , mut additional_update ) = match self . send_commitment_no_status_check ( logger ) {
Err ( e ) = > return Err ( ( e , monitor_update ) ) ,
Ok ( res ) = > res
} ;
2020-02-07 20:08:31 -05:00
// send_commitment_no_status_check may bump latest_monitor_update_id but we want them to be
// strictly increasing by one, so decrement it here.
self . latest_monitor_update_id = monitor_update . update_id ;
monitor_update . updates . append ( & mut additional_update . updates ) ;
2021-07-16 02:16:50 +00:00
Ok ( UpdateFulfillCommitFetch ::NewClaim { monitor_update , htlc_value_msat , msgs : Some ( ( update_fulfill_htlc , commitment ) ) } )
2018-04-24 00:19:52 -04:00
} ,
2021-07-16 02:16:50 +00:00
UpdateFulfillFetch ::NewClaim { monitor_update , htlc_value_msat , msg : None } = >
Ok ( UpdateFulfillCommitFetch ::NewClaim { monitor_update , htlc_value_msat , msgs : None } ) ,
2021-07-15 21:56:42 +00:00
UpdateFulfillFetch ::DuplicateClaim { } = > Ok ( UpdateFulfillCommitFetch ::DuplicateClaim { } ) ,
2018-04-04 11:56:54 -04:00
}
}
2021-07-15 21:56:42 +00:00
/// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
/// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
/// however, fail more than once as we wait for an upstream failure to be irrevocably committed
/// before we fail backwards.
/// If we do fail twice, we debug_assert!(false) and return Ok(None). Thus, this will always return
/// Ok(_) if debug assertions are turned on or preconditions are met.
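/// For example (illustrative): a peer re-sending update_fulfill_htlc after a
/// reconnect can produce a second fulfill for the same HTLC ID, which is benign;
/// a second *fail* for the same ID would mean we already failed backwards too
/// early and could lose funds, hence the debug_assert!s below.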
2021-04-20 21:35:11 +00:00
pub fn get_update_fail_htlc < L : Deref > ( & mut self , htlc_id_arg : u64 , err_packet : msgs ::OnionErrorPacket , logger : & L ) -> Result < Option < msgs ::UpdateFailHTLC > , ChannelError > where L ::Target : Logger {
2018-03-20 19:11:27 -04:00
if ( self . channel_state & ( ChannelState ::ChannelFunded as u32 ) ) ! = ( ChannelState ::ChannelFunded as u32 ) {
2018-09-06 19:12:32 -04:00
panic! ( " Was asked to fail an HTLC when channel was not in an operational state " ) ;
2018-03-20 19:11:27 -04:00
}
2018-03-26 16:48:18 -04:00
assert_eq! ( self . channel_state & ChannelState ::ShutdownComplete as u32 , 0 ) ;
2018-03-20 19:11:27 -04:00
2018-12-10 23:56:02 -05:00
// ChannelManager may generate duplicate claims/fails due to HTLC update events from
// on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
// these, but for now we just have to treat them as normal.
2021-05-23 23:22:46 +00:00
let mut pending_idx = core ::usize ::MAX ;
2018-09-11 14:27:17 -04:00
for ( idx , htlc ) in self . pending_inbound_htlcs . iter ( ) . enumerate ( ) {
if htlc . htlc_id = = htlc_id_arg {
2018-12-10 23:56:02 -05:00
match htlc . state {
InboundHTLCState ::Committed = > { } ,
2021-06-29 21:05:45 +00:00
InboundHTLCState ::LocalRemoved ( ref reason ) = > {
if let & InboundHTLCRemovalReason ::Fulfill ( _ ) = reason {
} else {
debug_assert! ( false , " Tried to fail an HTLC that was already failed " ) ;
}
2018-12-10 23:56:02 -05:00
return Ok ( None ) ;
} ,
_ = > {
debug_assert! ( false , " Have an inbound HTLC we tried to claim before it was fully committed to " ) ;
2020-07-13 13:16:32 +09:00
return Err ( ChannelError ::Ignore ( format! ( " Unable to find a pending HTLC which matched the given HTLC ID ( {} ) " , htlc . htlc_id ) ) ) ;
2018-12-10 23:56:02 -05:00
}
2018-09-11 14:27:17 -04:00
}
pending_idx = idx ;
}
}
2021-05-23 23:22:46 +00:00
if pending_idx = = core ::usize ::MAX {
2022-02-17 19:29:59 +00:00
#[ cfg(any(test, fuzzing)) ]
2021-06-29 21:05:45 +00:00
// If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
// is simply a duplicate fail, not previously failed and we failed-back too early.
debug_assert! ( self . historical_inbound_htlc_fulfills . contains ( & htlc_id_arg ) ) ;
return Ok ( None ) ;
2018-09-11 14:27:17 -04:00
}
2018-04-04 11:56:54 -04:00
// Now update local state:
2018-10-17 18:19:55 -04:00
if ( self . channel_state & ( ChannelState ::AwaitingRemoteRevoke as u32 | ChannelState ::PeerDisconnected as u32 | ChannelState ::MonitorUpdateFailed as u32 ) ) ! = 0 {
2018-04-04 11:56:54 -04:00
for pending_update in self . holding_cell_htlc_updates . iter ( ) {
match pending_update {
2018-09-11 14:20:40 -04:00
& HTLCUpdateAwaitingACK ::ClaimHTLC { htlc_id , .. } = > {
if htlc_id_arg = = htlc_id {
2022-02-17 19:29:59 +00:00
#[ cfg(any(test, fuzzing)) ]
2021-06-29 21:05:45 +00:00
debug_assert! ( self . historical_inbound_htlc_fulfills . contains ( & htlc_id_arg ) ) ;
return Ok ( None ) ;
2018-04-04 11:56:54 -04:00
}
} ,
2018-09-11 14:20:40 -04:00
& HTLCUpdateAwaitingACK ::FailHTLC { htlc_id , .. } = > {
if htlc_id_arg = = htlc_id {
2020-03-19 18:16:07 -04:00
debug_assert! ( false , " Tried to fail an HTLC that was already failed " ) ;
2020-07-13 13:16:32 +09:00
return Err ( ChannelError ::Ignore ( " Unable to find a pending HTLC which matched the given HTLC ID " . to_owned ( ) ) ) ;
2018-04-04 11:56:54 -04:00
}
} ,
_ = > { }
}
}
2021-06-22 03:35:52 +00:00
log_trace! ( logger , " Placing failure for HTLC ID {} in holding cell in channel {}. " , htlc_id_arg , log_bytes! ( self . channel_id ( ) ) ) ;
2018-04-04 11:56:54 -04:00
self . holding_cell_htlc_updates . push ( HTLCUpdateAwaitingACK ::FailHTLC {
2018-09-11 14:20:40 -04:00
htlc_id : htlc_id_arg ,
2018-04-04 11:56:54 -04:00
err_packet ,
} ) ;
return Ok ( None ) ;
}
2021-06-22 03:35:52 +00:00
log_trace! ( logger , " Failing HTLC ID {} back with a update_fail_htlc message in channel {}. " , htlc_id_arg , log_bytes! ( self . channel_id ( ) ) ) ;
2018-09-11 14:27:17 -04:00
{
let htlc = & mut self . pending_inbound_htlcs [ pending_idx ] ;
2018-10-15 14:38:19 -04:00
htlc . state = InboundHTLCState ::LocalRemoved ( InboundHTLCRemovalReason ::FailRelay ( err_packet . clone ( ) ) ) ;
2018-03-20 19:11:27 -04:00
}
2018-04-04 11:56:54 -04:00
Ok ( Some ( msgs ::UpdateFailHTLC {
2018-03-20 19:11:27 -04:00
channel_id : self . channel_id ( ) ,
2018-09-11 14:20:40 -04:00
htlc_id : htlc_id_arg ,
2018-03-20 19:11:27 -04:00
reason : err_packet
2018-04-04 11:56:54 -04:00
} ) )
}
2017-12-25 01:05:27 -05:00
// Message handlers:
2022-02-01 21:16:27 +00:00
pub fn accept_channel ( & mut self , msg : & msgs ::AcceptChannel , default_limits : & ChannelHandshakeLimits , their_features : & InitFeatures ) -> Result < ( ) , ChannelError > {
let peer_limits = if let Some ( ref limits ) = self . inbound_handshake_limits_override { limits } else { default_limits } ;
2017-12-25 01:05:27 -05:00
// Check sanity of message fields:
2020-10-15 13:45:18 +02:00
if ! self . is_outbound ( ) {
2020-07-13 13:16:32 +09:00
return Err ( ChannelError ::Close ( " Got an accept_channel message from an inbound peer " . to_owned ( ) ) ) ;
2017-12-25 01:05:27 -05:00
}
if self . channel_state ! = ChannelState ::OurInitSent as u32 {
2020-07-13 13:16:32 +09:00
return Err ( ChannelError ::Close ( " Got an accept_channel message at a strange time " . to_owned ( ) ) ) ;
2017-12-25 01:05:27 -05:00
}
2018-03-22 17:40:53 -04:00
if msg . dust_limit_satoshis > 21000000 * 100000000 {
2020-07-13 13:16:32 +09:00
return Err ( ChannelError ::Close ( format! ( " Peer never wants payout outputs? dust_limit_satoshis was {} " , msg . dust_limit_satoshis ) ) ) ;
2018-03-22 17:40:53 -04:00
}
2017-12-25 01:05:27 -05:00
if msg . channel_reserve_satoshis > self . channel_value_satoshis {
2020-07-13 13:16:32 +09:00
return Err ( ChannelError ::Close ( format! ( " Bogus channel_reserve_satoshis ( {} ). Must not be greater than ( {} ) " , msg . channel_reserve_satoshis , self . channel_value_satoshis ) ) ) ;
2018-08-15 01:12:40 +09:00
}
2021-11-09 21:12:30 +00:00
if msg . dust_limit_satoshis > self . holder_selected_channel_reserve_satoshis {
return Err ( ChannelError ::Close ( format! ( " Dust limit ( {} ) is bigger than our channel reserve ( {} ) " , msg . dust_limit_satoshis , self . holder_selected_channel_reserve_satoshis ) ) ) ;
2017-12-25 01:05:27 -05:00
}
2022-05-02 20:45:17 +00:00
if msg . channel_reserve_satoshis > self . channel_value_satoshis - self . holder_selected_channel_reserve_satoshis {
return Err ( ChannelError ::Close ( format! ( " Bogus channel_reserve_satoshis ( {} ). Must not be greater than channel value minus our reserve ( {} ) " ,
msg . channel_reserve_satoshis , self . channel_value_satoshis - self . holder_selected_channel_reserve_satoshis ) ) ) ;
}
2020-07-13 13:16:32 +09:00
let full_channel_value_msat = ( self . channel_value_satoshis - msg . channel_reserve_satoshis ) * 1000 ;
if msg . htlc_minimum_msat > = full_channel_value_msat {
return Err ( ChannelError ::Close ( format! ( " Minimum htlc value ( {} ) is full channel value ( {} ) " , msg . htlc_minimum_msat , full_channel_value_msat ) ) ) ;
2018-08-15 01:12:40 +09:00
}
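// E.g. (illustrative): a 1_000_000 sat channel with a 10_000 sat counterparty
// reserve gives full_channel_value_msat = 990_000_000; an htlc_minimum_msat at
// or above that would leave no payable amount, so we reject it above.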
2022-02-01 21:16:27 +00:00
let max_delay_acceptable = u16 ::min ( peer_limits . their_to_self_delay , MAX_LOCAL_BREAKDOWN_TIMEOUT ) ;
2020-07-13 13:16:32 +09:00
if msg . to_self_delay > max_delay_acceptable {
return Err ( ChannelError ::Close ( format! ( " They wanted our payments to be delayed by a needlessly long period. Upper limit: {} . Actual: {} " , max_delay_acceptable , msg . to_self_delay ) ) ) ;
2017-12-25 01:05:27 -05:00
}
if msg . max_accepted_htlcs < 1 {
2020-07-13 13:16:32 +09:00
return Err ( ChannelError ::Close ( " 0 max_accepted_htlcs makes for a useless channel " . to_owned ( ) ) ) ;
2018-08-15 01:12:40 +09:00
}
2020-10-15 13:45:18 +02:00
if msg . max_accepted_htlcs > MAX_HTLCS {
return Err ( ChannelError ::Close ( format! ( " max_accepted_htlcs was {} . It must not be larger than {} " , msg . max_accepted_htlcs , MAX_HTLCS ) ) ) ;
2017-12-25 01:05:27 -05:00
}
2018-10-31 14:51:39 -04:00
// Now check against optional parameters as set by config...
2022-02-01 21:16:27 +00:00
if msg . htlc_minimum_msat > peer_limits . max_htlc_minimum_msat {
return Err ( ChannelError ::Close ( format! ( " htlc_minimum_msat ( {} ) is higher than the user specified limit ( {} ) " , msg . htlc_minimum_msat , peer_limits . max_htlc_minimum_msat ) ) ) ;
2018-10-31 14:51:39 -04:00
}
2022-02-01 21:16:27 +00:00
if msg . max_htlc_value_in_flight_msat < peer_limits . min_max_htlc_value_in_flight_msat {
return Err ( ChannelError ::Close ( format! ( " max_htlc_value_in_flight_msat ( {} ) is less than the user specified limit ( {} ) " , msg . max_htlc_value_in_flight_msat , peer_limits . min_max_htlc_value_in_flight_msat ) ) ) ;
2018-10-31 14:51:39 -04:00
}
2022-02-01 21:16:27 +00:00
if msg . channel_reserve_satoshis > peer_limits . max_channel_reserve_satoshis {
return Err ( ChannelError ::Close ( format! ( " channel_reserve_satoshis ( {} ) is higher than the user specified limit ( {} ) " , msg . channel_reserve_satoshis , peer_limits . max_channel_reserve_satoshis ) ) ) ;
2018-10-31 14:51:39 -04:00
}
2022-02-01 21:16:27 +00:00
if msg . max_accepted_htlcs < peer_limits . min_max_accepted_htlcs {
return Err ( ChannelError ::Close ( format! ( " max_accepted_htlcs ( {} ) is less than the user specified limit ( {} ) " , msg . max_accepted_htlcs , peer_limits . min_max_accepted_htlcs ) ) ) ;
2018-10-31 14:51:39 -04:00
}
2021-09-27 17:56:21 +00:00
if msg . dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
return Err ( ChannelError ::Close ( format! ( " dust_limit_satoshis ( {} ) is less than the implementation limit ( {} ) " , msg . dust_limit_satoshis , MIN_CHAN_DUST_LIMIT_SATOSHIS ) ) ) ;
2018-10-31 14:51:39 -04:00
}
2021-09-01 20:18:47 +00:00
if msg . dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
return Err ( ChannelError ::Close ( format! ( " dust_limit_satoshis ( {} ) is greater than the implementation limit ( {} ) " , msg . dust_limit_satoshis , MAX_CHAN_DUST_LIMIT_SATOSHIS ) ) ) ;
2018-10-31 14:51:39 -04:00
}
2022-02-01 21:16:27 +00:00
if msg . minimum_depth > peer_limits . max_minimum_depth {
return Err ( ChannelError ::Close ( format! ( " We consider the minimum depth to be unreasonably large. Expected minimum: ( {} ). Actual: ( {} ) " , peer_limits . max_minimum_depth , msg . minimum_depth ) ) ) ;
2018-10-31 14:51:39 -04:00
}
2018-08-15 01:18:10 +09:00
2022-02-16 21:34:16 +00:00
if let Some ( ty ) = & msg . channel_type {
if * ty ! = self . channel_type {
return Err ( ChannelError ::Close ( " Channel Type in accept_channel didn't match the one sent in open_channel. " . to_owned ( ) ) ) ;
}
} else if their_features . supports_channel_type ( ) {
// Assume they've accepted the channel type as they said they understand it.
} else {
self . channel_type = ChannelTypeFeatures ::from_counterparty_init ( & their_features )
}
2020-06-08 20:47:55 -04:00
let counterparty_shutdown_scriptpubkey = if their_features . supports_upfront_shutdown_script ( ) {
2019-07-09 13:00:15 -04:00
match & msg . shutdown_scriptpubkey {
& OptionalField ::Present ( ref script ) = > {
// Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
2021-02-05 15:14:12 +01:00
if script . len ( ) = = 0 {
2019-07-09 13:00:15 -04:00
None
} else {
2021-09-01 20:22:49 +00:00
if ! script ::is_bolt2_compliant ( & script , their_features ) {
return Err ( ChannelError ::Close ( format! ( " Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {} " , script ) ) ) ;
2021-07-28 14:04:10 -05:00
}
2021-09-01 20:22:49 +00:00
Some ( script . clone ( ) )
2019-07-09 13:00:15 -04:00
}
} ,
// Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (a.k.a. a 0-length script). Peer looks buggy, so we fail the channel
& OptionalField ::Absent = > {
2020-07-13 13:16:32 +09:00
return Err ( ChannelError ::Close ( " Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out " . to_owned ( ) ) ) ;
2019-07-09 13:00:15 -04:00
}
}
} else { None } ;
2020-06-08 20:47:55 -04:00
self . counterparty_dust_limit_satoshis = msg . dust_limit_satoshis ;
self . counterparty_max_htlc_value_in_flight_msat = cmp ::min ( msg . max_htlc_value_in_flight_msat , self . channel_value_satoshis * 1000 ) ;
2021-07-03 15:27:12 +00:00
self . counterparty_selected_channel_reserve_satoshis = Some ( msg . channel_reserve_satoshis ) ;
2020-06-08 20:47:55 -04:00
self . counterparty_htlc_minimum_msat = msg . htlc_minimum_msat ;
self . counterparty_max_accepted_htlcs = msg . max_accepted_htlcs ;
2022-02-01 21:57:01 +00:00
if peer_limits . trust_own_funding_0conf {
self . minimum_depth = Some ( msg . minimum_depth ) ;
} else {
self . minimum_depth = Some ( cmp ::max ( 1 , msg . minimum_depth ) ) ;
}
2020-01-17 14:31:29 -08:00
2020-06-08 20:47:55 -04:00
let counterparty_pubkeys = ChannelPublicKeys {
2020-01-17 14:31:29 -08:00
funding_pubkey : msg . funding_pubkey ,
revocation_basepoint : msg . revocation_basepoint ,
2020-03-08 20:38:16 -04:00
payment_point : msg . payment_point ,
2020-01-17 14:31:29 -08:00
delayed_payment_basepoint : msg . delayed_payment_basepoint ,
htlc_basepoint : msg . htlc_basepoint
} ;
2020-10-15 13:45:18 +02:00
self . channel_transaction_parameters . counterparty_parameters = Some ( CounterpartyChannelTransactionParameters {
selected_contest_delay : msg . to_self_delay ,
pubkeys : counterparty_pubkeys ,
} ) ;
2020-01-17 14:31:29 -08:00
2020-06-08 20:47:55 -04:00
self . counterparty_cur_commitment_point = Some ( msg . first_per_commitment_point ) ;
self . counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey ;
2017-12-25 01:05:27 -05:00
self . channel_state = ChannelState ::OurInitSent as u32 | ChannelState ::TheirInitSent as u32 ;
2022-02-01 21:16:27 +00:00
self . inbound_handshake_limits_override = None ; // We're done enforcing limits on our peer's handshake now.
2017-12-25 01:05:27 -05:00
Ok ( ( ) )
}
	fn funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<(Txid, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger {
		let funding_script = self.get_funding_redeemscript();

		let keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number)?;
		let initial_commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
		{
			let trusted_tx = initial_commitment_tx.trust();
			let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
			let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.channel_value_satoshis);
			// They sign the holder commitment transaction...
			log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
				log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.counterparty_funding_pubkey().serialize()),
				encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
				encode::serialize_hex(&funding_script), log_bytes!(self.channel_id()));
			secp_check!(self.secp_ctx.verify_ecdsa(&sighash, &sig, self.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
		}

		let counterparty_keys = self.build_remote_transaction_keys()?;
		let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;

		let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
		let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
		log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
			log_bytes!(self.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));

		let counterparty_signature = self.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
			.map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0;

		// We sign the "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
		Ok((counterparty_initial_bitcoin_tx.txid, initial_commitment_tx, counterparty_signature))
	}
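	// A minimal sketch (not this crate's API) of the signature check performed in
	// `funding_created_signature` above: `sighash` is assumed to already be the BIP 143
	// sighash of the initial commitment transaction wrapped in a secp256k1 `Message`;
	// the function name and parameters below are illustrative only.
	//
	//     use bitcoin::secp256k1::{ecdsa::Signature, Message, PublicKey, Secp256k1};
	//     fn check_funding_sig(sighash: &Message, sig: &Signature, their_funding_pubkey: &PublicKey) -> bool {
	//         let secp = Secp256k1::verification_only();
	//         secp.verify_ecdsa(sighash, sig, their_funding_pubkey).is_ok()
	//     }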
	fn counterparty_funding_pubkey(&self) -> &PublicKey {
		&self.get_counterparty_pubkeys().funding_pubkey
	}
	pub fn funding_created<L: Deref>(&mut self, msg: &msgs::FundingCreated, best_block: BestBlock, logger: &L) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>, Option<msgs::ChannelReady>), ChannelError> where L::Target: Logger {
		if self.is_outbound() {
			return Err(ChannelError::Close("Received funding_created for an outbound channel?".to_owned()));
		}
		if self.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
			// BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
			// remember the channel, so it's safe to just send an error_message here and drop the
			// channel.
			return Err(ChannelError::Close("Received funding_created after we got the channel!".to_owned()));
		}
		if self.inbound_awaiting_accept {
			return Err(ChannelError::Close("FundingCreated message received before the channel was accepted".to_owned()));
		}
		if self.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
				self.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
				self.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		}

		let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
		self.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
		// This is an externally observable change before we finish all our checks. In particular
		// funding_created_signature may fail.
		self.holder_signer.ready_channel(&self.channel_transaction_parameters);

		let (counterparty_initial_commitment_txid, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) {
			Ok(res) => res,
			Err(ChannelError::Close(e)) => {
				self.channel_transaction_parameters.funding_outpoint = None;
				return Err(ChannelError::Close(e));
			},
			Err(e) => {
				// The only error we know how to handle is ChannelError::Close, so we fall over here
				// to make sure we don't continue with an inconsistent state.
				panic!("unexpected error type from funding_created_signature {:?}", e);
			}
		};

		let holder_commitment_tx = HolderCommitmentTransaction::new(
			initial_commitment_tx,
			msg.signature,
			Vec::new(),
			&self.get_holder_pubkeys().funding_pubkey,
			self.counterparty_funding_pubkey()
		);

		self.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new())
			.map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;

		// Now that we're past error-generating stuff, update our local state:

		let funding_redeemscript = self.get_funding_redeemscript();
		let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
		let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound());
		let shutdown_script = self.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
		let channel_monitor = ChannelMonitor::new(self.secp_ctx.clone(), self.holder_signer.clone(),
		                                          shutdown_script, self.get_holder_selected_contest_delay(),
		                                          &self.destination_script, (funding_txo, funding_txo_script.clone()),
		                                          &self.channel_transaction_parameters,
		                                          funding_redeemscript.clone(), self.channel_value_satoshis,
		                                          obscure_factor,
		                                          holder_commitment_tx, best_block, self.counterparty_node_id);

		channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_commitment_txid, Vec::new(), self.cur_counterparty_commitment_transaction_number, self.counterparty_cur_commitment_point.unwrap(), logger);

		self.channel_state = ChannelState::FundingSent as u32;
		self.channel_id = funding_txo.to_channel_id();
		self.cur_counterparty_commitment_transaction_number -= 1;
		self.cur_holder_commitment_transaction_number -= 1;

		log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.channel_id()));

		Ok((msgs::FundingSigned {
			channel_id: self.channel_id,
			signature
		}, channel_monitor, self.check_get_channel_ready(0)))
	}
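	// Hedged sketch of the `funding_txo.to_channel_id()` derivation used above. Per
	// BOLT 2, the channel_id is the funding txid with the big-endian funding output
	// index XORed into its last two bytes; the helper below is illustrative only, not
	// the actual implementation in `chain::transaction::OutPoint`.
	//
	//     fn channel_id_from_funding(txid_bytes: [u8; 32], output_index: u16) -> [u8; 32] {
	//         let mut id = txid_bytes;
	//         id[30] ^= (output_index >> 8) as u8;
	//         id[31] ^= (output_index & 0xff) as u8;
	//         id
	//     }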
	/// Handles a funding_signed message from the remote end.
	/// If this call is successful, broadcast the funding transaction (and not before!)
	pub fn funding_signed<L: Deref>(&mut self, msg: &msgs::FundingSigned, best_block: BestBlock, logger: &L) -> Result<(ChannelMonitor<Signer>, Transaction, Option<msgs::ChannelReady>), ChannelError> where L::Target: Logger {
		if !self.is_outbound() {
			return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
		}
		if self.channel_state & !(ChannelState::MonitorUpdateFailed as u32) != ChannelState::FundingCreated as u32 {
			return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
		}
		if self.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
				self.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
				self.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		}

		let funding_script = self.get_funding_redeemscript();

		let counterparty_keys = self.build_remote_transaction_keys()?;
		let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
		let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
		let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
		log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
			log_bytes!(self.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));

		let holder_signer = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number)?;
		let initial_commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
		{
			let trusted_tx = initial_commitment_tx.trust();
			let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
			let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.channel_value_satoshis);
			// They sign our commitment transaction, allowing us to broadcast the tx if we wish.
			if let Err(_) = self.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.get_counterparty_pubkeys().funding_pubkey) {
				return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
			}
		}

		let holder_commitment_tx = HolderCommitmentTransaction::new(
			initial_commitment_tx,
			msg.signature,
			Vec::new(),
			&self.get_holder_pubkeys().funding_pubkey,
			self.counterparty_funding_pubkey()
		);

		self.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new())
			.map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;

		let funding_redeemscript = self.get_funding_redeemscript();
		let funding_txo = self.get_funding_txo().unwrap();
		let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
		let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound());
		let shutdown_script = self.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
		let channel_monitor = ChannelMonitor::new(self.secp_ctx.clone(), self.holder_signer.clone(),
		                                          shutdown_script, self.get_holder_selected_contest_delay(),
		                                          &self.destination_script, (funding_txo, funding_txo_script),
		                                          &self.channel_transaction_parameters,
		                                          funding_redeemscript.clone(), self.channel_value_satoshis,
		                                          obscure_factor,
		                                          holder_commitment_tx, best_block, self.counterparty_node_id);

		channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_bitcoin_tx.txid, Vec::new(), self.cur_counterparty_commitment_transaction_number, self.counterparty_cur_commitment_point.unwrap(), logger);

		assert_eq!(self.channel_state & (ChannelState::MonitorUpdateFailed as u32), 0); // We have not had any monitors yet with which to fail an update!
		self.channel_state = ChannelState::FundingSent as u32;
		self.cur_holder_commitment_transaction_number -= 1;
		self.cur_counterparty_commitment_transaction_number -= 1;

		log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.channel_id()));

		Ok((channel_monitor, self.funding_transaction.as_ref().cloned().unwrap(), self.check_get_channel_ready(0)))
	}
	/// Handles a channel_ready message from our peer. If we've already sent our channel_ready
	/// and the channel is now usable (and public), this may generate an announcement_signatures to
	/// reply with.
	pub fn channel_ready<L: Deref>(&mut self, msg: &msgs::ChannelReady, node_pk: PublicKey, genesis_block_hash: BlockHash, best_block: &BestBlock, logger: &L) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError> where L::Target: Logger {
		if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			self.workaround_lnd_bug_4006 = Some(msg.clone());
			return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
		}

		if let Some(scid_alias) = msg.short_channel_id_alias {
			if Some(scid_alias) != self.short_channel_id {
				// The scid alias provided can be used to route payments *from* our counterparty,
				// i.e. can be used for inbound payments and provided in invoices, but is not used
				// when routing outbound payments.
				self.latest_inbound_scid_alias = Some(scid_alias);
			}
		}

		let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);

		if non_shutdown_state == ChannelState::FundingSent as u32 {
			self.channel_state |= ChannelState::TheirChannelReady as u32;
		} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
			self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & MULTI_STATE_FLAGS);
			self.update_time_counter += 1;
		} else if self.channel_state & (ChannelState::ChannelFunded as u32) != 0 ||
			// If we reconnected before sending our `channel_ready` they may still resend theirs:
			(self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
			                      (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
		{
			// They probably disconnected/reconnected and re-sent the channel_ready, which is
			// required, or they're sending a fresh SCID alias.
			let expected_point =
				if self.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
					// If they haven't ever sent an updated point, the point they send should match
					// the current one.
					self.counterparty_cur_commitment_point
				} else {
					// If they have sent updated points, channel_ready is always supposed to match
					// their "first" point, which we re-derive here.
					Some(PublicKey::from_secret_key(&self.secp_ctx, &SecretKey::from_slice(
							&self.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
						).expect("We already advanced, so previous secret keys should have been validated already")))
				};
			if expected_point != Some(msg.next_per_commitment_point) {
				return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
			}
			return Ok(None);
		} else {
			return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
		}

		self.counterparty_prev_commitment_point = self.counterparty_cur_commitment_point;
		self.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);

		log_info!(logger, "Received channel_ready from peer for channel {}", log_bytes!(self.channel_id()));

		Ok(self.get_announcement_sigs(node_pk, genesis_block_hash, best_block.height(), logger))
	}
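	// Hedged sketch of the "first point" re-derivation in `channel_ready` above: once a
	// peer has revealed a per-commitment secret, the matching per-commitment point is
	// simply the secp256k1 public key of that secret, so it need not be stored
	// separately (the variable names below are illustrative):
	//
	//     let secret = commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).unwrap();
	//     let point = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&secret).unwrap());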
	/// Returns the transaction if there is a pending funding transaction that is yet to be
	/// broadcast.
	pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
		if self.channel_state & (ChannelState::FundingCreated as u32) != 0 {
			self.funding_transaction.clone()
		} else {
			None
		}
	}
	/// Returns an HTLCStats about inbound pending HTLCs.
	fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
		let mut stats = HTLCStats {
			pending_htlcs: self.pending_inbound_htlcs.len() as u32,
			pending_htlcs_value_msat: 0,
			on_counterparty_tx_dust_exposure_msat: 0,
			on_holder_tx_dust_exposure_msat: 0,
			holding_cell_msat: 0,
			on_holder_tx_holding_cell_htlcs_count: 0,
		};

		let counterparty_dust_limit_timeout_sat = (self.get_dust_buffer_feerate(outbound_feerate_update) as u64 * htlc_timeout_tx_weight(self.opt_anchors()) / 1000) + self.counterparty_dust_limit_satoshis;
		let holder_dust_limit_success_sat = (self.get_dust_buffer_feerate(outbound_feerate_update) as u64 * htlc_success_tx_weight(self.opt_anchors()) / 1000) + self.holder_dust_limit_satoshis;
		for ref htlc in self.pending_inbound_htlcs.iter() {
			stats.pending_htlcs_value_msat += htlc.amount_msat;
			if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
				stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
			}
			if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
				stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
			}
		}
		stats
	}
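	// Worked example for the dust thresholds above (illustrative numbers only): with no
	// anchors, a dust-buffer feerate of 2530 sat/kw, dust limits of 546 sats on both
	// sides, and the usual BOLT 3 weights of 663 for HTLC-timeout and 703 for
	// HTLC-success transactions:
	//
	//     counterparty_dust_limit_timeout_sat = 2530 * 663 / 1000 + 546 = 1677 + 546 = 2223 sats
	//     holder_dust_limit_success_sat       = 2530 * 703 / 1000 + 546 = 1778 + 546 = 2324 sats
	//
	// i.e. an inbound HTLC below ~2223 sats cannot pay for its timeout claim on the
	// counterparty's commitment transaction and is therefore counted as dust exposure there.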
	/// Returns an HTLCStats about pending outbound HTLCs, *including* pending adds in our holding cell.
	fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
		let mut stats = HTLCStats {
			pending_htlcs: self.pending_outbound_htlcs.len() as u32,
			pending_htlcs_value_msat: 0,
			on_counterparty_tx_dust_exposure_msat: 0,
			on_holder_tx_dust_exposure_msat: 0,
			holding_cell_msat: 0,
			on_holder_tx_holding_cell_htlcs_count: 0,
		};

		let counterparty_dust_limit_success_sat = (self.get_dust_buffer_feerate(outbound_feerate_update) as u64 * htlc_success_tx_weight(self.opt_anchors()) / 1000) + self.counterparty_dust_limit_satoshis;
		let holder_dust_limit_timeout_sat = (self.get_dust_buffer_feerate(outbound_feerate_update) as u64 * htlc_timeout_tx_weight(self.opt_anchors()) / 1000) + self.holder_dust_limit_satoshis;
		for ref htlc in self.pending_outbound_htlcs.iter() {
			stats.pending_htlcs_value_msat += htlc.amount_msat;
			if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
				stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
			}
			if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
				stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
			}
		}

		for update in self.holding_cell_htlc_updates.iter() {
			if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
				stats.pending_htlcs += 1;
				stats.pending_htlcs_value_msat += amount_msat;
				stats.holding_cell_msat += amount_msat;
				if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
					stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
				}
				if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
					stats.on_holder_tx_dust_exposure_msat += amount_msat;
				} else {
					stats.on_holder_tx_holding_cell_htlcs_count += 1;
				}
			}
		}
		stats
	}
	/// Get the available balances, see [`AvailableBalances`]'s fields for more info.
	/// Doesn't bother handling the
	/// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
	/// corner case properly.
	pub fn get_available_balances(&self) -> AvailableBalances {
		// Note that we have to handle overflow due to the above case.
		let outbound_stats = self.get_outbound_pending_htlc_stats(None);

		let mut balance_msat = self.value_to_self_msat;
		for ref htlc in self.pending_inbound_htlcs.iter() {
			if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
				balance_msat += htlc.amount_msat;
			}
		}
		balance_msat -= outbound_stats.pending_htlcs_value_msat;

		let outbound_capacity_msat = cmp::max(self.value_to_self_msat as i64
				- outbound_stats.pending_htlcs_value_msat as i64
				- self.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) as i64 * 1000,
			0) as u64;
		AvailableBalances {
			inbound_capacity_msat: cmp::max(self.channel_value_satoshis as i64 * 1000
					- self.value_to_self_msat as i64
					- self.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
					- self.holder_selected_channel_reserve_satoshis as i64 * 1000,
				0) as u64,
			outbound_capacity_msat,
			next_outbound_htlc_limit_msat: cmp::max(cmp::min(outbound_capacity_msat as i64,
					self.counterparty_max_htlc_value_in_flight_msat as i64
						- outbound_stats.pending_htlcs_value_msat as i64),
				0) as u64,
			balance_msat,
		}
	}
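	// A hedged usage sketch (variable names illustrative): callers deciding whether a
	// new outbound HTLC fits should consult `next_outbound_htlc_limit_msat` rather than
	// the raw `outbound_capacity_msat`, since the former is additionally capped by the
	// counterparty's `max_htlc_value_in_flight_msat`:
	//
	//     let balances = channel.get_available_balances();
	//     let can_send = amt_msat <= balances.next_outbound_htlc_limit_msat;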
	pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
		(self.holder_selected_channel_reserve_satoshis, self.counterparty_selected_channel_reserve_satoshis)
	}
	// Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
	// Note that num_htlcs should not include dust HTLCs.
	fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 {
		// Note that we need to divide before multiplying to round properly,
		// since the lowest denomination of bitcoin on-chain is the satoshi.
		(commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
	}

	// Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
	// Note that num_htlcs should not include dust HTLCs.
	#[inline]
	fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 {
		feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
	}
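	// Worked example for the two fee helpers above (illustrative numbers): at
	// 253 sat/kw with two non-dust HTLCs and no anchors, assuming the BOLT 3 weights
	// of 724 for the base commitment transaction and 172 per HTLC output:
	//
	//     commit_tx_fee_msat: (724 + 2 * 172) * 253 / 1000 * 1000 = 270_000 msat
	//     commit_tx_fee_sat:  253 * (724 + 2 * 172) / 1000        = 270 sat
	//
	// Dividing before multiplying in the msat variant rounds the result down to a whole
	// number of satoshis, matching what a transaction can actually pay on-chain.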
	// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
	// number of pending HTLCs that are on track to be in our next commitment tx, plus an additional
	// HTLC if `fee_spike_buffer_htlc` is Some, plus the new HTLC given by `htlc`. Dust HTLCs are
	// excluded.
	fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
		assert!(self.is_outbound());

		let real_dust_limit_success_sat = (self.feerate_per_kw as u64 * htlc_success_tx_weight(self.opt_anchors()) / 1000) + self.holder_dust_limit_satoshis;
		let real_dust_limit_timeout_sat = (self.feerate_per_kw as u64 * htlc_timeout_tx_weight(self.opt_anchors()) / 1000) + self.holder_dust_limit_satoshis;

		let mut addl_htlcs = 0;
		if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
		match htlc.origin {
			HTLCInitiator::LocalOffered => {
				if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
					addl_htlcs += 1;
				}
			},
			HTLCInitiator::RemoteOffered => {
				if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
					addl_htlcs += 1;
				}
			}
		}

		let mut included_htlcs = 0;
		for ref htlc in self.pending_inbound_htlcs.iter() {
			if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
				continue
			}
			// We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
			// transaction including this HTLC if it times out before they RAA.
			included_htlcs += 1;
		}

		for ref htlc in self.pending_outbound_htlcs.iter() {
			if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
				continue
			}
			match htlc.state {
				OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
				OutboundHTLCState::Committed => included_htlcs += 1,
				OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
				// We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
				// transaction won't be generated until they send us their next RAA, which will mean
				// dropping any HTLCs in this state.
				_ => {},
			}
		}

		for htlc in self.holding_cell_htlc_updates.iter() {
			match htlc {
				&HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
					if amount_msat / 1000 < real_dust_limit_timeout_sat {
						continue
					}
					included_htlcs += 1
				},
				_ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
				         // ack we're guaranteed to never include them in commitment txs anymore.
			}
		}

		let num_htlcs = included_htlcs + addl_htlcs;
		let res = Self::commit_tx_fee_msat(self.feerate_per_kw, num_htlcs, self.opt_anchors());
		#[cfg(any(test, fuzzing))]
		{
			let mut fee = res;
			if fee_spike_buffer_htlc.is_some() {
				fee = Self::commit_tx_fee_msat(self.feerate_per_kw, num_htlcs - 1, self.opt_anchors());
			}
			let total_pending_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len()
				+ self.holding_cell_htlc_updates.len();
			let commitment_tx_info = CommitmentTxInfoCached {
				fee,
				total_pending_htlcs,
				next_holder_htlc_id: match htlc.origin {
					HTLCInitiator::LocalOffered => self.next_holder_htlc_id + 1,
					HTLCInitiator::RemoteOffered => self.next_holder_htlc_id,
				},
				next_counterparty_htlc_id: match htlc.origin {
					HTLCInitiator::LocalOffered => self.next_counterparty_htlc_id,
					HTLCInitiator::RemoteOffered => self.next_counterparty_htlc_id + 1,
				},
				feerate: self.feerate_per_kw,
			};
			*self.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
		}
		res
	}
	// Get the commitment tx fee for the remote's next commitment transaction based on the number of
	// pending HTLCs that are on track to be in their next commitment tx, plus an additional HTLC if
	// `fee_spike_buffer_htlc` is Some, plus the new HTLC given by `htlc`. Dust HTLCs are
	// excluded.
	fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
		assert!(!self.is_outbound());

		let real_dust_limit_success_sat = (self.feerate_per_kw as u64 * htlc_success_tx_weight(self.opt_anchors()) / 1000) + self.counterparty_dust_limit_satoshis;
		let real_dust_limit_timeout_sat = (self.feerate_per_kw as u64 * htlc_timeout_tx_weight(self.opt_anchors()) / 1000) + self.counterparty_dust_limit_satoshis;

		let mut addl_htlcs = 0;
		if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
		match htlc.origin {
			HTLCInitiator::LocalOffered => {
				if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
					addl_htlcs += 1;
				}
			},
			HTLCInitiator::RemoteOffered => {
				if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
					addl_htlcs += 1;
				}
			}
		}

		// When calculating the set of HTLCs which will be included in their next commitment_signed, all
		// non-dust inbound HTLCs are included (as all states imply it will be included) and only
		// committed outbound HTLCs, see below.
		let mut included_htlcs = 0;
		for ref htlc in self.pending_inbound_htlcs.iter() {
			if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
				continue
			}
			included_htlcs += 1;
		}

		for ref htlc in self.pending_outbound_htlcs.iter() {
			if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
				continue
			}
			// We only include outbound HTLCs if they will not be included in their next commitment_signed,
			// i.e. if they've responded to us with an RAA after announcement.
			match htlc.state {
				OutboundHTLCState::Committed => included_htlcs += 1,
				OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
				OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
				_ => {},
			}
		}

		let num_htlcs = included_htlcs + addl_htlcs;
		let res = Self::commit_tx_fee_msat(self.feerate_per_kw, num_htlcs, self.opt_anchors());
		#[cfg(any(test, fuzzing))]
		{
			let mut fee = res;
			if fee_spike_buffer_htlc.is_some() {
				fee = Self::commit_tx_fee_msat(self.feerate_per_kw, num_htlcs - 1, self.opt_anchors());
			}
			let total_pending_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
			let commitment_tx_info = CommitmentTxInfoCached {
				fee,
				total_pending_htlcs,
				next_holder_htlc_id: match htlc.origin {
					HTLCInitiator::LocalOffered => self.next_holder_htlc_id + 1,
					HTLCInitiator::RemoteOffered => self.next_holder_htlc_id,
				},
				next_counterparty_htlc_id: match htlc.origin {
					HTLCInitiator::LocalOffered => self.next_counterparty_htlc_id,
					HTLCInitiator::RemoteOffered => self.next_counterparty_htlc_id + 1,
				},
				feerate: self.feerate_per_kw,
			};
			*self.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
		}
		res
	}
	pub fn update_add_htlc<F, L: Deref>(&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus, create_pending_htlc_status: F, logger: &L) -> Result<(), ChannelError>
	where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus, L::Target: Logger {
		// We can't accept HTLCs sent after we've sent a shutdown.
		let local_sent_shutdown = (self.channel_state & (ChannelState::ChannelFunded as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelFunded as u32);
		if local_sent_shutdown {
			pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
		}
		// If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
		let remote_sent_shutdown = (self.channel_state & (ChannelState::ChannelFunded as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelFunded as u32);
		if remote_sent_shutdown {
			return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
		}
		if msg.amount_msat > self.channel_value_satoshis * 1000 {
			return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
		}
		if msg.amount_msat == 0 {
			return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
		}
		if msg.amount_msat < self.holder_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.holder_htlc_minimum_msat, msg.amount_msat)));
		}

		let inbound_stats = self.get_inbound_pending_htlc_stats(None);
		let outbound_stats = self.get_outbound_pending_htlc_stats(None);
		if inbound_stats.pending_htlcs + 1 > OUR_MAX_HTLCS as u32 {
			return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", OUR_MAX_HTLCS)));
		}
		if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.holder_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.holder_max_htlc_value_in_flight_msat)));
		}
		// Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
		// the reserve_satoshis we told them to always have as direct payment so that they lose
		// something if we punish them for broadcasting an old state).
		// Note that we don't really care about having a small/no to_remote output in our local
		// commitment transactions, as the purpose of the channel reserve is to ensure we can
		// punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
		// present in the next commitment transaction we send them (at least for fulfilled ones,
		// failed ones won't modify value_to_self).
		// Note that we will send HTLCs which another instance of rust-lightning would think
		// violate the reserve value if we do not do this (as we forget inbound HTLCs from the
		// Channel state once they will not be present in the next received commitment
		// transaction).
		let mut removed_outbound_total_msat = 0;
		for ref htlc in self.pending_outbound_htlcs.iter() {
			if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
				removed_outbound_total_msat += htlc.amount_msat;
			} else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
				removed_outbound_total_msat += htlc.amount_msat;
			}
		}

		let exposure_dust_limit_timeout_sats = (self.get_dust_buffer_feerate(None) as u64 * htlc_timeout_tx_weight(self.opt_anchors()) / 1000) + self.counterparty_dust_limit_satoshis;
		if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
			let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
			if on_counterparty_tx_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() {
				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
					on_counterparty_tx_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat());
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		}

		let exposure_dust_limit_success_sats = (self.get_dust_buffer_feerate(None) as u64 * htlc_success_tx_weight(self.opt_anchors()) / 1000) + self.holder_dust_limit_satoshis;
		if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
			let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
			if on_holder_tx_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() {
				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
					on_holder_tx_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat());
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		}

		let pending_value_to_self_msat =
			self.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
		let pending_remote_value_msat =
			self.channel_value_satoshis * 1000 - pending_value_to_self_msat;
		if pending_remote_value_msat < msg.amount_msat {
			return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
		}

		// Check that the remote can afford to pay for this HTLC on-chain at the current
		// feerate_per_kw, while maintaining their channel reserve (as required by the spec).
		let remote_commit_tx_fee_msat = if self.is_outbound() { 0 } else {
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			self.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
		};
		if pending_remote_value_msat - msg.amount_msat < remote_commit_tx_fee_msat {
			return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
		};

		if pending_remote_value_msat - msg.amount_msat - remote_commit_tx_fee_msat < self.holder_selected_channel_reserve_satoshis * 1000 {
			return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
		}

		if !self.is_outbound() {
			// `2 *` and `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
			// the spec because in the spec, the fee spike buffer requirement doesn't exist on the
			// receiver's side, only on the sender's.
			// Note that when we eventually remove support for fee updates and switch to anchor output
			// fees, we will drop the `2 *`, since we will no longer be as sensitive to fee spikes. But
			// we keep the extra HTLC when calculating the next remote commitment transaction fee, as we
			// should still be able to afford adding this HTLC plus one more future HTLC, regardless of
			// being sensitive to fee spikes (see the worked example after this function).
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			let remote_fee_cost_incl_stuck_buffer_msat = 2 * self.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
			if pending_remote_value_msat - msg.amount_msat - self.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat {
				// Note that if the pending_forward_status is not updated here, then it's because we're already failing
				// the HTLC, i.e. its status is already set to failing.
				log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.channel_id()));
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		} else {
			// Check that they won't violate our local required channel reserve by adding this HTLC.
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			let local_commit_tx_fee_msat = self.next_local_commit_tx_fee_msat(htlc_candidate, None);
			if self.value_to_self_msat < self.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat {
				return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
			}
		}

		if self.next_counterparty_htlc_id != msg.htlc_id {
			return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.next_counterparty_htlc_id)));
		}
		if msg.cltv_expiry >= 500000000 {
			return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
		}

		if self.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
			if let PendingHTLCStatus::Forward(_) = pending_forward_status {
				panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
			}
		}

		// Now update local state:
		self.next_counterparty_htlc_id += 1;
		self.pending_inbound_htlcs.push(InboundHTLCOutput {
			htlc_id: msg.htlc_id,
			amount_msat: msg.amount_msat,
			payment_hash: msg.payment_hash,
			cltv_expiry: msg.cltv_expiry,
			state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
		});
		Ok(())
	}
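	// Worked example for the fee spike buffer referenced in `update_add_htlc` above
	// (illustrative numbers): if the remote's next commitment transaction would carry
	// three non-dust HTLCs at 5000 sat/kw with no anchors, assuming the BOLT 3 weights
	// of 724 base plus 172 per HTLC, the buffered requirement is
	//
	//     2 * commit_tx_fee_msat(5000, 3 + 1, false)
	//       = 2 * ((724 + 4 * 172) * 5000 / 1000 * 1000)
	//       = 14_120_000 msat
	//
	// i.e. beyond their reserve the remote must be able to fund this HTLC plus one more
	// future HTLC at roughly double the current feerate.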
	/// Marks an outbound HTLC for which we have received an update_fail/fulfill/malformed message.
	#[inline]
	fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
		assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
		for htlc in self.pending_outbound_htlcs.iter_mut() {
			if htlc.htlc_id == htlc_id {
				let outcome = match check_preimage {
					None => fail_reason.into(),
					Some(payment_preimage) => {
						let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
						if payment_hash != htlc.payment_hash {
							return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
						}
						OutboundHTLCOutcome::Success(Some(payment_preimage))
					}
				};
				match htlc.state {
					OutboundHTLCState::LocalAnnounced(_) =>
						return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
					OutboundHTLCState::Committed => {
						htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
					},
					OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
						return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
				}
				return Ok(htlc);
			}
		}
		Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
	}
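	// Minimal sketch of the preimage check in `mark_outbound_htlc_removed` above: a
	// payment hash is the single SHA256 of its 32-byte preimage, so the claimed
	// preimage can be verified by hashing it (variable names below are illustrative):
	//
	//     use bitcoin::hashes::{sha256::Hash as Sha256, Hash};
	//     let derived = PaymentHash(Sha256::hash(&claimed_preimage.0[..]).into_inner());
	//     let preimage_is_valid = derived == expected_payment_hash;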
	pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
		if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
			return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
		}

		self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
	}
	pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
		if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
			return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
		}

		self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
		Ok(())
	}
	pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
		if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
			return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
		}

		self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
		Ok(())
	}
	pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<(msgs::RevokeAndACK, Option<msgs::CommitmentSigned>, ChannelMonitorUpdate), (Option<ChannelMonitorUpdate>, ChannelError)>
		where L::Target: Logger
	{
		if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
			return Err((None, ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned())));
		}
		if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err((None, ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned())));
		}
		if self.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.last_sent_closing_fee.is_some() {
			return Err((None, ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned())));
		}

		let funding_script = self.get_funding_redeemscript();

		let keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number).map_err(|e| (None, e))?;

		let commitment_stats = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, false, logger);
		let commitment_txid = {
			let trusted_tx = commitment_stats.tx.trust();
			let bitcoin_tx = trusted_tx.built_transaction();
			let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.channel_value_satoshis);

			log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
				log_bytes!(msg.signature.serialize_compact()[..]),
				log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
				log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.channel_id()));
			if let Err(_) = self.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.counterparty_funding_pubkey()) {
				return Err((None, ChannelError::Close("Invalid commitment tx signature from peer".to_owned())));
			}
			bitcoin_tx.txid
		};
		let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
		// If our counterparty updated the channel fee in this commitment transaction, check that
		// they can actually afford the new fee now.
		let update_fee = if let Some((_, update_state)) = self.pending_update_fee {
			update_state == FeeUpdateState::RemoteAnnounced
		} else { false };
		if update_fee {
			debug_assert!(!self.is_outbound());
			let counterparty_reserve_we_require_msat = self.holder_selected_channel_reserve_satoshis * 1000;
			if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
				return Err((None, ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned())));
			}
		}
		#[cfg(any(test, fuzzing))]
		{
			if self.is_outbound() {
				let projected_commit_tx_info = self.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
				*self.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
				if let Some(info) = projected_commit_tx_info {
					let total_pending_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len()
						+ self.holding_cell_htlc_updates.len();
					if info.total_pending_htlcs == total_pending_htlcs
						&& info.next_holder_htlc_id == self.next_holder_htlc_id
						&& info.next_counterparty_htlc_id == self.next_counterparty_htlc_id
						&& info.feerate == self.feerate_per_kw {
							assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
					}
				}
			}
		}
		if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
			return Err((None, ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs))));
		}
		// TODO: Sadly, we pass HTLCs twice to ChannelMonitor: once via the HolderCommitmentTransaction and once via the update
		let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
		for (idx, (htlc, source)) in htlcs_cloned.drain(..).enumerate() {
			if let Some(_) = htlc.transaction_output_index {
				let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
					self.get_counterparty_selected_contest_delay().unwrap(), &htlc, self.opt_anchors(),
					&keys.broadcaster_delayed_payment_key, &keys.revocation_key);

				let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, self.opt_anchors(), &keys);
				let htlc_sighashtype = if self.opt_anchors() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
				let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
				log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
					log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
					encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.channel_id()));
				if let Err(_) = self.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
					return Err((None, ChannelError::Close("Invalid HTLC tx signature from peer".to_owned())));
				}
				htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source));
			} else {
				htlcs_and_sigs.push((htlc, None, source));
			}
		}
		let holder_commitment_tx = HolderCommitmentTransaction::new(
			commitment_stats.tx,
			msg.signature,
			msg.htlc_signatures.clone(),
			&self.get_holder_pubkeys().funding_pubkey,
			self.counterparty_funding_pubkey()
		);

		let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number - 1, &self.secp_ctx);
		self.holder_signer.validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
			.map_err(|_| (None, ChannelError::Close("Failed to validate our commitment".to_owned())))?;
		let per_commitment_secret = self.holder_signer.release_commitment_secret(self.cur_holder_commitment_transaction_number + 1);
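		// Note the indexing convention here: commitment numbers count *down* from
		// INITIAL_COMMITMENT_NUMBER, so `- 1` derives the point for the commitment after
		// this one, while `+ 1` releases the secret for the previous, now-revoked one.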
		// Update state now that we've passed all the can-fail calls...

		let mut need_commitment = false;
		if let &mut Some((_, ref mut update_state)) = &mut self.pending_update_fee {
			if *update_state == FeeUpdateState::RemoteAnnounced {
				*update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
				need_commitment = true;
			}
		}
		self.latest_monitor_update_id += 1;
		let mut monitor_update = ChannelMonitorUpdate {
			update_id: self.latest_monitor_update_id,
			updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
				commitment_tx: holder_commitment_tx,
				htlc_outputs: htlcs_and_sigs
			}]
		};
		for htlc in self.pending_inbound_htlcs.iter_mut() {
			let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
				Some(forward_info.clone())
			} else { None };
			if let Some(forward_info) = new_forward {
				log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
					log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id));
				htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
				need_commitment = true;
			}
		}
		for htlc in self.pending_outbound_htlcs.iter_mut() {
			if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
				log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
					log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id));
				// Grab the preimage, if it exists, instead of cloning
				let mut reason = OutboundHTLCOutcome::Success(None);
				mem::swap(outcome, &mut reason);
				htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
				need_commitment = true;
			}
		}
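		// (These transitions only stage the HTLCs: inbound HTLCs become fully committed, and
		// outbound removals final, once the corresponding revoke_and_ack round-trip
		// completes.)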
		self.cur_holder_commitment_transaction_number -= 1;
		// Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
		// send_commitment_no_status_check() next which will reset this to RAAFirst.
		self.resend_order = RAACommitmentOrder::CommitmentFirst;
		if (self.channel_state & ChannelState::MonitorUpdateFailed as u32) != 0 {
			// In case we initially failed monitor updating without requiring a response, we need
			// to make sure the RAA gets sent first.
			self.monitor_pending_revoke_and_ack = true;
			if need_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
				// If we were going to send a commitment_signed after the RAA, go ahead and do all
				// the corresponding HTLC status updates so that get_last_commitment_update
				// includes the right HTLCs.
				self.monitor_pending_commitment_signed = true;
				let (_, mut additional_update) = self.send_commitment_no_status_check(logger).map_err(|e| (None, e))?;
				// send_commitment_no_status_check may bump latest_monitor_id but we want them to be
				// strictly increasing by one, so decrement it here.
				self.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);
			}
			log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
				log_bytes!(self.channel_id));
			return Err((Some(monitor_update), ChannelError::Ignore("Previous monitor update failure prevented generation of RAA".to_owned())));
		}
		let commitment_signed = if need_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
			// If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
			// we'll send one right away when we get the revoke_and_ack when we
			// free_holding_cell_htlcs().
			let (msg, mut additional_update) = self.send_commitment_no_status_check(logger).map_err(|e| (None, e))?;
			// send_commitment_no_status_check may bump latest_monitor_id but we want them to be
			// strictly increasing by one, so decrement it here.
			self.latest_monitor_update_id = monitor_update.update_id;
			monitor_update.updates.append(&mut additional_update.updates);
			Some(msg)
		} else { None };
		log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
			log_bytes!(self.channel_id()), if commitment_signed.is_some() { " our own commitment_signed and" } else { "" });

		Ok((msgs::RevokeAndACK {
			channel_id: self.channel_id,
			per_commitment_secret,
			next_per_commitment_point,
		}, commitment_signed, monitor_update))
	}
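	// For orientation, a rough sketch of the BOLT 2 message flow that `commitment_signed`
	// above and `revoke_and_ack` below participate in (arrows relative to us):
	//
	//      counterparty                            us
	//   -- update_add_htlc ------------------->
	//   -- commitment_signed ----------------->  commitment_signed() above
	//   <--------------------- revoke_and_ack --
	//   <------ commitment_signed (if needed) --
	//   -- revoke_and_ack -------------------->  revoke_and_ack() below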
	/// Public version of the below, checking relevant preconditions first.
	/// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
	/// returns `(None, Vec::new())`.
	pub fn maybe_free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> Result<(Option<(msgs::CommitmentUpdate, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>), ChannelError> where L::Target: Logger {
		if self.channel_state >= ChannelState::ChannelFunded as u32 &&
		   (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32)) == 0 {
			self.free_holding_cell_htlcs(logger)
		} else { Ok((None, Vec::new())) }
	}
	/// Used to fulfill holding_cell_htlcs when we get a remote ack (or implicitly get it by them
	/// fulfilling or failing the last pending HTLC)
	fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> Result<(Option<(msgs::CommitmentUpdate, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>), ChannelError> where L::Target: Logger {
		assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, 0);
		if self.holding_cell_htlc_updates.len() != 0 || self.holding_cell_update_fee.is_some() {
			log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.holding_cell_htlc_updates.len(),
				if self.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.channel_id()));

			let mut monitor_update = ChannelMonitorUpdate {
				update_id: self.latest_monitor_update_id + 1, // We don't increment this yet!
				updates: Vec::new(),
			};

			let mut htlc_updates = Vec::new();
			mem::swap(&mut htlc_updates, &mut self.holding_cell_htlc_updates);
			let mut update_add_htlcs = Vec::with_capacity(htlc_updates.len());
			let mut update_fulfill_htlcs = Vec::with_capacity(htlc_updates.len());
			let mut update_fail_htlcs = Vec::with_capacity(htlc_updates.len());
			let mut htlcs_to_fail = Vec::new();
			for htlc_update in htlc_updates.drain(..) {
				// Note that this *can* fail, though it should be due to rather-rare conditions on
				// fee races with adding too many outputs which push our total payments just over
				// the limit. In case it's less rare than I anticipate, we may want to revisit
				// handling this case better and maybe fulfilling some of the HTLCs while attempting
				// to rebalance channels.
				match &htlc_update {
					&HTLCUpdateAwaitingACK::AddHTLC { amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet, .. } => {
						match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(), logger) {
							Ok(update_add_msg_option) => update_add_htlcs.push(update_add_msg_option.unwrap()),
							Err(e) => {
								match e {
									ChannelError::Ignore(ref msg) => {
										log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}",
											log_bytes!(payment_hash.0), msg, log_bytes!(self.channel_id()));
										// If we fail to send here, then this HTLC should
										// be failed backwards. Failing to send here
										// indicates that this HTLC may keep being put back
										// into the holding cell without ever being
										// successfully forwarded/failed/fulfilled, causing
										// our counterparty to eventually close on us.
										htlcs_to_fail.push((source.clone(), *payment_hash));
									},
									_ => {
										panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
									},
								}
							}
						}
					},
					&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
						// If an HTLC claim was previously added to the holding cell (via
						// `get_update_fulfill_htlc`) then generating the claim message itself must
						// not fail - any in-between attempts to claim the HTLC will have resulted
						// in it hitting the holding cell again and we cannot change the state of a
						// holding cell HTLC from fulfill to anything else.
						let (update_fulfill_msg_option, mut additional_monitor_update) =
							if let UpdateFulfillFetch::NewClaim { msg, monitor_update, .. } = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
								(msg, monitor_update)
							} else { unreachable!() };
						update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
						monitor_update.updates.append(&mut additional_monitor_update.updates);
					},
					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
						match self.get_update_fail_htlc(htlc_id, err_packet.clone(), logger) {
							Ok(update_fail_msg_option) => {
								// If an HTLC failure was previously added to the holding cell (via
								// `get_update_fail_htlc`) then generating the fail message itself
								// must not fail - we should never end up in a state where we
								// double-fail an HTLC or fail-then-claim an HTLC as it indicates
								// we didn't wait for a full revocation before failing.
								update_fail_htlcs.push(update_fail_msg_option.unwrap())
							},
							Err(e) => {
								if let ChannelError::Ignore(_) = e {}
								else {
									panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
								}
							}
						}
					},
				}
			}
2020-05-06 18:15:43 -04:00
if update_add_htlcs . is_empty ( ) & & update_fulfill_htlcs . is_empty ( ) & & update_fail_htlcs . is_empty ( ) & & self . holding_cell_update_fee . is_none ( ) {
return Ok ( ( None , htlcs_to_fail ) ) ;
}
2021-07-12 15:39:27 +00:00
let update_fee = if let Some ( feerate ) = self . holding_cell_update_fee . take ( ) {
2021-08-21 18:05:51 -04:00
self . send_update_fee ( feerate , logger )
2020-05-06 18:15:43 -04:00
} else {
None
} ;
2020-02-07 20:08:31 -05:00
2020-05-06 18:15:43 -04:00
			let (commitment_signed, mut additional_update) = self.send_commitment_no_status_check(logger)?;
			// send_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
			// but we want them to be strictly increasing by one, so reset it here.
			self.latest_monitor_update_id = monitor_update.update_id;
			monitor_update.updates.append(&mut additional_update.updates);

			log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
				log_bytes!(self.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" },
				update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len());

			Ok((Some((msgs::CommitmentUpdate {
				update_add_htlcs,
				update_fulfill_htlcs,
				update_fail_htlcs,
				update_fail_malformed_htlcs: Vec::new(),
				update_fee,
				commitment_signed,
			}, monitor_update)), htlcs_to_fail))
		} else {
			Ok((None, Vec::new()))
		}
	}
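	// A hypothetical caller-side sketch (names assumed, not defined in this file): the
	// returned monitor update should be persisted before the commitment update is sent to
	// the peer, with `htlcs_to_fail` failed backwards afterwards:
	//
	//   if let (Some((commitment_update, monitor_update)), htlcs_to_fail) =
	//           chan.maybe_free_holding_cell_htlcs(&logger)? {
	//       chain_monitor.update_channel(funding_txo, monitor_update);
	//       // ...then send commitment_update and fail htlcs_to_fail backwards.
	//   }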
	/// Handles receiving a remote's revoke_and_ack. Note that we may return a new
	/// commitment_signed message here in case we had pending outbound HTLCs to add which were
	/// waiting on this revoke_and_ack. The generation of this new commitment_signed may also
	/// fail, generating an appropriate error *after* the channel state has been updated based
	/// on the revoke_and_ack message.
	pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<RAAUpdates, ChannelError>
		where L::Target: Logger,
	{
		if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
			return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
		}
		if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
		}
		if self.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.last_sent_closing_fee.is_some() {
			return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
		}
		let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());

		if let Some(counterparty_prev_commitment_point) = self.counterparty_prev_commitment_point {
			if PublicKey::from_secret_key(&self.secp_ctx, &secret) != counterparty_prev_commitment_point {
				return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
			}
		}
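		// This equality check is what actually enforces revocation: the secret must derive
		// the per-commitment point we previously saw, proving the peer can no longer
		// broadcast that old state without forfeiting funds to us.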
		if self.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
			// Our counterparty seems to have burned their coins to us (by revoking a state when we
			// haven't given them a new commitment transaction to broadcast). We should probably
			// take advantage of this by updating our channel monitor, sending them an error, and
			// waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
			// lot of work, and there's some chance this is all a misunderstanding anyway.
			// We have to do *something*, though, since our signer may get mad at us for otherwise
			// jumping a remote commitment number, so best to just force-close and move on.
			return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
		}
		#[cfg(any(test, fuzzing))]
		{
			*self.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
			*self.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
		}
		self.holder_signer.validate_counterparty_revocation(
			self.cur_counterparty_commitment_transaction_number + 1,
			&secret
		).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;

		self.commitment_secrets.provide_secret(self.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
			.map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
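		// `provide_secret` also cross-checks the new secret against those already stored:
		// per BOLT 3, all per-commitment secrets derive from one seed, so the full history
		// can be verified (and stored compactly, in 49 (index, secret) slots) as we go.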
		self.latest_monitor_update_id += 1;
		let mut monitor_update = ChannelMonitorUpdate {
			update_id: self.latest_monitor_update_id,
			updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
				idx: self.cur_counterparty_commitment_transaction_number + 1,
				secret: msg.per_commitment_secret,
			}],
		};
		// Update state now that we've passed all the can-fail calls...
		// (note that we may still fail to generate the new commitment_signed message, but that's
		// OK, we step the channel here and *then* if the new generation fails we can fail the
		// channel based on that, but stepping stuff here should be safe either way.)
		self.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
		self.counterparty_prev_commitment_point = self.counterparty_cur_commitment_point;
		self.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
		self.cur_counterparty_commitment_transaction_number -= 1;
		if self.announcement_sigs_state == AnnouncementSigsState::Committed {
			self.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
		}

		log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.channel_id()));
		let mut to_forward_infos = Vec::new();
		let mut revoked_htlcs = Vec::new();
		let mut finalized_claimed_htlcs = Vec::new();
		let mut update_fail_htlcs = Vec::new();
		let mut update_fail_malformed_htlcs = Vec::new();
		let mut require_commitment = false;
		let mut value_to_self_msat_diff: i64 = 0;
		{
			// Take references explicitly so that we can hold multiple references to self.
			let pending_inbound_htlcs: &mut Vec<_> = &mut self.pending_inbound_htlcs;
			let pending_outbound_htlcs: &mut Vec<_> = &mut self.pending_outbound_htlcs;

			// We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
			pending_inbound_htlcs.retain(|htlc| {
				if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
					log_trace!(logger, "...removing inbound LocalRemoved {}", log_bytes!(htlc.payment_hash.0));
					if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						value_to_self_msat_diff += htlc.amount_msat as i64;
					}
					false
				} else { true }
			});
			pending_outbound_htlcs.retain(|htlc| {
				if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
					log_trace!(logger, "...removing outbound AwaitingRemovedRemoteRevoke {}", log_bytes!(htlc.payment_hash.0));
					if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
						revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
					} else {
						finalized_claimed_htlcs.push(htlc.source.clone());
						// They fulfilled, so we sent them money
						value_to_self_msat_diff -= htlc.amount_msat as i64;
					}
					false
				} else { true }
			});
			for htlc in pending_inbound_htlcs.iter_mut() {
				let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
					true
				} else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
					true
				} else { false };
				if swap {
					let mut state = InboundHTLCState::Committed;
					mem::swap(&mut state, &mut htlc.state);

					if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
						log_trace!(logger, "...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
						htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
						require_commitment = true;
					} else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
						match forward_info {
							PendingHTLCStatus::Fail(fail_msg) => {
								log_trace!(logger, "...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", log_bytes!(htlc.payment_hash.0));
								require_commitment = true;
								match fail_msg {
									HTLCFailureMsg::Relay(msg) => {
										htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
										update_fail_htlcs.push(msg)
									},
									HTLCFailureMsg::Malformed(msg) => {
										htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
										update_fail_malformed_htlcs.push(msg)
									},
								}
							},
							PendingHTLCStatus::Forward(forward_info) => {
								log_trace!(logger, "...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", log_bytes!(htlc.payment_hash.0));
								to_forward_infos.push((forward_info, htlc.htlc_id));
								htlc.state = InboundHTLCState::Committed;
							}
						}
					}
				}
			}
			for htlc in pending_outbound_htlcs.iter_mut() {
				if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
					log_trace!(logger, "...promoting outbound LocalAnnounced {} to Committed", log_bytes!(htlc.payment_hash.0));
					htlc.state = OutboundHTLCState::Committed;
				}
				if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
					log_trace!(logger, "...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
					// Grab the preimage, if it exists, instead of cloning
					let mut reason = OutboundHTLCOutcome::Success(None);
					mem::swap(outcome, &mut reason);
					htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
					require_commitment = true;
				}
			}
		}
		self.value_to_self_msat = (self.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
		if let Some((feerate, update_state)) = self.pending_update_fee {
			match update_state {
				FeeUpdateState::Outbound => {
					debug_assert!(self.is_outbound());
					log_trace!(logger, "...promoting outbound fee update {} to Committed", feerate);
					self.feerate_per_kw = feerate;
					self.pending_update_fee = None;
				},
				FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); },
				FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
					debug_assert!(!self.is_outbound());
					log_trace!(logger, "...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
					require_commitment = true;
					self.feerate_per_kw = feerate;
					self.pending_update_fee = None;
				},
			}
		}
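		// Summarizing the fee-update lifecycle above: our own (outbound) updates commit as
		// soon as this revoke_and_ack arrives, while a counterparty's update must first pass
		// through AwaitingRemoteRevokeToAnnounce (set in commitment_signed) before landing
		// here as Committed.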
		if (self.channel_state & ChannelState::MonitorUpdateFailed as u32) == ChannelState::MonitorUpdateFailed as u32 {
			// We can't actually generate a new commitment transaction (incl by freeing holding
			// cells) while we can't update the monitor, so we just return what we have.
			if require_commitment {
				self.monitor_pending_commitment_signed = true;
				// When the monitor updating is restored we'll call get_last_commitment_update(),
				// which does not update state, but we're definitely now awaiting a remote revoke
				// before we can step forward any more, so set it here.
				let (_, mut additional_update) = self.send_commitment_no_status_check(logger)?;
				// send_commitment_no_status_check may bump latest_monitor_id but we want them to be
				// strictly increasing by one, so decrement it here.
				self.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);
			}
			self.monitor_pending_forwards.append(&mut to_forward_infos);
			self.monitor_pending_failures.append(&mut revoked_htlcs);
			self.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
			log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id()));
			return Ok(RAAUpdates {
				commitment_update: None, finalized_claimed_htlcs: Vec::new(),
				accepted_htlcs: Vec::new(), failed_htlcs: Vec::new(),
				monitor_update,
				holding_cell_failed_htlcs: Vec::new()
			});
		}
		match self.free_holding_cell_htlcs(logger)? {
			(Some((mut commitment_update, mut additional_update)), htlcs_to_fail) => {
				commitment_update.update_fail_htlcs.reserve(update_fail_htlcs.len());
				for fail_msg in update_fail_htlcs.drain(..) {
					commitment_update.update_fail_htlcs.push(fail_msg);
				}
				commitment_update.update_fail_malformed_htlcs.reserve(update_fail_malformed_htlcs.len());
				for fail_msg in update_fail_malformed_htlcs.drain(..) {
					commitment_update.update_fail_malformed_htlcs.push(fail_msg);
				}

				// free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
				// strictly increasing by one, so decrement it here.
				self.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);

				Ok(RAAUpdates {
					commitment_update: Some(commitment_update),
					finalized_claimed_htlcs,
					accepted_htlcs: to_forward_infos,
					failed_htlcs: revoked_htlcs,
					monitor_update,
					holding_cell_failed_htlcs: htlcs_to_fail
				})
			},
			(None, htlcs_to_fail) => {
				if require_commitment {
					let (commitment_signed, mut additional_update) = self.send_commitment_no_status_check(logger)?;

					// send_commitment_no_status_check may bump latest_monitor_id but we want them to be
					// strictly increasing by one, so decrement it here.
					self.latest_monitor_update_id = monitor_update.update_id;
					monitor_update.updates.append(&mut additional_update.updates);

					log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.",
						log_bytes!(self.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
					Ok(RAAUpdates {
						commitment_update: Some(msgs::CommitmentUpdate {
							update_add_htlcs: Vec::new(),
							update_fulfill_htlcs: Vec::new(),
							update_fail_htlcs,
							update_fail_malformed_htlcs,
							update_fee: None,
							commitment_signed
						}),
						finalized_claimed_htlcs,
						accepted_htlcs: to_forward_infos, failed_htlcs: revoked_htlcs,
						monitor_update, holding_cell_failed_htlcs: htlcs_to_fail
					})
				} else {
					log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.channel_id()));
					Ok(RAAUpdates {
						commitment_update: None,
						finalized_claimed_htlcs,
						accepted_htlcs: to_forward_infos, failed_htlcs: revoked_htlcs,
						monitor_update, holding_cell_failed_htlcs: htlcs_to_fail
					})
				}
			}
		}
	}
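	// Note that every path above which adds a commitment update also folded the extra
	// ChannelMonitorUpdates into the single returned `monitor_update`, keeping
	// `latest_monitor_update_id` advancing by exactly one per incoming message, as the
	// inline comments require.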
	/// Adds a pending update to this channel. See the doc for send_htlc for
	/// further details on when the return value may be `None`.
	/// If our balance is too low to cover the cost of the next commitment transaction at the
	/// new feerate, the update is cancelled.
	/// You MUST call send_commitment prior to any other calls on this Channel.
	fn send_update_fee<L: Deref>(&mut self, feerate_per_kw: u32, logger: &L) -> Option<msgs::UpdateFee> where L::Target: Logger {
		if !self.is_outbound() {
			panic!("Cannot send fee from inbound channel");
		}
		if !self.is_usable() {
			panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
		}
		if !self.is_live() {
			panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
		}
		// Before proposing a feerate update, check that we can actually afford the new fee.
		let inbound_stats = self.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
		let outbound_stats = self.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
		let keys = if let Ok(keys) = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number) { keys } else { return None; };
		let commitment_stats = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, true, logger);
		let buffer_fee_msat = Channel::<Signer>::commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.opt_anchors()) * 1000;
		let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
		if holder_balance_msat < buffer_fee_msat + self.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
			//TODO: auto-close after a number of failures?
			log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
			return None;
		}
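		// The buffer above is deliberately pessimistic: it prices in every holding-cell
		// HTLC plus CONCURRENT_INBOUND_HTLC_FEE_BUFFER further HTLCs, so HTLCs added
		// concurrently with this fee update shouldn't make the commitment unaffordable.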
		// Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
		let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
		let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
		if holder_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() {
			log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
			return None;
		}
		if counterparty_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() {
			log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
			return None;
		}
		if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateFailed as u32)) != 0 {
			self.holding_cell_update_fee = Some(feerate_per_kw);
			return None;
		}

		debug_assert!(self.pending_update_fee.is_none());
		self.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));

		Some(msgs::UpdateFee {
			channel_id: self.channel_id,
			feerate_per_kw,
		})
	}

	pub fn send_update_fee_and_commit<L: Deref>(&mut self, feerate_per_kw: u32, logger: &L) -> Result<Option<(msgs::UpdateFee, msgs::CommitmentSigned, ChannelMonitorUpdate)>, ChannelError> where L::Target: Logger {
		match self.send_update_fee(feerate_per_kw, logger) {
			Some(update_fee) => {
				let (commitment_signed, monitor_update) = self.send_commitment_no_status_check(logger)?;
				Ok(Some((update_fee, commitment_signed, monitor_update)))
			},
			None => Ok(None)
		}
	}

	/// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
	/// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
	/// resent.
	/// No further message handling calls may be made until a channel_reestablish dance has
	/// completed.
	pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) where L::Target: Logger {
		assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0);
		if self.channel_state < ChannelState::FundingSent as u32 {
			self.channel_state = ChannelState::ShutdownComplete as u32;
			return;
		}
		if self.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.announcement_sigs_state == AnnouncementSigsState::Committed {
			self.announcement_sigs_state = AnnouncementSigsState::NotSent;
		}

		// Upon reconnect we have to start the closing_signed dance over, but shutdown messages
		// will be retransmitted.
		self.last_sent_closing_fee = None;
		self.pending_counterparty_closing_signed = None;
		self.closing_fee_limits = None;

		let mut inbound_drop_count = 0;
		self.pending_inbound_htlcs.retain(|htlc| {
			match htlc.state {
				InboundHTLCState::RemoteAnnounced(_) => {
					// They sent us an update_add_htlc but we never got the commitment_signed.
					// We'll tell them what commitment_signed we're expecting next and they'll drop
					// this HTLC accordingly
					inbound_drop_count += 1;
					false
				},
				InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
					// We received a commitment_signed updating this HTLC and (at least hopefully)
					// sent a revoke_and_ack (which we can re-transmit) and have heard nothing
					// in response to it yet, so don't touch it.
					true
				},
				InboundHTLCState::Committed => true,
				InboundHTLCState::LocalRemoved(_) => {
					// We (hopefully) sent a commitment_signed updating this HTLC (which we can
					// re-transmit if needed) and they may have even sent a revoke_and_ack back
					// (that we missed). Keep this around for now and if they tell us they missed
					// the commitment_signed we can re-transmit the update then.
					true
				},
			}
		});
		self.next_counterparty_htlc_id -= inbound_drop_count;

		if let Some((_, update_state)) = self.pending_update_fee {
			if update_state == FeeUpdateState::RemoteAnnounced {
				debug_assert!(!self.is_outbound());
				self.pending_update_fee = None;
			}
		}

		for htlc in self.pending_outbound_htlcs.iter_mut() {
			if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
				// They sent us an update to remove this but haven't yet sent the corresponding
				// commitment_signed, we need to move it back to Committed and they can re-send
				// the update upon reconnection.
				htlc.state = OutboundHTLCState::Committed;
			}
		}

		self.channel_state |= ChannelState::PeerDisconnected as u32;
		log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.channel_id()));
	}
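
	// A minimal sketch of the intended call flow (hypothetical caller code; the real driver
	// lives in the ChannelManager's peer-disconnect handling):
	//
	//	// On disconnect, drop uncommitted updates and pause the channel:
	//	channel.remove_uncommitted_htlcs_and_mark_paused(&logger);
	//	// ...no update_*/commitment_signed handling until the peer reconnects, then:
	//	let responses = channel.channel_reestablish(&msg, &logger, node_pk, genesis_hash, &best_block)?;
	//	// `responses` tells us which channel_ready/RAA/commitment_update to re-send, and in
	//	// which order.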

	/// Indicates that a ChannelMonitor update failed to be stored by the client and further
	/// updates are partially paused.
	/// This must be called immediately after the call which generated the ChannelMonitor update
	/// which failed. The messages which were generated from that call must *not* have been sent
	/// to the remote end, and must instead have been dropped. They will be regenerated when
	/// monitor_updating_restored is called.
	pub fn monitor_update_failed(&mut self, resend_raa: bool, resend_commitment: bool,
		resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
		mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
		mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
	) {
		self.monitor_pending_revoke_and_ack |= resend_raa;
		self.monitor_pending_commitment_signed |= resend_commitment;
		self.monitor_pending_channel_ready |= resend_channel_ready;
		self.monitor_pending_forwards.append(&mut pending_forwards);
		self.monitor_pending_failures.append(&mut pending_fails);
		self.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
		self.channel_state |= ChannelState::MonitorUpdateFailed as u32;
	}
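
	// Sketch of the pause/restore contract (hypothetical caller code; assumes the holder of
	// this Channel observed a failed chain::Watch persistence):
	//
	//	// The monitor update could not be persisted; messages from the generating call must be
	//	// dropped, not sent:
	//	channel.monitor_update_failed(resend_raa, resend_commitment, resend_channel_ready,
	//		pending_forwards, pending_fails, pending_finalized_claimed_htlcs);
	//	// ...once the client reports the ChannelMonitor update as durably stored:
	//	let updates = channel.monitor_updating_restored(&logger, node_pk, genesis_hash, best_height);
	//	// re-send updates.raa/updates.commitment_update per updates.order, broadcast
	//	// updates.funding_broadcastable, and hand the HTLC vecs back to the ChannelManager.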

	/// Indicates that the latest ChannelMonitor update has been committed by the client
	/// successfully and we should restore normal operation. Returns messages which should be sent
	/// to the remote side.
	pub fn monitor_updating_restored<L: Deref>(&mut self, logger: &L, node_pk: PublicKey, genesis_block_hash: BlockHash, best_block_height: u32) -> MonitorRestoreUpdates where L::Target: Logger {
		assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, ChannelState::MonitorUpdateFailed as u32);
		self.channel_state &= !(ChannelState::MonitorUpdateFailed as u32);

		// If we're past (or at) the FundingSent stage on an outbound channel, try to
		// (re-)broadcast the funding transaction as we may have declined to broadcast it when we
		// first received the funding_signed.
		let mut funding_broadcastable =
			if self.is_outbound() && self.channel_state & !MULTI_STATE_FLAGS >= ChannelState::FundingSent as u32 {
				self.funding_transaction.take()
			} else { None };
		// That said, if the funding transaction is already confirmed (ie we're active with a
		// minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
		if self.channel_state & !MULTI_STATE_FLAGS >= ChannelState::ChannelFunded as u32 && self.minimum_depth != Some(0) {
			funding_broadcastable = None;
		}

		// We will never broadcast the funding transaction when we're in MonitorUpdateFailed (and
		// we assume the user never directly broadcasts the funding transaction and waits for us to
		// do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
		// * an inbound channel that failed to persist the monitor on funding_created and we got
		//   the funding transaction confirmed before the monitor was persisted, or
		// * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
		let channel_ready = if self.monitor_pending_channel_ready {
			assert!(!self.is_outbound() || self.minimum_depth == Some(0),
				"Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
			self.monitor_pending_channel_ready = false;
			let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
			Some(msgs::ChannelReady {
				channel_id: self.channel_id(),
				next_per_commitment_point,
				short_channel_id_alias: Some(self.outbound_scid_alias),
			})
		} else { None };

		let announcement_sigs = self.get_announcement_sigs(node_pk, genesis_block_hash, best_block_height, logger);

		let mut accepted_htlcs = Vec::new();
		mem::swap(&mut accepted_htlcs, &mut self.monitor_pending_forwards);
		let mut failed_htlcs = Vec::new();
		mem::swap(&mut failed_htlcs, &mut self.monitor_pending_failures);
		let mut finalized_claimed_htlcs = Vec::new();
		mem::swap(&mut finalized_claimed_htlcs, &mut self.monitor_pending_finalized_fulfills);

		if self.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
			self.monitor_pending_revoke_and_ack = false;
			self.monitor_pending_commitment_signed = false;
			return MonitorRestoreUpdates {
				raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
				accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
			};
		}

		let raa = if self.monitor_pending_revoke_and_ack {
			Some(self.get_last_revoke_and_ack())
		} else { None };
		let commitment_update = if self.monitor_pending_commitment_signed {
			Some(self.get_last_commitment_update(logger))
		} else { None };

		self.monitor_pending_revoke_and_ack = false;
		self.monitor_pending_commitment_signed = false;
		let order = self.resend_order.clone();
		log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
			log_bytes!(self.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
			if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
			match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA" });
		MonitorRestoreUpdates {
			raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
		}
	}
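
	// Note on ordering (a sketch of how a caller might consume `MonitorRestoreUpdates::order`,
	// which mirrors `resend_order`):
	//
	//	match updates.order {
	//		RAACommitmentOrder::CommitmentFirst => { /* send commitment_update, then raa */ },
	//		RAACommitmentOrder::RevokeAndACKFirst => { /* send raa, then commitment_update */ },
	//	}
	//
	// The counterparty expects these messages in the order we originally sent them.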

	pub fn update_fee<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee) -> Result<(), ChannelError>
		where F::Target: FeeEstimator
	{
		if self.is_outbound() {
			return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
		}
		if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
		}
		Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw)?;
		let feerate_over_dust_buffer = msg.feerate_per_kw > self.get_dust_buffer_feerate(None);

		self.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
		self.update_time_counter += 1;
		// If the feerate has increased over the previous dust buffer (note that
		// `get_dust_buffer_feerate` considers the `pending_update_fee` status), check that we
		// won't be pushed over our dust exposure limit by the feerate increase.
		if feerate_over_dust_buffer {
			let inbound_stats = self.get_inbound_pending_htlc_stats(None);
			let outbound_stats = self.get_outbound_pending_htlc_stats(None);
			let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
			let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
			if holder_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() {
				return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
					msg.feerate_per_kw, holder_tx_dust_exposure)));
			}
			if counterparty_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() {
				return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
					msg.feerate_per_kw, counterparty_tx_dust_exposure)));
			}
		}
		Ok(())
	}
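
	// Worked example for the dust-buffer check above (numbers are illustrative assumptions):
	// suppose get_dust_buffer_feerate() currently returns 2_500 sat/kWU. An update_fee to
	// 2_000 sat/kWU skips the re-check (we already budgeted dust exposure at that level),
	// while an update_fee to 5_000 sat/kWU re-computes both exposure totals, since HTLCs that
	// were previously above the trimmed-to-dust threshold may now fall below it on either
	// commitment transaction.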

	fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
		let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
		let per_commitment_secret = self.holder_signer.release_commitment_secret(self.cur_holder_commitment_transaction_number + 2);
		msgs::RevokeAndACK {
			channel_id: self.channel_id,
			per_commitment_secret,
			next_per_commitment_point,
		}
	}

	fn get_last_commitment_update<L: Deref>(&self, logger: &L) -> msgs::CommitmentUpdate where L::Target: Logger {
		let mut update_add_htlcs = Vec::new();
		let mut update_fulfill_htlcs = Vec::new();
		let mut update_fail_htlcs = Vec::new();
		let mut update_fail_malformed_htlcs = Vec::new();
		for htlc in self.pending_outbound_htlcs.iter() {
			if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
				update_add_htlcs.push(msgs::UpdateAddHTLC {
					channel_id: self.channel_id(),
					htlc_id: htlc.htlc_id,
					amount_msat: htlc.amount_msat,
					payment_hash: htlc.payment_hash,
					cltv_expiry: htlc.cltv_expiry,
					onion_routing_packet: (**onion_packet).clone(),
				});
			}
		}
		for htlc in self.pending_inbound_htlcs.iter() {
			if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
				match reason {
					&InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
						update_fail_htlcs.push(msgs::UpdateFailHTLC {
							channel_id: self.channel_id(),
							htlc_id: htlc.htlc_id,
							reason: err_packet.clone()
						});
					},
					&InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
						update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
							channel_id: self.channel_id(),
							htlc_id: htlc.htlc_id,
							sha256_of_onion: sha256_of_onion.clone(),
							failure_code: failure_code.clone(),
						});
					},
					&InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
						update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
							channel_id: self.channel_id(),
							htlc_id: htlc.htlc_id,
							payment_preimage: payment_preimage.clone(),
						});
					},
				}
			}
		}

		let update_fee = if self.is_outbound() && self.pending_update_fee.is_some() {
			Some(msgs::UpdateFee {
				channel_id: self.channel_id(),
				feerate_per_kw: self.pending_update_fee.unwrap().0,
			})
		} else { None };
		log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
				log_bytes!(self.channel_id()), if update_fee.is_some() { " update_fee," } else { "" },
				update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
		msgs::CommitmentUpdate {
			update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
			commitment_signed: self.send_commitment_no_state_update(logger).expect("It looks like we failed to re-generate a commitment_signed we had previously sent?").0,
		}
	}

	/// May panic if some calls other than message-handling calls (which will all Err immediately)
	/// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
	///
	/// Some links printed in log lines are included here to check them during build (when run with
	/// `cargo doc --document-private-items`):
	/// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
	/// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
	pub fn channel_reestablish<L: Deref>(&mut self, msg: &msgs::ChannelReestablish, logger: &L,
		node_pk: PublicKey, genesis_block_hash: BlockHash, best_block: &BestBlock)
	-> Result<ReestablishResponses, ChannelError> where L::Target: Logger {
		if self.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
			// While BOLT 2 doesn't indicate explicitly we should error this channel here, it
			// almost certainly indicates we are going to end up out-of-sync in some way, so we
			// just close here instead of trying to recover.
			return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
		}

		if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
			msg.next_local_commitment_number == 0 {
			return Err(ChannelError::Close("Peer sent a garbage channel_reestablish".to_owned()));
		}

		if msg.next_remote_commitment_number > 0 {
			match msg.data_loss_protect {
				OptionalField::Present(ref data_loss) => {
					let expected_point = self.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.secp_ctx);
					let given_secret = SecretKey::from_slice(&data_loss.your_last_per_commitment_secret)
						.map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
					if expected_point != PublicKey::from_secret_key(&self.secp_ctx, &given_secret) {
						return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
					}
					if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number {
						macro_rules! log_and_panic {
							($err_msg: expr) => {
								log_error!(logger, $err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id));
								panic!($err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id));
							}
						}
						log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
							This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
							More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
							If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
							ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
							ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
							Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
							See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
					}
				},
				OptionalField::Absent => {}
			}
		}

		// Before we change the state of the channel, we check if the peer is sending a very old
		// commitment transaction number; if so, we send a warning message.
		let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number - 1;
		if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
			return Err(
				ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction))
			);
		}

		// Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
		// remaining cases either succeed or ErrorMessage-fail).
		self.channel_state &= !(ChannelState::PeerDisconnected as u32);

		let shutdown_msg = if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
			assert!(self.shutdown_scriptpubkey.is_some());
			Some(msgs::Shutdown {
				channel_id: self.channel_id,
				scriptpubkey: self.get_closing_scriptpubkey(),
			})
		} else { None };

		let announcement_sigs = self.get_announcement_sigs(node_pk, genesis_block_hash, best_block.height(), logger);

		if self.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
			// If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
			if self.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
					self.channel_state & (ChannelState::MonitorUpdateFailed as u32) != 0 {
				if msg.next_remote_commitment_number != 0 {
					return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
				}
				// Short circuit the whole handler as there is nothing we can resend them
				return Ok(ReestablishResponses {
					channel_ready: None,
					raa: None, commitment_update: None, mon_update: None,
					order: RAACommitmentOrder::CommitmentFirst,
					holding_cell_failed_htlcs: Vec::new(),
					shutdown_msg, announcement_sigs,
				});
			}

			// We have OurChannelReady set!
			let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
			return Ok(ReestablishResponses {
				channel_ready: Some(msgs::ChannelReady {
					channel_id: self.channel_id(),
					next_per_commitment_point,
					short_channel_id_alias: Some(self.outbound_scid_alias),
				}),
				raa: None, commitment_update: None, mon_update: None,
				order: RAACommitmentOrder::CommitmentFirst,
				holding_cell_failed_htlcs: Vec::new(),
				shutdown_msg, announcement_sigs,
			});
		}

		let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number {
			// Remote isn't waiting on any RevokeAndACK from us!
			// Note that if we need to repeat our ChannelReady we'll do that in the next if block.
			None
		} else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.cur_holder_commitment_transaction_number {
			if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) != 0 {
				self.monitor_pending_revoke_and_ack = true;
				None
			} else {
				Some(self.get_last_revoke_and_ack())
			}
		} else {
			return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction".to_owned()));
		};

		// We increment cur_counterparty_commitment_transaction_number only upon receipt of
		// revoke_and_ack, not on sending commitment_signed, so we add one if we have
		// AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
		// the corresponding revoke_and_ack back yet.
		let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.cur_counterparty_commitment_transaction_number + if (self.channel_state & ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 };

		let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number == 1 {
			// We should never have to worry about MonitorUpdateFailed resending ChannelReady
			let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
			Some(msgs::ChannelReady {
				channel_id: self.channel_id(),
				next_per_commitment_point,
				short_channel_id_alias: Some(self.outbound_scid_alias),
			})
		} else { None };

		if msg.next_local_commitment_number == next_counterparty_commitment_number {
			if required_revoke.is_some() {
				log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.channel_id()));
			} else {
				log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.channel_id()));
			}

			if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateFailed as u32)) == 0 {
				// We're up-to-date and not waiting on a remote revoke (if we are our
				// channel_reestablish should result in them sending a revoke_and_ack), but we may
				// have received some updates while we were disconnected. Free the holding cell
				// now!
				match self.free_holding_cell_htlcs(logger) {
					Err(ChannelError::Close(msg)) => Err(ChannelError::Close(msg)),
					Err(ChannelError::Warn(_)) | Err(ChannelError::Ignore(_)) =>
						panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
					Ok((Some((commitment_update, monitor_update)), holding_cell_failed_htlcs)) => {
						Ok(ReestablishResponses {
							channel_ready, shutdown_msg, announcement_sigs,
							raa: required_revoke,
							commitment_update: Some(commitment_update),
							order: self.resend_order.clone(),
							mon_update: Some(monitor_update),
							holding_cell_failed_htlcs,
						})
					},
					Ok((None, holding_cell_failed_htlcs)) => {
						Ok(ReestablishResponses {
							channel_ready, shutdown_msg, announcement_sigs,
							raa: required_revoke,
							commitment_update: None,
							order: self.resend_order.clone(),
							mon_update: None,
							holding_cell_failed_htlcs,
						})
					},
				}
			} else {
				Ok(ReestablishResponses {
					channel_ready, shutdown_msg, announcement_sigs,
					raa: required_revoke,
					commitment_update: None,
					order: self.resend_order.clone(),
					mon_update: None,
					holding_cell_failed_htlcs: Vec::new(),
				})
			}
		} else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
			if required_revoke.is_some() {
				log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.channel_id()));
			} else {
				log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", log_bytes!(self.channel_id()));
			}

			if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) != 0 {
				self.monitor_pending_commitment_signed = true;
				Ok(ReestablishResponses {
					channel_ready, shutdown_msg, announcement_sigs,
					commitment_update: None, raa: None, mon_update: None,
					order: self.resend_order.clone(),
					holding_cell_failed_htlcs: Vec::new(),
				})
			} else {
				Ok(ReestablishResponses {
					channel_ready, shutdown_msg, announcement_sigs,
					raa: required_revoke,
					commitment_update: Some(self.get_last_commitment_update(logger)),
					order: self.resend_order.clone(),
					mon_update: None,
					holding_cell_failed_htlcs: Vec::new(),
				})
			}
		} else {
			Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction".to_owned()))
		}
	}
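
	// Worked example of the reestablish bookkeeping above (a hedged sketch; commitment numbers
	// count *down* internally from INITIAL_COMMITMENT_NUMBER = 2^48 - 1, while the wire fields
	// count up):
	//
	//	// Suppose INITIAL_COMMITMENT_NUMBER - cur_holder_commitment_transaction_number == 5.
	//	// A peer sending next_remote_commitment_number == 4 is fully up to date (no RAA owed);
	//	// next_remote_commitment_number == 3 means they missed our last revoke_and_ack, which
	//	// we re-send via get_last_revoke_and_ack(); anything lower fails with the "very old
	//	// local commitment transaction" error above.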

	/// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
	/// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
	/// at which point they will be recalculated.
	fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
		-> (u64, u64)
		where F::Target: FeeEstimator
	{
		if let Some((min, max)) = self.closing_fee_limits { return (min, max); }

		// Propose a range from our current Background feerate to our Normal feerate plus our
		// force_close_avoidance_max_fee_satoshis.
		// If we fail to come to consensus, we'll have to force-close.
		let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background);
		let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
		let mut proposed_max_feerate = if self.is_outbound() { normal_feerate } else { u32::max_value() };

		// The spec requires that (when the channel does not have anchors) we only send absolute
		// channel fees no greater than the absolute channel fee on the current commitment
		// transaction. It's unclear *which* commitment transaction this refers to, and there isn't
		// a very good reason to apply such a limit in any case. We don't bother doing so, risking
		// some force-closure by old nodes, but we wanted to close the channel anyway.
		if let Some(target_feerate) = self.target_closing_feerate_sats_per_kw {
			let min_feerate = if self.is_outbound() { target_feerate } else { cmp::min(self.feerate_per_kw, target_feerate) };
			proposed_feerate = cmp::max(proposed_feerate, min_feerate);
			proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
		}

		// Note that technically we could end up with a lower minimum fee if one side's balance is
		// below our dust limit, causing the output to disappear. We don't bother handling this
		// case, however, as this should only happen if a channel is closed before any (material)
		// payments have been made on it. This may cause slight fee overpayment and/or failure to
		// come to consensus with our counterparty on appropriate fees, however it should be a
		// relatively rare case. We can revisit this later, though note that in order to determine
		// if the funder's output is dust we have to know the absolute fee we're going to use.
		let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
		let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
		let proposed_max_total_fee_satoshis = if self.is_outbound() {
				// We always add force_close_avoidance_max_fee_satoshis to our normal
				// feerate-calculated fee, but allow the max to be overridden if we're using a
				// target feerate-calculated fee.
				cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.config.options.force_close_avoidance_max_fee_satoshis,
					proposed_max_feerate as u64 * tx_weight / 1000)
			} else {
				self.channel_value_satoshis - (self.value_to_self_msat + 999) / 1000
			};

		self.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
		self.closing_fee_limits.clone().unwrap()
	}
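
	// A self-contained sketch (not part of the real API) of the fee arithmetic used above:
	// closing fees are quoted in sat per 1000 weight units, so fee = feerate * weight / 1000.
	// The parameter names and the sample feerates/weight below are illustrative assumptions.
	#[cfg(test)]
	fn example_closing_fee_range_sat(background_feerate: u64, normal_feerate: u64, tx_weight: u64, force_close_avoidance_max_fee_satoshis: u64) -> (u64, u64) {
		// Minimum we'd propose: the Background feerate applied to the closing tx weight.
		let min_fee = background_feerate * tx_weight / 1000;
		// Maximum we'd accept as funder: the Normal feerate plus the configured avoidance buffer.
		let max_fee = normal_feerate * tx_weight / 1000 + force_close_avoidance_max_fee_satoshis;
		// e.g. (253, 2_000, 600, 1_000) yields (151, 2_200): propose 151 sat, accept up to 2_200.
		(min_fee, max_fee)
	}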

	/// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
	/// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
	/// this point if we're the funder we should send the initial closing_signed, and in any case
	/// shutdown should complete within a reasonable timeframe.
	fn closing_negotiation_ready(&self) -> bool {
		self.pending_inbound_htlcs.is_empty() && self.pending_outbound_htlcs.is_empty() &&
			self.channel_state &
				(BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32 |
				 ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32)
				== BOTH_SIDES_SHUTDOWN_MASK &&
			self.pending_update_fee.is_none()
	}

	/// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
	/// an Err if no progress is being made and the channel should be force-closed instead.
	/// Should be called on a one-minute timer.
	pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
		if self.closing_negotiation_ready() {
			if self.closing_signed_in_flight {
				return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
			} else {
				self.closing_signed_in_flight = true;
			}
		}
		Ok(())
	}
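
	// Sketch of the expected driver (hypothetical caller code; in practice the ChannelManager's
	// one-minute timer performs this). Hitting the check twice while negotiation is ready but
	// unfinished yields an Err, signalling the channel should be force-closed:
	//
	//	// once per minute:
	//	if let Err(e) = channel.timer_check_closing_negotiation_progress() {
	//		// force-close the channel with reason `e`
	//	}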

	pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
		-> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
		where F::Target: FeeEstimator, L::Target: Logger
	{
		if self.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
return Ok ( ( None , None ) ) ;
}
if ! self . is_outbound ( ) {
if let Some ( msg ) = & self . pending_counterparty_closing_signed . take ( ) {
return self . closing_signed ( fee_estimator , & msg ) ;
}
return Ok ( ( None , None ) ) ;
}
let ( our_min_fee , our_max_fee ) = self . calculate_closing_fee_limits ( fee_estimator ) ;
assert! ( self . shutdown_scriptpubkey . is_some ( ) ) ;
let ( closing_tx , total_fee_satoshis ) = self . build_closing_transaction ( our_min_fee , false ) ;
log_trace! ( logger , " Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats) " ,
our_min_fee , our_max_fee , total_fee_satoshis ) ;
let sig = self . holder_signer
. sign_closing_transaction ( & closing_tx , & self . secp_ctx )
. map_err ( | ( ) | ChannelError ::Close ( " Failed to get signature for closing transaction. " . to_owned ( ) ) ) ? ;
self . last_sent_closing_fee = Some ( ( total_fee_satoshis , sig . clone ( ) ) ) ;
Ok ( ( Some ( msgs ::ClosingSigned {
channel_id : self . channel_id ,
fee_satoshis : total_fee_satoshis ,
signature : sig ,
fee_range : Some ( msgs ::ClosingSignedFeeRange {
min_fee_satoshis : our_min_fee ,
max_fee_satoshis : our_max_fee ,
} ) ,
} ) , None ) )
}
pub fn shutdown < K : Deref > (
& mut self , keys_provider : & K , their_features : & InitFeatures , msg : & msgs ::Shutdown
) -> Result < ( Option < msgs ::Shutdown > , Option < ChannelMonitorUpdate > , Vec < ( HTLCSource , PaymentHash ) > ) , ChannelError >
where K ::Target : KeysInterface < Signer = Signer >
{
if self . channel_state & ( ChannelState ::PeerDisconnected as u32 ) = = ChannelState ::PeerDisconnected as u32 {
return Err ( ChannelError ::Close ( " Peer sent shutdown when we needed a channel_reestablish " . to_owned ( ) ) ) ;
}
if self . channel_state < ChannelState ::FundingSent as u32 {
// Spec says we should fail the connection, not the channel, but that's nonsense, there
// are plenty of reasons you may want to fail a channel pre-funding, and spec says you
// can do that via error message without getting a connection fail anyway...
return Err ( ChannelError ::Close ( " Peer sent shutdown pre-funding generation " . to_owned ( ) ) ) ;
}
for htlc in self . pending_inbound_htlcs . iter ( ) {
if let InboundHTLCState ::RemoteAnnounced ( _ ) = htlc . state {
return Err ( ChannelError ::Close ( " Got shutdown with remote pending HTLCs " . to_owned ( ) ) ) ;
}
}
assert_eq! ( self . channel_state & ChannelState ::ShutdownComplete as u32 , 0 ) ;
if ! script ::is_bolt2_compliant ( & msg . scriptpubkey , their_features ) {
return Err ( ChannelError ::Warn ( format! ( " Got a nonstandard scriptpubkey ( {} ) from remote peer " , msg . scriptpubkey . to_bytes ( ) . to_hex ( ) ) ) ) ;
}
if self . counterparty_shutdown_scriptpubkey . is_some ( ) {
if Some ( & msg . scriptpubkey ) ! = self . counterparty_shutdown_scriptpubkey . as_ref ( ) {
return Err ( ChannelError ::Warn ( format! ( " Got shutdown request with a scriptpubkey ( {} ) which did not match their previous scriptpubkey. " , msg . scriptpubkey . to_bytes ( ) . to_hex ( ) ) ) ) ;
}
} else {
self . counterparty_shutdown_scriptpubkey = Some ( msg . scriptpubkey . clone ( ) ) ;
}
// If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
// immediately after the commitment dance, but we can send a Shutdown because we won't send
// any further commitment updates after we set LocalShutdownSent.
let send_shutdown = ( self . channel_state & ChannelState ::LocalShutdownSent as u32 ) ! = ChannelState ::LocalShutdownSent as u32 ;
let update_shutdown_script = match self . shutdown_scriptpubkey {
Some ( _ ) = > false ,
None = > {
assert! ( send_shutdown ) ;
let shutdown_scriptpubkey = keys_provider . get_shutdown_scriptpubkey ( ) ;
if ! shutdown_scriptpubkey . is_compatible ( their_features ) {
return Err ( ChannelError ::Close ( format! ( " Provided a scriptpubkey format not accepted by peer: {} " , shutdown_scriptpubkey ) ) ) ;
}
self . shutdown_scriptpubkey = Some ( shutdown_scriptpubkey ) ;
true
} ,
} ;
// From here on out, we may not fail!
self . channel_state | = ChannelState ::RemoteShutdownSent as u32 ;
self . update_time_counter + = 1 ;
let monitor_update = if update_shutdown_script {
self . latest_monitor_update_id + = 1 ;
Some ( ChannelMonitorUpdate {
update_id : self . latest_monitor_update_id ,
updates : vec ! [ ChannelMonitorUpdateStep ::ShutdownScript {
scriptpubkey : self . get_closing_scriptpubkey ( ) ,
} ] ,
} )
} else { None } ;
let shutdown = if send_shutdown {
Some ( msgs ::Shutdown {
channel_id : self . channel_id ,
scriptpubkey : self . get_closing_scriptpubkey ( ) ,
} )
} else { None } ;
// We can't send our shutdown until we've committed all of our pending HTLCs, but the
// remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
// cell HTLCs and return them to fail the payment.
self . holding_cell_update_fee = None ;
let mut dropped_outbound_htlcs = Vec ::with_capacity ( self . holding_cell_htlc_updates . len ( ) ) ;
self . holding_cell_htlc_updates . retain ( | htlc_update | {
match htlc_update {
& HTLCUpdateAwaitingACK ::AddHTLC { ref payment_hash , ref source , .. } = > {
dropped_outbound_htlcs . push ( ( source . clone ( ) , payment_hash . clone ( ) ) ) ;
false
} ,
_ = > true
}
} ) ;
self . channel_state | = ChannelState ::LocalShutdownSent as u32 ;
self . update_time_counter + = 1 ;
Ok ( ( shutdown , monitor_update , dropped_outbound_htlcs ) )
}
fn build_signed_closing_transaction ( & self , closing_tx : & ClosingTransaction , counterparty_sig : & Signature , sig : & Signature ) -> Transaction {
let mut tx = closing_tx . trust ( ) . built_transaction ( ) . clone ( ) ;
tx . input [ 0 ] . witness . push ( Vec ::new ( ) ) ; // First is the multisig dummy
let funding_key = self . get_holder_pubkeys ( ) . funding_pubkey . serialize ( ) ;
let counterparty_funding_key = self . counterparty_funding_pubkey ( ) . serialize ( ) ;
let mut holder_sig = sig . serialize_der ( ) . to_vec ( ) ;
holder_sig . push ( EcdsaSighashType ::All as u8 ) ;
let mut cp_sig = counterparty_sig . serialize_der ( ) . to_vec ( ) ;
cp_sig . push ( EcdsaSighashType ::All as u8 ) ;
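// Note: the funding output is a BOLT 3 2-of-2 multisig whose pubkeys are ordered
// lexicographically by their serializations, so the signatures below must be pushed in
// key order. E.g. if our serialized funding key compares less than theirs, our
// signature goes first.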
if funding_key [ .. ] < counterparty_funding_key [ .. ] {
tx . input [ 0 ] . witness . push ( holder_sig ) ;
tx . input [ 0 ] . witness . push ( cp_sig ) ;
} else {
tx . input [ 0 ] . witness . push ( cp_sig ) ;
tx . input [ 0 ] . witness . push ( holder_sig ) ;
}
tx . input [ 0 ] . witness . push ( self . get_funding_redeemscript ( ) . into_bytes ( ) ) ;
tx
}
pub fn closing_signed < F : Deref > (
& mut self , fee_estimator : & LowerBoundedFeeEstimator < F > , msg : & msgs ::ClosingSigned )
-> Result < ( Option < msgs ::ClosingSigned > , Option < Transaction > ) , ChannelError >
where F ::Target : FeeEstimator
{
if self . channel_state & BOTH_SIDES_SHUTDOWN_MASK ! = BOTH_SIDES_SHUTDOWN_MASK {
return Err ( ChannelError ::Close ( " Remote end sent us a closing_signed before both sides provided a shutdown " . to_owned ( ) ) ) ;
}
if self . channel_state & ( ChannelState ::PeerDisconnected as u32 ) = = ChannelState ::PeerDisconnected as u32 {
return Err ( ChannelError ::Close ( " Peer sent closing_signed when we needed a channel_reestablish " . to_owned ( ) ) ) ;
}
if ! self . pending_inbound_htlcs . is_empty ( ) | | ! self . pending_outbound_htlcs . is_empty ( ) {
return Err ( ChannelError ::Close ( " Remote end sent us a closing_signed while there were still pending HTLCs " . to_owned ( ) ) ) ;
}
if msg . fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
return Err ( ChannelError ::Close ( " Remote tried to send us a closing tx with > 21 million BTC fee " . to_owned ( ) ) ) ;
}
if self . is_outbound ( ) & & self . last_sent_closing_fee . is_none ( ) {
return Err ( ChannelError ::Close ( " Remote tried to send a closing_signed when we were supposed to propose the first one " . to_owned ( ) ) ) ;
}
if self . channel_state & ChannelState ::MonitorUpdateFailed as u32 ! = 0 {
self . pending_counterparty_closing_signed = Some ( msg . clone ( ) ) ;
return Ok ( ( None , None ) ) ;
}
let funding_redeemscript = self . get_funding_redeemscript ( ) ;
let ( mut closing_tx , used_total_fee ) = self . build_closing_transaction ( msg . fee_satoshis , false ) ;
if used_total_fee ! = msg . fee_satoshis {
return Err ( ChannelError ::Close ( format! ( " Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {} . Actual closing tx fee: {} " , msg . fee_satoshis , used_total_fee ) ) ) ;
}
let sighash = closing_tx . trust ( ) . get_sighash_all ( & funding_redeemscript , self . channel_value_satoshis ) ;
match self . secp_ctx . verify_ecdsa ( & sighash , & msg . signature , & self . get_counterparty_pubkeys ( ) . funding_pubkey ) {
Ok ( _ ) = > { } ,
Err ( _e ) = > {
// The remote end may have decided to revoke their output due to inconsistent dust
// limits, so check for that case by re-checking the signature here.
closing_tx = self . build_closing_transaction ( msg . fee_satoshis , true ) . 0 ;
let sighash = closing_tx . trust ( ) . get_sighash_all ( & funding_redeemscript , self . channel_value_satoshis ) ;
secp_check! ( self . secp_ctx . verify_ecdsa ( & sighash , & msg . signature , self . counterparty_funding_pubkey ( ) ) , " Invalid closing tx signature from peer " . to_owned ( ) ) ;
} ,
} ;
for outp in closing_tx . trust ( ) . built_transaction ( ) . output . iter ( ) {
if ! outp . script_pubkey . is_witness_program ( ) & & outp . value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
return Err ( ChannelError ::Close ( " Remote sent us a closing_signed with a dust output. Always use segwit closing scripts! " . to_owned ( ) ) ) ;
}
}
assert! ( self . shutdown_scriptpubkey . is_some ( ) ) ;
if let Some ( ( last_fee , sig ) ) = self . last_sent_closing_fee {
if last_fee = = msg . fee_satoshis {
let tx = self . build_signed_closing_transaction ( & mut closing_tx , & msg . signature , & sig ) ;
self . channel_state = ChannelState ::ShutdownComplete as u32 ;
self . update_time_counter + = 1 ;
return Ok ( ( None , Some ( tx ) ) ) ;
}
}
let ( our_min_fee , our_max_fee ) = self . calculate_closing_fee_limits ( fee_estimator ) ;
macro_rules ! propose_fee {
( $new_fee : expr ) = > {
let ( closing_tx , used_fee ) = if $new_fee = = msg . fee_satoshis {
( closing_tx , $new_fee )
} else {
self . build_closing_transaction ( $new_fee , false )
} ;
let sig = self . holder_signer
. sign_closing_transaction ( & closing_tx , & self . secp_ctx )
. map_err ( | _ | ChannelError ::Close ( " External signer refused to sign closing transaction " . to_owned ( ) ) ) ? ;
let signed_tx = if $new_fee = = msg . fee_satoshis {
self . channel_state = ChannelState ::ShutdownComplete as u32 ;
self . update_time_counter + = 1 ;
let tx = self . build_signed_closing_transaction ( & closing_tx , & msg . signature , & sig ) ;
Some ( tx )
} else { None } ;
self . last_sent_closing_fee = Some ( ( used_fee , sig . clone ( ) ) ) ;
return Ok ( ( Some ( msgs ::ClosingSigned {
channel_id : self . channel_id ,
fee_satoshis : used_fee ,
signature : sig ,
fee_range : Some ( msgs ::ClosingSignedFeeRange {
min_fee_satoshis : our_min_fee ,
max_fee_satoshis : our_max_fee ,
} ) ,
} ) , signed_tx ) )
}
}
if let Some ( msgs ::ClosingSignedFeeRange { min_fee_satoshis , max_fee_satoshis } ) = msg . fee_range {
if msg . fee_satoshis < min_fee_satoshis | | msg . fee_satoshis > max_fee_satoshis {
return Err ( ChannelError ::Close ( format! ( " Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat " , msg . fee_satoshis , min_fee_satoshis , max_fee_satoshis ) ) ) ;
}
if max_fee_satoshis < our_min_fee {
return Err ( ChannelError ::Warn ( format! ( " Unable to come to consensus about closing feerate, remote's max fee ( {} sat) was smaller than our min fee ( {} sat) " , max_fee_satoshis , our_min_fee ) ) ) ;
}
if min_fee_satoshis > our_max_fee {
return Err ( ChannelError ::Warn ( format! ( " Unable to come to consensus about closing feerate, remote's min fee ( {} sat) was greater than our max fee ( {} sat) " , min_fee_satoshis , our_max_fee ) ) ) ;
}
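// At this point the peer's range overlaps ours. Worked example (assumed numbers): with
// our range 200-1000 sat and their range 500-2000 sat, any fee in 500-1000 sat can be
// agreed on; which end we aim for depends on who pays the fee, as handled below.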
if ! self . is_outbound ( ) {
// They have to pay, so pick the highest fee in the overlapping range.
// We should never set an upper bound aside from their full balance
debug_assert_eq! ( our_max_fee , self . channel_value_satoshis - ( self . value_to_self_msat + 999 ) / 1000 ) ;
propose_fee! ( cmp ::min ( max_fee_satoshis , our_max_fee ) ) ;
} else {
if msg . fee_satoshis < our_min_fee | | msg . fee_satoshis > our_max_fee {
return Err ( ChannelError ::Close ( format! ( " Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range. " ,
msg . fee_satoshis , our_min_fee , our_max_fee ) ) ) ;
}
// The proposed fee is in our acceptable range, accept it and broadcast!
propose_fee! ( msg . fee_satoshis ) ;
}
} else {
// Old fee style negotiation. We don't bother to enforce whether they are complying
// with the "making progress" requirements, we just comply and hope for the best.
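// A sketch of the convergence with assumed numbers: if we last sent 1000 sat and they
// now ask for 1500 sat, we accept 1500 if it is under our max, otherwise we counter
// with our max; mirrored logic applies when they ask for less than our last offer.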
if let Some ( ( last_fee , _ ) ) = self . last_sent_closing_fee {
if msg . fee_satoshis > last_fee {
if msg . fee_satoshis < our_max_fee {
propose_fee! ( msg . fee_satoshis ) ;
} else if last_fee < our_max_fee {
propose_fee! ( our_max_fee ) ;
} else {
return Err ( ChannelError ::Close ( format! ( " Unable to come to consensus about closing feerate, remote wants something ( {} sat) higher than our max fee ( {} sat) " , msg . fee_satoshis , our_max_fee ) ) ) ;
}
} else {
if msg . fee_satoshis > our_min_fee {
propose_fee! ( msg . fee_satoshis ) ;
} else if last_fee > our_min_fee {
propose_fee! ( our_min_fee ) ;
} else {
return Err ( ChannelError ::Close ( format! ( " Unable to come to consensus about closing feerate, remote wants something ( {} sat) lower than our min fee ( {} sat) " , msg . fee_satoshis , our_min_fee ) ) ) ;
}
}
} else {
if msg . fee_satoshis < our_min_fee {
propose_fee! ( our_min_fee ) ;
} else if msg . fee_satoshis > our_max_fee {
propose_fee! ( our_max_fee ) ;
} else {
propose_fee! ( msg . fee_satoshis ) ;
}
}
}
}
// Public utilities:
pub fn channel_id ( & self ) -> [ u8 ; 32 ] {
self . channel_id
}
pub fn minimum_depth ( & self ) -> Option < u32 > {
self . minimum_depth
}
/// Gets the "user_id" value passed into the construction of this channel. It has no special
/// meaning and exists only to allow users to have a persistent identifier of a channel.
pub fn get_user_id ( & self ) -> u64 {
self . user_id
}
/// Gets the channel's type
pub fn get_channel_type ( & self ) -> & ChannelTypeFeatures {
& self . channel_type
}
/// Guaranteed to be Some after both ChannelReady messages have been exchanged (and, thus,
/// is_usable() returns true).
/// Allowed in any state (including after shutdown)
pub fn get_short_channel_id ( & self ) -> Option < u64 > {
self . short_channel_id
}
/// Allowed in any state (including after shutdown)
pub fn latest_inbound_scid_alias ( & self ) -> Option < u64 > {
self . latest_inbound_scid_alias
}
/// Allowed in any state (including after shutdown)
pub fn outbound_scid_alias ( & self ) -> u64 {
self . outbound_scid_alias
}
/// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
/// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases.
pub fn set_outbound_scid_alias ( & mut self , outbound_scid_alias : u64 ) {
assert_eq! ( self . outbound_scid_alias , 0 ) ;
self . outbound_scid_alias = outbound_scid_alias ;
}
/// Returns the funding_txo we either got from our peer, or were given by
/// get_outbound_funding_created.
pub fn get_funding_txo ( & self ) -> Option < OutPoint > {
self . channel_transaction_parameters . funding_outpoint
}
fn get_holder_selected_contest_delay ( & self ) -> u16 {
self . channel_transaction_parameters . holder_selected_contest_delay
}
fn get_holder_pubkeys ( & self ) -> & ChannelPublicKeys {
& self . channel_transaction_parameters . holder_pubkeys
}
pub fn get_counterparty_selected_contest_delay ( & self ) -> Option < u16 > {
self . channel_transaction_parameters . counterparty_parameters
. as_ref ( ) . map ( | params | params . selected_contest_delay )
}
fn get_counterparty_pubkeys ( & self ) -> & ChannelPublicKeys {
& self . channel_transaction_parameters . counterparty_parameters . as_ref ( ) . unwrap ( ) . pubkeys
}
/// Allowed in any state (including after shutdown)
pub fn get_counterparty_node_id ( & self ) -> PublicKey {
self . counterparty_node_id
}
/// Allowed in any state (including after shutdown)
pub fn get_holder_htlc_minimum_msat ( & self ) -> u64 {
self . holder_htlc_minimum_msat
}
/// Allowed in any state (including after shutdown), but will return None before TheirInitSent
pub fn get_holder_htlc_maximum_msat ( & self ) -> Option < u64 > {
self . get_htlc_maximum_msat ( self . holder_max_htlc_value_in_flight_msat )
}
/// Allowed in any state (including after shutdown)
pub fn get_announced_htlc_max_msat ( & self ) -> u64 {
return cmp ::min (
// Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
// to use full capacity. This is an effort to reduce routing failures, because in many cases
// channel might have been used to route very small values (either by honest users or as DoS).
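// Worked example (assumed numbers): a 1_000_000 sat channel advertises at most
// 1_000_000 * 1000 * 9 / 10 = 900_000_000 msat, or less if the counterparty's
// max_htlc_value_in_flight_msat is lower.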
self . channel_value_satoshis * 1000 * 9 / 10 ,
self . counterparty_max_htlc_value_in_flight_msat
) ;
}
/// Allowed in any state (including after shutdown)
pub fn get_counterparty_htlc_minimum_msat ( & self ) -> u64 {
self . counterparty_htlc_minimum_msat
}
/// Allowed in any state (including after shutdown), but will return None before TheirInitSent
pub fn get_counterparty_htlc_maximum_msat ( & self ) -> Option < u64 > {
self . get_htlc_maximum_msat ( self . counterparty_max_htlc_value_in_flight_msat )
}
fn get_htlc_maximum_msat ( & self , party_max_htlc_value_in_flight_msat : u64 ) -> Option < u64 > {
self . counterparty_selected_channel_reserve_satoshis . map ( | counterparty_reserve | {
let holder_reserve = self . holder_selected_channel_reserve_satoshis ;
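// Worked example (assumed numbers): for a 1_000_000 sat channel where each side's
// reserve is 10_000 sat, HTLCs are capped at (1_000_000 - 10_000 - 10_000) * 1000
// = 980_000_000 msat, further limited by the party's max_htlc_value_in_flight_msat.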
cmp ::min (
( self . channel_value_satoshis - counterparty_reserve - holder_reserve ) * 1000 ,
party_max_htlc_value_in_flight_msat
)
} )
}
pub fn get_value_satoshis ( & self ) -> u64 {
self . channel_value_satoshis
}
pub fn get_fee_proportional_millionths ( & self ) -> u32 {
self . config . options . forwarding_fee_proportional_millionths
}
pub fn get_cltv_expiry_delta ( & self ) -> u16 {
cmp ::max ( self . config . options . cltv_expiry_delta , MIN_CLTV_EXPIRY_DELTA )
}
pub fn get_max_dust_htlc_exposure_msat ( & self ) -> u64 {
self . config . options . max_dust_htlc_exposure_msat
}
/// Returns the previous [`ChannelConfig`] applied to this channel, if any.
pub fn prev_config ( & self ) -> Option < ChannelConfig > {
self . prev_config . map ( | prev_config | prev_config . 0 )
}
/// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
/// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
/// no longer be considered when forwarding HTLCs.
pub fn maybe_expire_prev_config ( & mut self ) {
if self . prev_config . is_none ( ) {
return ;
}
let prev_config = self . prev_config . as_mut ( ) . unwrap ( ) ;
prev_config . 1 + = 1 ;
if prev_config . 1 = = EXPIRE_PREV_CONFIG_TICKS {
self . prev_config = None ;
}
}
/// Returns the current [`ChannelConfig`] applied to the channel.
pub fn config ( & self ) -> ChannelConfig {
self . config . options
}
/// Updates the channel's config. A bool is returned indicating whether the config update
/// resulted in a new ChannelUpdate message.
pub fn update_config ( & mut self , config : & ChannelConfig ) -> bool {
let did_channel_update =
self . config . options . forwarding_fee_proportional_millionths ! = config . forwarding_fee_proportional_millionths | |
self . config . options . forwarding_fee_base_msat ! = config . forwarding_fee_base_msat | |
self . config . options . cltv_expiry_delta ! = config . cltv_expiry_delta ;
if did_channel_update {
self . prev_config = Some ( ( self . config . options , 0 ) ) ;
// Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
// policy change to propagate throughout the network.
self . update_time_counter + = 1 ;
}
self . config . options = * config ;
did_channel_update
}
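// Usage sketch (hypothetical values): changing only forwarding_fee_base_msat returns
// true and snapshots the old options into prev_config, so HTLCs already forwarded
// under the old fees keep validating until EXPIRE_PREV_CONFIG_TICKS ticks elapse.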
fn internal_htlc_satisfies_config (
& self , htlc : & msgs ::UpdateAddHTLC , amt_to_forward : u64 , outgoing_cltv_value : u32 , config : & ChannelConfig ,
) -> Result < ( ) , ( & 'static str , u16 ) > {
let fee = amt_to_forward . checked_mul ( config . forwarding_fee_proportional_millionths as u64 )
. and_then ( | prop_fee | ( prop_fee / 1000000 ) . checked_add ( config . forwarding_fee_base_msat as u64 ) ) ;
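// Worked example (assumed numbers): forwarding 100_000 msat with
// forwarding_fee_base_msat = 1_000 and forwarding_fee_proportional_millionths = 500
// gives fee = 100_000 * 500 / 1_000_000 + 1_000 = 1_050 msat, so the incoming HTLC
// must carry at least 101_050 msat.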
if fee . is_none ( ) | | htlc . amount_msat < fee . unwrap ( ) | |
( htlc . amount_msat - fee . unwrap ( ) ) < amt_to_forward {
return Err ( (
" Prior hop has deviated from specified fees parameters or origin node has obsolete ones " ,
0x1000 | 12 , // fee_insufficient
) ) ;
}
if ( htlc . cltv_expiry as u64 ) < outgoing_cltv_value as u64 + config . cltv_expiry_delta as u64 {
return Err ( (
" Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta " ,
0x1000 | 13 , // incorrect_cltv_expiry
) ) ;
}
Ok ( ( ) )
}
/// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
/// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
/// unsuccessful, falls back to the previous one if one exists.
pub fn htlc_satisfies_config (
& self , htlc : & msgs ::UpdateAddHTLC , amt_to_forward : u64 , outgoing_cltv_value : u32 ,
) -> Result < ( ) , ( & 'static str , u16 ) > {
self . internal_htlc_satisfies_config ( & htlc , amt_to_forward , outgoing_cltv_value , & self . config ( ) )
. or_else ( | err | {
if let Some ( prev_config ) = self . prev_config ( ) {
self . internal_htlc_satisfies_config ( htlc , amt_to_forward , outgoing_cltv_value , & prev_config )
} else {
Err ( err )
}
} )
}
pub fn get_feerate ( & self ) -> u32 {
self . feerate_per_kw
}
pub fn get_dust_buffer_feerate ( & self , outbound_feerate_update : Option < u32 > ) -> u32 {
// When calculating our exposure to dust HTLCs, we assume that the channel feerate
// may, at any point, increase by at least 10 sat/vB (i.e. 2530 sat/kWU) or 25%,
// whichever is higher. This ensures that we aren't suddenly exposed to significantly
// more dust balance if the feerate increases when we have several HTLCs pending
// which are near the dust limit.
let mut feerate_per_kw = self . feerate_per_kw ;
// If there's a pending update fee, use it to ensure we aren't under-estimating
// potential feerate updates coming soon.
if let Some ( ( feerate , _ ) ) = self . pending_update_fee {
feerate_per_kw = cmp ::max ( feerate_per_kw , feerate ) ;
}
if let Some ( feerate ) = outbound_feerate_update {
feerate_per_kw = cmp ::max ( feerate_per_kw , feerate ) ;
}
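// E.g. with feerate_per_kw = 1_000 this returns max(2530, 1250) = 2_530; at 10_000 it
// returns 12_500, as the +25% branch dominates above 2_024 sat/kWU.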
cmp ::max ( 2530 , feerate_per_kw * 1250 / 1000 )
}
pub fn get_cur_holder_commitment_transaction_number ( & self ) -> u64 {
self . cur_holder_commitment_transaction_number + 1
}
pub fn get_cur_counterparty_commitment_transaction_number ( & self ) -> u64 {
self . cur_counterparty_commitment_transaction_number + 1 - if self . channel_state & ( ChannelState ::AwaitingRemoteRevoke as u32 ) ! = 0 { 1 } else { 0 }
}
pub fn get_revoked_counterparty_commitment_transaction_number ( & self ) -> u64 {
self . cur_counterparty_commitment_transaction_number + 2
}
#[ cfg(test) ]
pub fn get_signer ( & self ) -> & Signer {
& self . holder_signer
}
#[ cfg(test) ]
pub fn get_value_stat ( & self ) -> ChannelValueStat {
ChannelValueStat {
value_to_self_msat : self . value_to_self_msat ,
channel_value_msat : self . channel_value_satoshis * 1000 ,
channel_reserve_msat : self . counterparty_selected_channel_reserve_satoshis . unwrap ( ) * 1000 ,
pending_outbound_htlcs_amount_msat : self . pending_outbound_htlcs . iter ( ) . map ( | ref h | h . amount_msat ) . sum ::< u64 > ( ) ,
pending_inbound_htlcs_amount_msat : self . pending_inbound_htlcs . iter ( ) . map ( | ref h | h . amount_msat ) . sum ::< u64 > ( ) ,
holding_cell_outbound_amount_msat : {
let mut res = 0 ;
for h in self . holding_cell_htlc_updates . iter ( ) {
match h {
& HTLCUpdateAwaitingACK ::AddHTLC { amount_msat , .. } = > {
res + = amount_msat ;
}
_ = > { }
}
}
res
} ,
counterparty_max_htlc_value_in_flight_msat : self . counterparty_max_htlc_value_in_flight_msat ,
counterparty_dust_limit_msat : self . counterparty_dust_limit_satoshis * 1000 ,
}
}
/// Allowed in any state (including after shutdown)
pub fn get_update_time_counter ( & self ) -> u32 {
self . update_time_counter
}
pub fn get_latest_monitor_update_id ( & self ) -> u64 {
self . latest_monitor_update_id
}
pub fn should_announce ( & self ) -> bool {
self . config . announced_channel
}
pub fn is_outbound ( & self ) -> bool {
self . channel_transaction_parameters . is_outbound_from_holder
}
/// Gets the fee we'd want to charge for adding an HTLC output to this Channel
2018-07-28 19:15:45 -04:00
/// Allowed in any state (including after shutdown)
pub fn get_outbound_forwarding_fee_base_msat ( & self ) -> u32 {
self . config . options . forwarding_fee_base_msat
}
/// Returns true if we've ever received a message from the remote end for this Channel
pub fn have_received_message ( & self ) -> bool {
self . channel_state > ( ChannelState ::OurInitSent as u32 )
}
/// Returns true if this channel is fully established and not known to be closing.
/// Allowed in any state (including after shutdown)
pub fn is_usable ( & self ) -> bool {
let mask = ChannelState ::ChannelFunded as u32 | BOTH_SIDES_SHUTDOWN_MASK ;
( self . channel_state & mask ) = = ( ChannelState ::ChannelFunded as u32 ) & & ! self . monitor_pending_channel_ready
}
/// Returns true if this channel is currently available for use. This is a superset of
/// is_usable() and considers things like the channel being temporarily disabled.
/// Allowed in any state (including after shutdown)
pub fn is_live ( & self ) -> bool {
self . is_usable ( ) & & ( self . channel_state & ( ChannelState ::PeerDisconnected as u32 ) = = 0 )
}
/// Returns true if this channel has been marked as awaiting a monitor update to move forward.
/// Allowed in any state (including after shutdown)
pub fn is_awaiting_monitor_update ( & self ) -> bool {
( self . channel_state & ChannelState ::MonitorUpdateFailed as u32 ) ! = 0
}
/// Returns true if funding_created was sent/received.
pub fn is_funding_initiated ( & self ) -> bool {
self . channel_state > = ChannelState ::FundingSent as u32
}
/// Returns true if our channel_ready has been sent
pub fn is_our_channel_ready ( & self ) -> bool {
( self . channel_state & ChannelState ::OurChannelReady as u32 ) ! = 0 | | self . channel_state > = ChannelState ::ChannelFunded as u32
}
/// Returns true if our peer has either initiated or agreed to shut down the channel.
pub fn received_shutdown ( & self ) -> bool {
( self . channel_state & ChannelState ::RemoteShutdownSent as u32 ) ! = 0
}
/// Returns true if we either initiated or agreed to shut down the channel.
pub fn sent_shutdown ( & self ) -> bool {
( self . channel_state & ChannelState ::LocalShutdownSent as u32 ) ! = 0
}
/// Returns true if this channel is fully shut down. True here implies that no further actions
/// may/will be taken on this channel, and thus this object should be freed. Any future changes
/// will be handled appropriately by the chain monitor.
pub fn is_shutdown ( & self ) -> bool {
if ( self . channel_state & ChannelState ::ShutdownComplete as u32 ) = = ChannelState ::ShutdownComplete as u32 {
assert! ( self . channel_state = = ChannelState ::ShutdownComplete as u32 ) ;
true
} else { false }
}
pub fn channel_update_status ( & self ) -> ChannelUpdateStatus {
self . channel_update_status
}
pub fn set_channel_update_status ( & mut self , status : ChannelUpdateStatus ) {
self . update_time_counter + = 1 ;
self . channel_update_status = status ;
}
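// State sketch for check_get_channel_ready below: FundingSent alone means neither side
// has sent channel_ready yet; FundingSent | TheirChannelReady means only the
// counterparty has; once both have, the state collapses to ChannelFunded (plus any
// MULTI_STATE_FLAGS bits).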
fn check_get_channel_ready ( & mut self , height : u32 ) -> Option < msgs ::ChannelReady > {
if self . funding_tx_confirmation_height = = 0 & & self . minimum_depth ! = Some ( 0 ) {
return None ;
}
let funding_tx_confirmations = height as i64 - self . funding_tx_confirmation_height as i64 + 1 ;
if funding_tx_confirmations < = 0 {
self . funding_tx_confirmation_height = 0 ;
}
if funding_tx_confirmations < self . minimum_depth . unwrap_or ( 0 ) as i64 {
return None ;
}
let non_shutdown_state = self . channel_state & ( ! MULTI_STATE_FLAGS ) ;
let need_commitment_update = if non_shutdown_state = = ChannelState ::FundingSent as u32 {
self . channel_state | = ChannelState ::OurChannelReady as u32 ;
true
} else if non_shutdown_state = = ( ChannelState ::FundingSent as u32 | ChannelState ::TheirChannelReady as u32 ) {
self . channel_state = ChannelState ::ChannelFunded as u32 | ( self . channel_state & MULTI_STATE_FLAGS ) ;
self . update_time_counter + = 1 ;
true
} else if non_shutdown_state = = ( ChannelState ::FundingSent as u32 | ChannelState ::OurChannelReady as u32 ) {
// We got a reorg but not enough to trigger a force close, just ignore.
false
} else {
if self . channel_state < ChannelState ::ChannelFunded as u32 {
// We should never see a funding transaction on-chain until we've received
// funding_signed (if we're an outbound channel), or seen funding_generated (if we're
// an inbound channel - before that we have no known funding TXID). The fuzzer,
// however, may do this and we shouldn't treat it as a bug.
#[ cfg(not(fuzzing)) ]
panic! ( " Started confirming a channel in a state pre-FundingSent: {} . \n \
Do NOT broadcast a funding transaction manually - let LDK do it for you ! " ,
self . channel_state ) ;
}
// We got a reorg but not enough to trigger a force close, just ignore.
false
} ;
if need_commitment_update {
if self . channel_state & ( ChannelState ::MonitorUpdateFailed as u32 ) = = 0 {
if self . channel_state & ( ChannelState ::PeerDisconnected as u32 ) = = 0 {
let next_per_commitment_point =
self . holder_signer . get_per_commitment_point ( INITIAL_COMMITMENT_NUMBER - 1 , & self . secp_ctx ) ;
return Some ( msgs ::ChannelReady {
channel_id : self . channel_id ,
next_per_commitment_point ,
short_channel_id_alias : Some ( self . outbound_scid_alias ) ,
} ) ;
}
} else {
self . monitor_pending_channel_ready = true ;
}
}
None
}
/// When a transaction is confirmed, we check whether it is or spends the funding transaction
/// In the first case, we store the confirmation height and calculate the short channel id.
/// In the second, we simply return an Err indicating we need to be force-closed now.
pub fn transactions_confirmed < L : Deref > ( & mut self , block_hash : & BlockHash , height : u32 ,
txdata : & TransactionData , genesis_block_hash : BlockHash , node_pk : PublicKey , logger : & L )
-> Result < ( Option < msgs ::ChannelReady > , Option < msgs ::AnnouncementSignatures > ) , ClosureReason > where L ::Target : Logger {
if let Some ( funding_txo ) = self . get_funding_txo ( ) {
for & ( index_in_block , tx ) in txdata . iter ( ) {
// Check if the transaction is the expected funding transaction, and if it is,
// check that it pays the right amount to the right script.
if self . funding_tx_confirmation_height = = 0 {
if tx . txid ( ) = = funding_txo . txid {
let txo_idx = funding_txo . index as usize ;
if txo_idx > = tx . output . len ( ) | | tx . output [ txo_idx ] . script_pubkey ! = self . get_funding_redeemscript ( ) . to_v0_p2wsh ( ) | |
tx . output [ txo_idx ] . value ! = self . channel_value_satoshis {
if self . is_outbound ( ) {
// If we generated the funding transaction and it doesn't match what it
// should, the client is really broken and we should just panic and
// tell them off. That said, because hash collisions happen with high
// probability in fuzzing mode, if we're fuzzing we just close the
// channel and move on.
#[ cfg(not(fuzzing)) ]
panic! ( " Client called ChannelManager::funding_transaction_generated with bogus transaction! " ) ;
}
self . update_time_counter + = 1 ;
let err_reason = " funding tx had wrong script/value or output index " ;
return Err ( ClosureReason ::ProcessingError { err : err_reason . to_owned ( ) } ) ;
} else {
if self . is_outbound ( ) {
for input in tx . input . iter ( ) {
if input . witness . is_empty ( ) {
// We generated a malleable funding transaction, implying we've
// just exposed ourselves to funds loss to our counterparty.
#[ cfg(not(fuzzing)) ]
panic! ( " Client called ChannelManager::funding_transaction_generated with bogus transaction! " ) ;
}
}
}
self . funding_tx_confirmation_height = height ;
self . funding_tx_confirmed_in = Some ( * block_hash ) ;
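// BOLT 7 short channel ids pack the funding location as
// (block_height << 40) | (tx_index_in_block << 16) | output_index, which is why
// scid_from_parts rejects heights/tx indices >= 2^24 and output indices >= 2^16.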
self . short_channel_id = match scid_from_parts ( height as u64 , index_in_block as u64 , txo_idx as u64 ) {
Ok ( scid ) = > Some ( scid ) ,
Err ( _ ) = > panic! ( " Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs " ) ,
}
}
}
// If we allow 1-conf funding, we may need to check for channel_ready here and
// send it immediately instead of waiting for a best_block_updated call (which
// may have already happened for this block).
if let Some ( channel_ready ) = self . check_get_channel_ready ( height ) {
log_info! ( logger , " Sending a channel_ready to our peer for channel {} " , log_bytes! ( self . channel_id ) ) ;
let announcement_sigs = self . get_announcement_sigs ( node_pk , genesis_block_hash , height , logger ) ;
return Ok ( ( Some ( channel_ready ) , announcement_sigs ) ) ;
}
}
for inp in tx . input . iter ( ) {
if inp . previous_output = = funding_txo . into_bitcoin_outpoint ( ) {
log_info! ( logger , " Detected channel-closing tx {} spending {}:{}, closing channel {} " , tx . txid ( ) , inp . previous_output . txid , inp . previous_output . vout , log_bytes! ( self . channel_id ( ) ) ) ;
return Err ( ClosureReason ::CommitmentTxConfirmed ) ;
}
}
}
}
Ok ( ( None , None ) )
}
/// When a new block is connected, we check the height of the block against outbound holding
/// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
/// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
/// handled by the ChannelMonitor.
///
/// If we return Err, the channel may have been closed, at which point the standard
/// requirements apply - no calls may be made except those explicitly stated to be allowed
/// post-shutdown.
///
/// May return some HTLCs (and their payment_hash) which have timed out and should be failed
/// back.
pub fn best_block_updated < L : Deref > ( & mut self , height : u32 , highest_header_time : u32 , genesis_block_hash : BlockHash , node_pk : PublicKey , logger : & L )
2022-05-30 14:39:04 -07:00
-> Result < ( Option < msgs ::ChannelReady > , Vec < ( HTLCSource , PaymentHash ) > , Option < msgs ::AnnouncementSignatures > ) , ClosureReason > where L ::Target : Logger {
2021-11-18 21:54:13 +00:00
self . do_best_block_updated ( height , highest_header_time , Some ( ( genesis_block_hash , node_pk ) ) , logger )
}
fn do_best_block_updated < L : Deref > ( & mut self , height : u32 , highest_header_time : u32 , genesis_node_pk : Option < ( BlockHash , PublicKey ) > , logger : & L )
2022-05-30 14:39:04 -07:00
-> Result < ( Option < msgs ::ChannelReady > , Vec < ( HTLCSource , PaymentHash ) > , Option < msgs ::AnnouncementSignatures > ) , ClosureReason > where L ::Target : Logger {
2021-03-15 20:28:22 -04:00
let mut timed_out_htlcs = Vec ::new ( ) ;
2021-10-13 04:19:13 +00:00
// This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
// forward an HTLC when our counterparty should almost certainly just fail it for expiring
// ~now.
let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS ;
2021-03-15 20:28:22 -04:00
self . holding_cell_htlc_updates . retain ( | htlc_update | {
match htlc_update {
& HTLCUpdateAwaitingACK ::AddHTLC { ref payment_hash , ref source , ref cltv_expiry , .. } = > {
if * cltv_expiry < = unforwarded_htlc_cltv_limit {
timed_out_htlcs . push ( ( source . clone ( ) , payment_hash . clone ( ) ) ) ;
false
} else { true }
} ,
_ = > true
}
} ) ;
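// As a standalone sketch of the filter above (with a plain tuple standing in
// for the real holding-cell entry type), the same retain-based pass looks
// like this; `limit` corresponds to unforwarded_htlc_cltv_limit:
fn sketch_timeout_filter(holding_cell: &mut Vec<(u32, [u8; 32])>, limit: u32) -> Vec<[u8; 32]> {
	let mut failed_payment_hashes = Vec::new();
	holding_cell.retain(|&(cltv_expiry, payment_hash)| {
		if cltv_expiry <= limit {
			// Too close to expiry for the next hop to claim; fail it back.
			failed_payment_hashes.push(payment_hash);
			false
		} else { true }
	});
	failed_payment_hashes
}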
2021-03-15 20:13:57 -04:00
2021-03-15 20:28:22 -04:00
self . update_time_counter = cmp ::max ( self . update_time_counter , highest_header_time ) ;
2022-05-30 14:39:04 -07:00
if let Some ( channel_ready ) = self . check_get_channel_ready ( height ) {
2021-11-18 21:54:13 +00:00
let announcement_sigs = if let Some ( ( genesis_block_hash , node_pk ) ) = genesis_node_pk {
2021-12-07 19:11:18 +00:00
self . get_announcement_sigs ( node_pk , genesis_block_hash , height , logger )
2021-11-18 21:54:13 +00:00
} else { None } ;
2022-05-30 14:39:04 -07:00
log_info! ( logger , " Sending a channel_ready to our peer for channel {} " , log_bytes! ( self . channel_id ) ) ;
return Ok ( ( Some ( channel_ready ) , timed_out_htlcs , announcement_sigs ) ) ;
2021-03-09 22:05:21 -05:00
}
let non_shutdown_state = self . channel_state & ( ! MULTI_STATE_FLAGS ) ;
if non_shutdown_state > = ChannelState ::ChannelFunded as u32 | |
2022-05-30 14:39:04 -07:00
( non_shutdown_state & ChannelState ::OurChannelReady as u32 ) = = ChannelState ::OurChannelReady as u32 {
2021-03-09 22:05:21 -05:00
let mut funding_tx_confirmations = height as i64 - self . funding_tx_confirmation_height as i64 + 1 ;
if self . funding_tx_confirmation_height = = 0 {
2022-05-30 14:39:04 -07:00
// Note that check_get_channel_ready may reset funding_tx_confirmation_height to
2021-03-09 22:05:21 -05:00
// zero if it has been reorged out, however in either case, our state flags
2022-05-30 14:39:04 -07:00
// indicate we've already sent a channel_ready
2021-03-09 22:05:21 -05:00
funding_tx_confirmations = 0 ;
}
2022-05-30 14:39:04 -07:00
// If we've sent channel_ready (or have both sent and received channel_ready), and
2022-05-02 02:51:50 +00:00
// the funding transaction has become unconfirmed,
2021-03-09 22:05:21 -05:00
// close the channel and hope we can get the latest state on chain (because presumably
// the funding transaction is at least still in the mempool of most nodes).
2022-05-02 02:51:50 +00:00
//
2022-02-01 21:57:01 +00:00
// Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
2022-06-10 01:06:42 +02:00
// 0-conf channel, but not doing so may lead to the
// `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
// to.
2022-05-02 02:51:50 +00:00
if funding_tx_confirmations = = 0 & & self . funding_tx_confirmed_in . is_some ( ) {
2021-09-30 21:35:40 +00:00
let err_reason = format! ( " Funding transaction was unconfirmed. Locked at {} confs, now have {} confs. " ,
self . minimum_depth . unwrap ( ) , funding_tx_confirmations ) ;
return Err ( ClosureReason ::ProcessingError { err : err_reason } ) ;
2021-03-15 20:28:22 -04:00
}
2021-09-19 23:49:57 +00:00
} else if ! self . is_outbound ( ) & & self . funding_tx_confirmed_in . is_none ( ) & &
height > = self . channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
log_info! ( logger , " Closing channel {} due to funding timeout " , log_bytes! ( self . channel_id ) ) ;
// If funding_tx_confirmed_in is unset, the channel must not be active
assert! ( non_shutdown_state < = ChannelState ::ChannelFunded as u32 ) ;
2022-05-30 14:39:04 -07:00
assert_eq! ( non_shutdown_state & ChannelState ::OurChannelReady as u32 , 0 ) ;
2021-09-19 23:49:57 +00:00
return Err ( ClosureReason ::FundingTimedOut ) ;
2020-02-10 10:27:23 +01:00
}
2021-03-15 20:28:22 -04:00
2021-11-18 21:54:13 +00:00
let announcement_sigs = if let Some ( ( genesis_block_hash , node_pk ) ) = genesis_node_pk {
2021-12-07 19:11:18 +00:00
self . get_announcement_sigs ( node_pk , genesis_block_hash , height , logger )
2021-11-18 21:54:13 +00:00
} else { None } ;
Ok ( ( None , timed_out_htlcs , announcement_sigs ) )
2017-12-25 01:05:27 -05:00
}
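// A self-contained sketch of the two force-close checks above, with
// simplified inputs; the deadline constant here is illustrative rather than
// quoted from this file.
const SKETCH_FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;

enum FundingCheck { Fine, Unconfirmed, TimedOut }

fn sketch_funding_check(best_height: u32, creation_height: u32,
	conf_height: Option<u32>, is_outbound: bool) -> FundingCheck {
	match conf_height {
		Some(conf) => {
			// A tx confirmed at the tip has one confirmation, hence the +1.
			let confs = best_height as i64 - conf as i64 + 1;
			if confs == 0 { FundingCheck::Unconfirmed } else { FundingCheck::Fine }
		},
		// Inbound channels whose funding never confirms are given up on after
		// a deadline; an outbound funding is ours to rebroadcast, so we wait.
		None if !is_outbound &&
			best_height >= creation_height + SKETCH_FUNDING_CONF_DEADLINE_BLOCKS =>
			FundingCheck::TimedOut,
		None => FundingCheck::Fine,
	}
}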
2021-03-22 17:01:04 -04:00
/// Indicates the funding transaction is no longer confirmed in the main chain. This may
/// force-close the channel, but may also indicate a harmless reorganization of a block or two
2022-05-30 14:39:04 -07:00
/// before the channel has reached channel_ready and we can just wait for more blocks.
2021-09-30 21:35:40 +00:00
pub fn funding_transaction_unconfirmed < L : Deref > ( & mut self , logger : & L ) -> Result < ( ) , ClosureReason > where L ::Target : Logger {
2021-03-22 17:01:04 -04:00
if self . funding_tx_confirmation_height ! = 0 {
2021-04-20 13:39:00 -07:00
// We handle the funding disconnection by calling best_block_updated with a height one
2021-03-22 17:01:04 -04:00
// below where our funding was connected, implying a reorg back to conf_height - 1.
let reorg_height = self . funding_tx_confirmation_height - 1 ;
// We use the time field to bump the current time we set on channel updates if it's
// larger. If we don't know that time has moved forward, we can just set it to the last
// time we saw and it will be ignored.
let best_time = self . update_time_counter ;
2021-11-18 21:54:13 +00:00
match self . do_best_block_updated ( reorg_height , best_time , None , logger ) {
2022-05-30 14:39:04 -07:00
Ok ( ( channel_ready , timed_out_htlcs , announcement_sigs ) ) = > {
assert! ( channel_ready . is_none ( ) , " We can't generate a channel_ready with 0 confirmations? " ) ;
2021-03-22 17:01:04 -04:00
assert! ( timed_out_htlcs . is_empty ( ) , " We can't have accepted HTLCs with a timeout before our funding confirmation? " ) ;
2021-11-18 21:54:13 +00:00
assert! ( announcement_sigs . is_none ( ) , " We can't generate an announcement_sigs with 0 confirmations? " ) ;
2021-03-22 17:01:04 -04:00
Ok ( ( ) )
} ,
Err ( e ) = > Err ( e )
}
} else {
// We never learned about the funding confirmation anyway, just ignore
Ok ( ( ) )
}
}
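// Why reorg_height = funding_tx_confirmation_height - 1 works: plugging it
// into the confirmation arithmetic used in do_best_block_updated yields
// exactly zero confirmations, which triggers the unconfirmed path. A quick
// standalone check with a hypothetical height:
fn sketch_reorg_height_yields_zero_confs() {
	let conf_height: i64 = 500_000;
	let reorg_height = conf_height - 1;
	assert_eq!(reorg_height - conf_height + 1, 0);
}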
2017-12-25 01:05:27 -05:00
// Methods to get unprompted messages to send to the remote end (or where we already returned
// something in the handler for the message that prompted this message):
2020-06-13 16:46:25 -04:00
pub fn get_open_channel ( & self , chain_hash : BlockHash ) -> msgs ::OpenChannel {
2020-10-15 13:45:18 +02:00
if ! self . is_outbound ( ) {
2017-12-25 01:05:27 -05:00
panic! ( " Tried to open a channel for an inbound channel? " ) ;
}
if self . channel_state ! = ChannelState ::OurInitSent as u32 {
2018-08-15 00:59:42 +09:00
panic! ( " Cannot generate an open_channel after we've moved forward " ) ;
2017-12-25 01:05:27 -05:00
}
2020-06-08 20:47:55 -04:00
if self . cur_holder_commitment_transaction_number ! = INITIAL_COMMITMENT_NUMBER {
2017-12-25 01:05:27 -05:00
panic! ( " Tried to send an open_channel for a channel that has already advanced " ) ;
}
2021-02-20 10:05:55 -05:00
let first_per_commitment_point = self . holder_signer . get_per_commitment_point ( self . cur_holder_commitment_transaction_number , & self . secp_ctx ) ;
2020-10-15 13:45:18 +02:00
let keys = self . get_holder_pubkeys ( ) ;
2017-12-25 01:05:27 -05:00
2018-09-15 07:26:03 +09:00
msgs ::OpenChannel {
2020-10-06 16:47:23 -07:00
chain_hash ,
2017-12-25 01:05:27 -05:00
temporary_channel_id : self . channel_id ,
funding_satoshis : self . channel_value_satoshis ,
2018-08-15 00:59:42 +09:00
push_msat : self . channel_value_satoshis * 1000 - self . value_to_self_msat ,
2020-06-08 20:47:55 -04:00
dust_limit_satoshis : self . holder_dust_limit_satoshis ,
2021-11-09 21:12:30 +00:00
max_htlc_value_in_flight_msat : self . holder_max_htlc_value_in_flight_msat ,
channel_reserve_satoshis : self . holder_selected_channel_reserve_satoshis ,
2020-06-08 20:47:55 -04:00
htlc_minimum_msat : self . holder_htlc_minimum_msat ,
2020-06-13 16:46:25 -04:00
feerate_per_kw : self . feerate_per_kw as u32 ,
2020-10-15 13:45:18 +02:00
to_self_delay : self . get_holder_selected_contest_delay ( ) ,
2017-12-25 01:05:27 -05:00
max_accepted_htlcs : OUR_MAX_HTLCS ,
2020-06-08 20:47:55 -04:00
funding_pubkey : keys . funding_pubkey ,
revocation_basepoint : keys . revocation_basepoint ,
payment_point : keys . payment_point ,
delayed_payment_basepoint : keys . delayed_payment_basepoint ,
htlc_basepoint : keys . htlc_basepoint ,
2020-07-11 03:19:43 -07:00
first_per_commitment_point ,
2018-10-31 14:51:39 -04:00
channel_flags : if self . config . announced_channel { 1 } else { 0 } ,
2021-07-26 14:04:44 -04:00
shutdown_scriptpubkey : OptionalField ::Present ( match & self . shutdown_scriptpubkey {
Some ( script ) = > script . clone ( ) . into_inner ( ) ,
None = > Builder ::new ( ) . into_script ( ) ,
} ) ,
2021-09-17 17:32:36 +00:00
channel_type : Some ( self . channel_type . clone ( ) ) ,
2018-09-15 07:26:03 +09:00
}
2017-12-25 01:05:27 -05:00
}
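// A quick sketch of the push_msat field above: whatever portion of the
// channel value (in msat) we do not keep for ourselves is pushed to the
// counterparty when the channel opens. Values below are hypothetical.
fn sketch_push_msat(channel_value_satoshis: u64, value_to_self_msat: u64) -> u64 {
	channel_value_satoshis * 1000 - value_to_self_msat
}
// e.g. sketch_push_msat(100_000, 99_000_000) == 1_000_000 msat pushed.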
2022-01-26 00:21:22 +01:00
pub fn inbound_is_awaiting_accept ( & self ) -> bool {
self . inbound_awaiting_accept
}
2022-02-02 23:01:05 +00:00
/// Sets this channel to accepting 0conf; must be done before `get_accept_channel`
pub fn set_0conf ( & mut self ) {
assert! ( self . inbound_awaiting_accept ) ;
self . minimum_depth = Some ( 0 ) ;
}
2022-01-26 00:21:22 +01:00
/// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
/// should be sent back to the counterparty node.
///
/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
2022-03-24 02:00:31 +02:00
pub fn accept_inbound_channel ( & mut self , user_id : u64 ) -> msgs ::AcceptChannel {
2020-10-15 13:45:18 +02:00
if self . is_outbound ( ) {
2017-12-25 01:05:27 -05:00
panic! ( " Tried to send accept_channel for an outbound channel? " ) ;
}
if self . channel_state ! = ( ChannelState ::OurInitSent as u32 ) | ( ChannelState ::TheirInitSent as u32 ) {
panic! ( " Tried to send accept_channel after channel had moved forward " ) ;
}
2020-06-08 20:47:55 -04:00
if self . cur_holder_commitment_transaction_number ! = INITIAL_COMMITMENT_NUMBER {
2017-12-25 01:05:27 -05:00
panic! ( " Tried to send an accept_channel for a channel that has already advanced " ) ;
}
2022-01-26 00:21:22 +01:00
if ! self . inbound_awaiting_accept {
panic! ( " The inbound channel has already been accepted " ) ;
}
2022-03-24 02:00:31 +02:00
self . user_id = user_id ;
2022-01-26 00:21:22 +01:00
self . inbound_awaiting_accept = false ;
self . generate_accept_channel_message ( )
}
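// A caller-side sketch of the manual-accept flow above, using a mock in place
// of the real Channel; note that set_0conf must happen before the
// accept_channel message is generated, since that message carries
// minimum_depth.
struct MockInboundChannel { awaiting_accept: bool, minimum_depth: Option<u32>, user_id: u64 }

impl MockInboundChannel {
	fn accept(&mut self, user_id: u64, zero_conf: bool) {
		assert!(self.awaiting_accept, "already accepted");
		if zero_conf { self.minimum_depth = Some(0); }
		self.user_id = user_id;
		self.awaiting_accept = false;
		// ...generate and queue the accept_channel message here...
	}
}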
2017-12-25 01:05:27 -05:00
2022-01-26 00:21:22 +01:00
/// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
/// inbound channel. If the intention is to accept an inbound channel, use
/// [`Channel::accept_inbound_channel`] instead.
///
/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
fn generate_accept_channel_message ( & self ) -> msgs ::AcceptChannel {
2021-02-20 10:05:55 -05:00
let first_per_commitment_point = self . holder_signer . get_per_commitment_point ( self . cur_holder_commitment_transaction_number , & self . secp_ctx ) ;
2020-10-15 13:45:18 +02:00
let keys = self . get_holder_pubkeys ( ) ;
2017-12-25 01:05:27 -05:00
2018-09-04 19:28:06 -04:00
msgs ::AcceptChannel {
2017-12-25 01:05:27 -05:00
temporary_channel_id : self . channel_id ,
2020-06-08 20:47:55 -04:00
dust_limit_satoshis : self . holder_dust_limit_satoshis ,
2021-11-09 21:12:30 +00:00
max_htlc_value_in_flight_msat : self . holder_max_htlc_value_in_flight_msat ,
channel_reserve_satoshis : self . holder_selected_channel_reserve_satoshis ,
2020-06-08 20:47:55 -04:00
htlc_minimum_msat : self . holder_htlc_minimum_msat ,
2021-07-03 15:27:12 +00:00
minimum_depth : self . minimum_depth . unwrap ( ) ,
2020-10-15 13:45:18 +02:00
to_self_delay : self . get_holder_selected_contest_delay ( ) ,
2017-12-25 01:05:27 -05:00
max_accepted_htlcs : OUR_MAX_HTLCS ,
2020-06-08 20:47:55 -04:00
funding_pubkey : keys . funding_pubkey ,
revocation_basepoint : keys . revocation_basepoint ,
payment_point : keys . payment_point ,
delayed_payment_basepoint : keys . delayed_payment_basepoint ,
htlc_basepoint : keys . htlc_basepoint ,
2020-07-11 03:19:43 -07:00
first_per_commitment_point ,
2021-07-26 14:04:44 -04:00
shutdown_scriptpubkey : OptionalField ::Present ( match & self . shutdown_scriptpubkey {
Some ( script ) = > script . clone ( ) . into_inner ( ) ,
None = > Builder ::new ( ) . into_script ( ) ,
} ) ,
2022-02-16 21:34:16 +00:00
channel_type : Some ( self . channel_type . clone ( ) ) ,
2018-09-04 19:28:06 -04:00
}
2017-12-25 01:05:27 -05:00
}
2022-02-08 00:54:41 +01:00
/// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
/// inbound channel without accepting it.
///
/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
#[ cfg(test) ]
pub fn get_accept_channel_message ( & self ) -> msgs ::AcceptChannel {
self . generate_accept_channel_message ( )
}
2018-11-22 19:38:28 -05:00
/// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
2020-03-02 12:55:53 -05:00
fn get_outbound_funding_created_signature < L : Deref > ( & mut self , logger : & L ) -> Result < Signature , ChannelError > where L ::Target : Logger {
2020-06-08 20:47:55 -04:00
let counterparty_keys = self . build_remote_transaction_keys ( ) ? ;
2021-11-18 21:23:41 -05:00
let counterparty_initial_commitment_tx = self . build_commitment_transaction ( self . cur_counterparty_commitment_transaction_number , & counterparty_keys , false , false , logger ) . tx ;
2022-01-19 12:19:27 +01:00
Ok ( self . holder_signer . sign_counterparty_commitment ( & counterparty_initial_commitment_tx , Vec ::new ( ) , & self . secp_ctx )
2020-07-13 13:16:32 +09:00
. map_err ( | _ | ChannelError ::Close ( " Failed to get signatures for new commitment_signed " . to_owned ( ) ) ) ? . 0 )
2017-12-25 01:05:27 -05:00
}
/// Updates channel state with knowledge of the funding transaction's txid/index, and generates
/// a funding_created message for the remote peer.
/// Panics if called at some time other than immediately after initial handshake, if called twice,
/// or if called on an inbound channel.
/// Note that channel_id changes during this call!
2018-02-27 23:38:52 +01:00
/// Do NOT broadcast the funding transaction until after a successful funding_signed call!
2018-11-22 19:38:28 -05:00
/// If an Err is returned, it is a ChannelError::Close.
2021-03-26 18:07:24 -04:00
pub fn get_outbound_funding_created < L : Deref > ( & mut self , funding_transaction : Transaction , funding_txo : OutPoint , logger : & L ) -> Result < msgs ::FundingCreated , ChannelError > where L ::Target : Logger {
2020-10-15 13:45:18 +02:00
if ! self . is_outbound ( ) {
2017-12-25 01:05:27 -05:00
panic! ( " Tried to create outbound funding_created message on an inbound channel! " ) ;
}
if self . channel_state ! = ( ChannelState ::OurInitSent as u32 | ChannelState ::TheirInitSent as u32 ) {
panic! ( " Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice) " ) ;
}
2020-02-07 17:48:46 -05:00
if self . commitment_secrets . get_min_seen_secret ( ) ! = ( 1 < < 48 ) | |
2020-06-08 20:47:55 -04:00
self . cur_counterparty_commitment_transaction_number ! = INITIAL_COMMITMENT_NUMBER | |
self . cur_holder_commitment_transaction_number ! = INITIAL_COMMITMENT_NUMBER {
2017-12-25 01:05:27 -05:00
panic! ( " Should not have advanced channel commitment tx numbers prior to funding_created " ) ;
}
2020-10-15 13:45:18 +02:00
self . channel_transaction_parameters . funding_outpoint = Some ( funding_txo ) ;
2021-02-20 10:05:55 -05:00
self . holder_signer . ready_channel ( & self . channel_transaction_parameters ) ;
2020-10-15 13:45:18 +02:00
2020-06-08 20:47:55 -04:00
let signature = match self . get_outbound_funding_created_signature ( logger ) {
2018-04-24 00:19:52 -04:00
Ok ( res ) = > res ,
2017-12-25 01:05:27 -05:00
Err ( e ) = > {
2020-03-02 12:55:53 -05:00
log_error! ( logger , " Got bad signatures: {:?}! " , e ) ;
2020-10-15 13:45:18 +02:00
self . channel_transaction_parameters . funding_outpoint = None ;
2017-12-25 01:05:27 -05:00
return Err ( e ) ;
}
} ;
let temporary_channel_id = self . channel_id ;
// Now that we're past error-generating stuff, update our local state:
2020-02-08 17:45:40 -05:00
2017-12-25 01:05:27 -05:00
self . channel_state = ChannelState ::FundingCreated as u32 ;
2018-06-27 09:11:58 -04:00
self . channel_id = funding_txo . to_channel_id ( ) ;
2021-03-26 18:07:24 -04:00
self . funding_transaction = Some ( funding_transaction ) ;
2017-12-25 01:05:27 -05:00
2020-04-18 16:35:01 -04:00
Ok ( msgs ::FundingCreated {
temporary_channel_id ,
2018-06-27 09:11:58 -04:00
funding_txid : funding_txo . txid ,
funding_output_index : funding_txo . index ,
2020-10-06 16:47:23 -07:00
signature
2020-04-18 16:35:01 -04:00
} )
2017-12-25 01:05:27 -05:00
}
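// A standalone sketch of the ordering constraint in the comments above: the
// funding transaction must not hit the chain until funding_signed has been
// processed, or funds could be locked with no commitment transaction to
// claim them back. The stage names are illustrative only.
#[derive(PartialEq)]
enum FundingStage { CreatedSent, SignedReceived, Broadcast }

fn sketch_on_funding_signed(stage: &mut FundingStage) {
	assert!(*stage == FundingStage::CreatedSent);
	*stage = FundingStage::SignedReceived;
}

fn sketch_broadcast_funding(stage: &mut FundingStage) {
	// Broadcasting any earlier than this is unsafe.
	assert!(*stage == FundingStage::SignedReceived);
	*stage = FundingStage::Broadcast;
}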
2021-11-14 17:25:39 +00:00
/// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
2022-05-30 14:39:04 -07:00
/// announceable and available for use (have exchanged ChannelReady messages in both
2021-12-07 19:11:18 +00:00
/// directions). Should be used for both broadcasted announcements and in response to an
/// AnnouncementSignatures message from the remote peer.
2021-11-14 17:25:39 +00:00
///
2018-08-28 12:11:45 -04:00
/// Will only fail if we're not in a state where channel_announcement may be sent (including
/// closing).
2021-11-14 17:25:39 +00:00
///
2021-05-06 01:31:39 +00:00
/// This will only return ChannelError::Ignore upon failure.
2021-11-14 17:25:39 +00:00
fn get_channel_announcement ( & self , node_id : PublicKey , chain_hash : BlockHash ) -> Result < msgs ::UnsignedChannelAnnouncement , ChannelError > {
2018-10-31 14:51:39 -04:00
if ! self . config . announced_channel {
2020-07-13 13:16:32 +09:00
return Err ( ChannelError ::Ignore ( " Channel is not available for public announcements " . to_owned ( ) ) ) ;
2017-12-25 01:05:27 -05:00
}
2021-11-18 21:54:10 +00:00
if ! self . is_usable ( ) {
return Err ( ChannelError ::Ignore ( " Cannot get a ChannelAnnouncement if the channel is not currently usable " . to_owned ( ) ) ) ;
2017-12-25 01:05:27 -05:00
}
2020-06-08 20:47:55 -04:00
let were_node_one = node_id . serialize ( ) [ .. ] < self . counterparty_node_id . serialize ( ) [ .. ] ;
2017-12-25 01:05:27 -05:00
let msg = msgs ::UnsignedChannelAnnouncement {
2020-04-15 17:16:45 -07:00
features : ChannelFeatures ::known ( ) ,
2020-10-06 16:47:23 -07:00
chain_hash ,
2017-12-25 01:05:27 -05:00
short_channel_id : self . get_short_channel_id ( ) . unwrap ( ) ,
2020-06-08 20:47:55 -04:00
node_id_1 : if were_node_one { node_id } else { self . get_counterparty_node_id ( ) } ,
node_id_2 : if were_node_one { self . get_counterparty_node_id ( ) } else { node_id } ,
2020-10-15 13:45:18 +02:00
bitcoin_key_1 : if were_node_one { self . get_holder_pubkeys ( ) . funding_pubkey } else { self . counterparty_funding_pubkey ( ) . clone ( ) } ,
bitcoin_key_2 : if were_node_one { self . counterparty_funding_pubkey ( ) . clone ( ) } else { self . get_holder_pubkeys ( ) . funding_pubkey } ,
2018-08-29 14:31:50 -04:00
excess_data : Vec ::new ( ) ,
2017-12-25 01:05:27 -05:00
} ;
2021-11-14 17:25:39 +00:00
Ok ( msg )
}
2021-12-07 19:11:18 +00:00
fn get_announcement_sigs < L : Deref > ( & mut self , node_pk : PublicKey , genesis_block_hash : BlockHash , best_block_height : u32 , logger : & L )
-> Option < msgs ::AnnouncementSignatures > where L ::Target : Logger {
2021-11-18 21:54:13 +00:00
if self . funding_tx_confirmation_height = = 0 | | self . funding_tx_confirmation_height + 5 > best_block_height {
2021-12-07 19:11:18 +00:00
return None ;
2021-11-18 21:54:13 +00:00
}
if ! self . is_usable ( ) {
2021-12-07 19:11:18 +00:00
return None ;
2021-11-18 21:54:13 +00:00
}
if self . channel_state & ChannelState ::PeerDisconnected as u32 ! = 0 {
2021-12-07 19:11:18 +00:00
log_trace! ( logger , " Cannot create an announcement_signatures as our peer is disconnected " ) ;
return None ;
2021-11-18 21:54:13 +00:00
}
if self . announcement_sigs_state ! = AnnouncementSigsState ::NotSent {
2021-12-07 19:11:18 +00:00
return None ;
2021-11-18 21:54:13 +00:00
}
2021-12-07 19:11:18 +00:00
log_trace! ( logger , " Creating an announcement_signatures message for channel {} " , log_bytes! ( self . channel_id ( ) ) ) ;
let announcement = match self . get_channel_announcement ( node_pk , genesis_block_hash ) {
Ok ( a ) = > a ,
Err ( _ ) = > {
log_trace! ( logger , " Cannot create an announcement_signatures as channel is not public. " ) ;
return None ;
}
} ;
let ( our_node_sig , our_bitcoin_sig ) = match self . holder_signer . sign_channel_announcement ( & announcement , & self . secp_ctx ) {
Err ( _ ) = > {
log_error! ( logger , " Signer rejected channel_announcement signing. Channel will not be announced! " ) ;
return None ;
} ,
Ok ( v ) = > v
} ;
2021-11-18 21:54:13 +00:00
self . announcement_sigs_state = AnnouncementSigsState ::MessageSent ;
2017-12-25 01:05:27 -05:00
2021-12-07 19:11:18 +00:00
Some ( msgs ::AnnouncementSignatures {
2021-11-14 17:25:39 +00:00
channel_id : self . channel_id ( ) ,
short_channel_id : self . get_short_channel_id ( ) . unwrap ( ) ,
node_signature : our_node_sig ,
bitcoin_signature : our_bitcoin_sig ,
} )
2017-12-25 01:05:27 -05:00
}
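// The six-confirmation gate above reads a little indirectly: a funding
// transaction confirmed at height conf has its sixth confirmation once the
// tip reaches conf + 5, so announcement_signatures are withheld until then.
// In standalone form:
fn sketch_has_six_confs(conf_height: u32, best_height: u32) -> bool {
	conf_height != 0 && conf_height + 5 <= best_height
}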
2021-05-06 01:31:39 +00:00
/// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
/// available.
2021-11-14 17:25:39 +00:00
fn sign_channel_announcement ( & self , our_node_id : PublicKey , announcement : msgs ::UnsignedChannelAnnouncement ) -> Result < msgs ::ChannelAnnouncement , ChannelError > {
2021-05-06 01:15:35 +00:00
if let Some ( ( their_node_sig , their_bitcoin_sig ) ) = self . announcement_sigs {
let were_node_one = announcement . node_id_1 = = our_node_id ;
2021-11-14 17:25:39 +00:00
let ( our_node_sig , our_bitcoin_sig ) = self . holder_signer . sign_channel_announcement ( & announcement , & self . secp_ctx )
. map_err ( | _ | ChannelError ::Ignore ( " Signer rejected channel_announcement " . to_owned ( ) ) ) ? ;
2021-05-06 01:15:35 +00:00
Ok ( msgs ::ChannelAnnouncement {
node_signature_1 : if were_node_one { our_node_sig } else { their_node_sig } ,
node_signature_2 : if were_node_one { their_node_sig } else { our_node_sig } ,
bitcoin_signature_1 : if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig } ,
bitcoin_signature_2 : if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig } ,
contents : announcement ,
} )
} else {
Err ( ChannelError ::Ignore ( " Attempted to sign channel announcement before we'd received announcement_signatures " . to_string ( ) ) )
}
}
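// A standalone sketch of the were_node_one ordering used above: BOLT 7
// places announcement fields in lexicographic order of the serialized node
// ids, and the signature slots must match. Byte arrays stand in for the real
// PublicKey serializations.
fn sketch_were_node_one(our_id: &[u8; 33], their_id: &[u8; 33]) -> bool {
	our_id[..] < their_id[..]
}

fn sketch_order_sigs<T>(ours: T, theirs: T, we_are_node_one: bool) -> (T, T) {
	if we_are_node_one { (ours, theirs) } else { (theirs, ours) }
}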
/// Processes an incoming announcement_signatures message, providing a fully-signed
/// channel_announcement message which we can broadcast and storing our counterparty's
/// signatures for later reconstruction/rebroadcast of the channel_announcement.
2021-11-18 21:54:13 +00:00
pub fn announcement_signatures ( & mut self , our_node_id : PublicKey , chain_hash : BlockHash , best_block_height : u32 , msg : & msgs ::AnnouncementSignatures ) -> Result < msgs ::ChannelAnnouncement , ChannelError > {
2021-11-14 17:25:39 +00:00
let announcement = self . get_channel_announcement ( our_node_id . clone ( ) , chain_hash ) ? ;
2021-05-06 01:15:35 +00:00
let msghash = hash_to_message! ( & Sha256d ::hash ( & announcement . encode ( ) [ .. ] ) [ .. ] ) ;
2022-05-05 17:59:38 +02:00
if self . secp_ctx . verify_ecdsa ( & msghash , & msg . node_signature , & self . get_counterparty_node_id ( ) ) . is_err ( ) {
2021-05-06 01:15:35 +00:00
return Err ( ChannelError ::Close ( format! (
" Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?} " ,
& announcement , self . get_counterparty_node_id ( ) ) ) ) ;
}
2022-05-05 17:59:38 +02:00
if self . secp_ctx . verify_ecdsa ( & msghash , & msg . bitcoin_signature , self . counterparty_funding_pubkey ( ) ) . is_err ( ) {
2021-05-06 01:15:35 +00:00
return Err ( ChannelError ::Close ( format! (
" Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?}) " ,
& announcement , self . counterparty_funding_pubkey ( ) ) ) ) ;
}
self . announcement_sigs = Some ( ( msg . node_signature , msg . bitcoin_signature ) ) ;
2021-11-18 21:54:13 +00:00
if self . funding_tx_confirmation_height = = 0 | | self . funding_tx_confirmation_height + 5 > best_block_height {
return Err ( ChannelError ::Ignore (
" Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has " . to_owned ( ) ) ) ;
}
2021-05-06 01:15:35 +00:00
2021-11-14 17:25:39 +00:00
self . sign_channel_announcement ( our_node_id , announcement )
2021-05-06 01:15:35 +00:00
}
2021-05-06 01:31:39 +00:00
/// Gets a signed channel_announcement for this channel, if we previously received an
/// announcement_signatures from our counterparty.
2021-11-18 21:54:13 +00:00
pub fn get_signed_channel_announcement ( & self , our_node_id : PublicKey , chain_hash : BlockHash , best_block_height : u32 ) -> Option < msgs ::ChannelAnnouncement > {
if self . funding_tx_confirmation_height = = 0 | | self . funding_tx_confirmation_height + 5 > best_block_height {
return None ;
}
2021-11-14 17:25:39 +00:00
let announcement = match self . get_channel_announcement ( our_node_id . clone ( ) , chain_hash ) {
2021-05-06 01:31:39 +00:00
Ok ( res ) = > res ,
Err ( _ ) = > return None ,
} ;
2021-11-14 17:25:39 +00:00
match self . sign_channel_announcement ( our_node_id , announcement ) {
2021-05-06 01:31:39 +00:00
Ok ( res ) = > Some ( res ) ,
Err ( _ ) = > None ,
}
}
2018-09-08 16:02:46 -04:00
/// May panic if called on a channel that wasn't immediately-previously
/// self.remove_uncommitted_htlcs_and_mark_paused()'d
2020-03-02 12:55:53 -05:00
pub fn get_channel_reestablish < L : Deref > ( & self , logger : & L ) -> msgs ::ChannelReestablish where L ::Target : Logger {
2018-09-08 16:02:46 -04:00
assert_eq! ( self . channel_state & ChannelState ::PeerDisconnected as u32 , ChannelState ::PeerDisconnected as u32 ) ;
2020-06-08 20:47:55 -04:00
assert_ne! ( self . cur_counterparty_commitment_transaction_number , INITIAL_COMMITMENT_NUMBER ) ;
2020-05-02 22:00:08 -04:00
// Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
// current to_remote balances. However, it no longer has any use, and thus is now simply
// set to a dummy (but valid, as required by the spec) public key.
2022-02-17 19:29:59 +00:00
// fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
2020-05-02 22:00:08 -04:00
// branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
2022-02-17 19:29:59 +00:00
// valid, and valid in fuzzing mode's arbitrary validity criteria:
2020-05-02 22:00:08 -04:00
let mut pk = [ 2 ; 33 ] ; pk [ 1 ] = 0xff ;
let dummy_pubkey = PublicKey ::from_slice ( & pk ) . unwrap ( ) ;
2020-06-08 20:47:55 -04:00
let data_loss_protect = if self . cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
let remote_last_secret = self . commitment_secrets . get_secret ( self . cur_counterparty_commitment_transaction_number + 2 ) . unwrap ( ) ;
2021-06-22 03:35:52 +00:00
log_trace! ( logger , " Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {} " , log_bytes! ( remote_last_secret ) , log_bytes! ( self . channel_id ( ) ) ) ;
2019-07-10 15:48:23 -04:00
OptionalField ::Present ( DataLossProtect {
your_last_per_commitment_secret : remote_last_secret ,
2020-05-02 22:00:08 -04:00
my_current_per_commitment_point : dummy_pubkey
2019-07-10 15:48:23 -04:00
} )
} else {
2021-06-22 03:35:52 +00:00
log_info! ( logger , " Sending a data_loss_protect with no previous remote per_commitment_secret for channel {} " , log_bytes! ( self . channel_id ( ) ) ) ;
2019-07-10 15:48:23 -04:00
OptionalField ::Present ( DataLossProtect {
your_last_per_commitment_secret : [ 0 ; 32 ] ,
2020-05-02 22:00:08 -04:00
my_current_per_commitment_point : dummy_pubkey ,
2019-07-10 15:48:23 -04:00
} )
} ;
2018-09-08 16:02:46 -04:00
msgs ::ChannelReestablish {
channel_id : self . channel_id ( ) ,
2018-11-26 18:31:51 -05:00
// The protocol has two different commitment number concepts - the "commitment
// transaction number", which starts from 0 and counts up, and the "revocation key
// index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
// commitment transaction numbers by the index which will be used to reveal the
// revocation key for that commitment transaction, which means we have to convert them
// to protocol-level commitment numbers here...
// next_local_commitment_number is the next commitment_signed number we expect to
// receive (indicating if they need to resend one that we missed).
2020-06-08 20:47:55 -04:00
next_local_commitment_number : INITIAL_COMMITMENT_NUMBER - self . cur_holder_commitment_transaction_number ,
2018-11-26 18:31:51 -05:00
// We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
// receive, however we track it by the next commitment number for a remote transaction
// (which is one further, as they always revoke previous commitment transaction, not
// the one we send) so we have to decrement by 1. Note that if
2020-06-08 20:47:55 -04:00
// cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
2018-11-26 18:31:51 -05:00
// dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
// overflow here.
2020-06-08 20:47:55 -04:00
next_remote_commitment_number : INITIAL_COMMITMENT_NUMBER - self . cur_counterparty_commitment_transaction_number - 1 ,
2019-07-10 15:48:23 -04:00
data_loss_protect ,
2018-09-08 16:02:46 -04:00
}
}
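// A worked sketch of the conversion in the comments above, assuming the
// usual INITIAL_COMMITMENT_NUMBER of 2^48 - 1: after two commitment
// transactions our internal counter has counted *down* twice, while the
// protocol-level next_local_commitment_number counts *up* from zero.
fn sketch_reestablish_numbers() {
	const SKETCH_INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
	let cur_holder_commitment_transaction_number = SKETCH_INITIAL_COMMITMENT_NUMBER - 2;
	assert_eq!(SKETCH_INITIAL_COMMITMENT_NUMBER - cur_holder_commitment_transaction_number, 2);
}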
2017-12-25 01:05:27 -05:00
// Send stuff to our remote peers:
/// Adds a pending outbound HTLC to this channel. Note that you probably want
/// send_htlc_and_commit instead, as you'll want both messages at once.
2021-06-16 22:57:38 +00:00
///
/// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
/// the wire:
/// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
/// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
/// awaiting ACK.
/// * In cases where we're marked MonitorUpdateFailed, we cannot commit to a new state as we
/// may not yet have sent the previous commitment update messages and will need to regenerate
/// them.
///
/// You MUST call send_commitment prior to calling any other methods on this Channel!
///
2019-01-24 16:41:51 +02:00
/// If an Err is returned, it's a ChannelError::Ignore!
2021-08-21 18:05:51 -04:00
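	///
	/// # Example (illustrative sketch)
	///
	/// A minimal sketch of the intended calling pattern, assuming `chan` is an established,
	/// mutable `Channel` and the arguments shown are already constructed; this is not a
	/// compiled doctest from this crate:
	///
	/// ```ignore
	/// match chan.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_packet, &logger)? {
	///     Some(_update_add_htlc) => {
	///         // The HTLC can go out on the wire now; we must immediately commit to it.
	///         let (_commitment_signed, _monitor_update) = chan.send_commitment(&logger)?;
	///     },
	///     None => {
	///         // The HTLC was queued in the holding cell and will be sent once the
	///         // pending revoke_and_ack / monitor update completes.
	///     },
	/// }
	/// ```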
	pub fn send_htlc<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, logger: &L) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError> where L::Target: Logger {
		if (self.channel_state & (ChannelState::ChannelFunded as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelFunded as u32) {
			return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
		}
		let channel_total_msat = self.channel_value_satoshis * 1000;
		if amount_msat > channel_total_msat {
			return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
		}

		if amount_msat == 0 {
			return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
		}

		if amount_msat < self.counterparty_htlc_minimum_msat {
			return Err(ChannelError::Ignore(format!("Cannot send less than their minimum HTLC value ({})", self.counterparty_htlc_minimum_msat)));
		}

		if (self.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
			// Note that this should never really happen: being !is_live() on receipt of an
			// incoming HTLC for relay will result in us rejecting the HTLC, and we won't allow
			// the user to send directly into a !is_live() channel. However, if we
			// disconnected during the time the previous hop was doing the commitment dance we may
			// end up getting here after the forwarding delay. In any case, returning an
			// IgnoreError will get ChannelManager to do the right thing and fail backwards now.
			return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
		}

		let inbound_stats = self.get_inbound_pending_htlc_stats(None);
		let outbound_stats = self.get_outbound_pending_htlc_stats(None);
		if outbound_stats.pending_htlcs + 1 > self.counterparty_max_accepted_htlcs as u32 {
			return Err(ChannelError::Ignore(format!("Cannot push more than their max accepted HTLCs ({})", self.counterparty_max_accepted_htlcs)));
		}
		// Check their_max_htlc_value_in_flight_msat
		if outbound_stats.pending_htlcs_value_msat + amount_msat > self.counterparty_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Ignore(format!("Cannot send value that would put us over the max HTLC value in flight our peer will accept ({})", self.counterparty_max_htlc_value_in_flight_msat)));
		}

		let keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number)?;
		let commitment_stats = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, true, logger);
		if !self.is_outbound() {
			// Check that we won't violate the remote channel reserve by adding this HTLC.
			let htlc_candidate = HTLCCandidate::new(amount_msat, HTLCInitiator::LocalOffered);
			let counterparty_commit_tx_fee_msat = self.next_remote_commit_tx_fee_msat(htlc_candidate, None);
			let holder_selected_chan_reserve_msat = self.holder_selected_channel_reserve_satoshis * 1000;
			if commitment_stats.remote_balance_msat < counterparty_commit_tx_fee_msat + holder_selected_chan_reserve_msat {
				return Err(ChannelError::Ignore("Cannot send value that would put counterparty balance under holder-announced channel reserve value".to_owned()));
			}
		}

		let exposure_dust_limit_success_sats = (self.get_dust_buffer_feerate(None) as u64 * htlc_success_tx_weight(self.opt_anchors()) / 1000) + self.counterparty_dust_limit_satoshis;
		if amount_msat / 1000 < exposure_dust_limit_success_sats {
			let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + amount_msat;
			if on_counterparty_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() {
				return Err(ChannelError::Ignore(format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
					on_counterparty_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat())));
			}
		}

		let exposure_dust_limit_timeout_sats = (self.get_dust_buffer_feerate(None) as u64 * htlc_timeout_tx_weight(self.opt_anchors()) / 1000) + self.holder_dust_limit_satoshis;
		if amount_msat / 1000 < exposure_dust_limit_timeout_sats {
			let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + amount_msat;
			if on_holder_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() {
				return Err(ChannelError::Ignore(format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
					on_holder_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat())));
			}
		}
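
		// Illustrative dust-threshold arithmetic (assumed example values, using the BOLT 3
		// non-anchor weights of 703 for HTLC-success and 663 for HTLC-timeout): at a dust
		// buffer feerate of 2530 sat/kW and a counterparty dust limit of 546 sats, the
		// success-side threshold above is 2530 * 703 / 1000 + 546 = 2324 sats, so any HTLC
		// under 2324 sats counts toward our on-chain dust exposure.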

		let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
		if holder_balance_msat < amount_msat {
			return Err(ChannelError::Ignore(format!("Cannot send value that would overdraw remaining funds. Amount: {}, pending value to self {}", amount_msat, holder_balance_msat)));
		}

		// `2 *` and extra HTLC are for the fee spike buffer.
		let commit_tx_fee_msat = if self.is_outbound() {
			let htlc_candidate = HTLCCandidate::new(amount_msat, HTLCInitiator::LocalOffered);
			FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * self.next_local_commit_tx_fee_msat(htlc_candidate, Some(()))
		} else { 0 };
		if holder_balance_msat - amount_msat < commit_tx_fee_msat {
			return Err(ChannelError::Ignore(format!("Cannot send value that would not leave enough to pay for fees. Pending value to self: {}. local_commit_tx_fee {}", holder_balance_msat, commit_tx_fee_msat)));
		}
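
		// Illustrative fee-buffer arithmetic (assumed example values): with
		// FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE = 2, a feerate_per_kw of 1000, and a
		// projected non-anchor commitment weight of, say, 724 + 2 * 172 = 1068 weight
		// units (base plus two non-dust HTLCs), the projected commitment fee would be
		// 1068 * 1000 / 1000 * 1000 = 1_068_000 msat, so the check above requires
		// roughly twice that (2_136_000 msat) of slack beyond the amount being sent.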

		// Check self.counterparty_selected_channel_reserve_satoshis (the amount we must keep as
		// reserve for the remote to have something to claim if we misbehave)
		let chan_reserve_msat = self.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000;
		if holder_balance_msat - amount_msat - commit_tx_fee_msat < chan_reserve_msat {
			return Err(ChannelError::Ignore(format!("Cannot send value that would put our balance under counterparty-announced channel reserve value ({})", chan_reserve_msat)));
		}

		// Now update local state:
		if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateFailed as u32)) != 0 {
			self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
				amount_msat,
				payment_hash,
				cltv_expiry,
				source,
				onion_routing_packet,
			});
			return Ok(None);
		}

		self.pending_outbound_htlcs.push(OutboundHTLCOutput {
			htlc_id: self.next_holder_htlc_id,
			amount_msat,
			payment_hash: payment_hash.clone(),
			cltv_expiry,
			state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
			source,
		});

		let res = msgs::UpdateAddHTLC {
			channel_id: self.channel_id,
			htlc_id: self.next_holder_htlc_id,
			amount_msat,
			payment_hash,
			cltv_expiry,
			onion_routing_packet,
		};
		self.next_holder_htlc_id += 1;

		Ok(Some(res))
	}

	/// Creates a signed commitment transaction to send to the remote peer.
	/// Always returns a ChannelError::Close if an immediately-preceding (read: the
	/// last call to this Channel) send_htlc returned Ok(Some(_)) and there is an Err.
	/// May panic if called except immediately after a successful, Ok(Some(_))-returning send_htlc.
	pub fn send_commitment<L: Deref>(&mut self, logger: &L) -> Result<(msgs::CommitmentSigned, ChannelMonitorUpdate), ChannelError> where L::Target: Logger {
		if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
			panic!("Cannot create commitment tx until channel is fully established");
		}
		if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == (ChannelState::AwaitingRemoteRevoke as u32) {
			panic!("Cannot create commitment tx until remote revokes their previous commitment");
		}
		if (self.channel_state & (ChannelState::PeerDisconnected as u32)) == (ChannelState::PeerDisconnected as u32) {
			panic!("Cannot create commitment tx while disconnected, as send_htlc will have returned an Err so a send_commitment precondition has been violated");
		}
		if (self.channel_state & (ChannelState::MonitorUpdateFailed as u32)) == (ChannelState::MonitorUpdateFailed as u32) {
			panic!("Cannot create commitment tx while awaiting monitor update unfreeze, as send_htlc will have returned an Err so a send_commitment precondition has been violated");
		}
		let mut have_updates = self.is_outbound() && self.pending_update_fee.is_some();
		for htlc in self.pending_outbound_htlcs.iter() {
			if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
				have_updates = true;
			}
			if have_updates { break; }
		}
		for htlc in self.pending_inbound_htlcs.iter() {
			if let InboundHTLCState::LocalRemoved(_) = htlc.state {
				have_updates = true;
			}
			if have_updates { break; }
		}
		if !have_updates {
			panic!("Cannot create commitment tx until we have some updates to send");
		}
		self.send_commitment_no_status_check(logger)
	}

	/// Only fails in case of bad keys
	fn send_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> Result<(msgs::CommitmentSigned, ChannelMonitorUpdate), ChannelError> where L::Target: Logger {
		log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
		// We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
		// fail to generate this, we still are at least at a position where upgrading their status
		// is acceptable.
		for htlc in self.pending_inbound_htlcs.iter_mut() {
			let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
				Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
			} else { None };
			if let Some(state) = new_state {
				log_trace!(logger, "...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
				htlc.state = state;
			}
		}
		for htlc in self.pending_outbound_htlcs.iter_mut() {
			if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
				log_trace!(logger, "...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
				// Grab the preimage, if it exists, instead of cloning
				let mut reason = OutboundHTLCOutcome::Success(None);
				mem::swap(outcome, &mut reason);
				htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
			}
		}
		if let Some((feerate, update_state)) = self.pending_update_fee {
			if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
				debug_assert!(!self.is_outbound());
				log_trace!(logger, "...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
				self.feerate_per_kw = feerate;
				self.pending_update_fee = None;
			}
		}
		self.resend_order = RAACommitmentOrder::RevokeAndACKFirst;

		let (res, counterparty_commitment_txid, htlcs) = match self.send_commitment_no_state_update(logger) {
			Ok((res, (counterparty_commitment_tx, mut htlcs))) => {
				// Update state now that we've passed all the can-fail calls...
				let htlcs_no_ref: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
					htlcs.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
				(res, counterparty_commitment_tx, htlcs_no_ref)
			},
			Err(e) => return Err(e),
		};
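
		// There is no explicit ACK for `announcement_signatures`, so we treat a later
		// `revoke_and_ack` as one: if we sent a `commitment_signed` after our
		// `announcement_signatures` and the counterparty then sends `revoke_and_ack`,
		// they must also have received the `announcement_signatures`. Mark the sigs as
		// committed alongside this commitment so we can stop re-sending them.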
		if self.announcement_sigs_state == AnnouncementSigsState::MessageSent {
			self.announcement_sigs_state = AnnouncementSigsState::Committed;
		}

		self.latest_monitor_update_id += 1;
		let monitor_update = ChannelMonitorUpdate {
			update_id: self.latest_monitor_update_id,
			updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
				commitment_txid: counterparty_commitment_txid,
				htlc_outputs: htlcs.clone(),
				commitment_number: self.cur_counterparty_commitment_transaction_number,
				their_per_commitment_point: self.counterparty_cur_commitment_point.unwrap()
			}]
		};
		self.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
		Ok((res, monitor_update))
	}

	/// Only fails in case of bad keys. Used for channel_reestablish commitment_signed generation
	/// when we shouldn't change HTLC/channel state.
	fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
		let counterparty_keys = self.build_remote_transaction_keys()?;
		let commitment_stats = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
		let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
		let (signature, htlc_signatures);

		#[cfg(any(test, fuzzing))]
		{
			if !self.is_outbound() {
				let projected_commit_tx_info = self.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
				*self.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
				if let Some(info) = projected_commit_tx_info {
					let total_pending_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
					if info.total_pending_htlcs == total_pending_htlcs
						&& info.next_holder_htlc_id == self.next_holder_htlc_id
						&& info.next_counterparty_htlc_id == self.next_counterparty_htlc_id
						&& info.feerate == self.feerate_per_kw {
							let actual_fee = Self::commit_tx_fee_msat(self.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.opt_anchors());
							assert_eq!(actual_fee, info.fee);
					}
				}
			}
		}

		{
			let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
			for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
				htlcs.push(htlc);
			}

			let res = self.holder_signer.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.secp_ctx)
				.map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?;
			signature = res.0;
			htlc_signatures = res.1;

			log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
				encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
				&counterparty_commitment_txid, encode::serialize_hex(&self.get_funding_redeemscript()),
				log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.channel_id()));

			for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
				log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
					encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.get_holder_selected_contest_delay(), htlc, self.opt_anchors(), &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
					encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, self.opt_anchors(), &counterparty_keys)),
					log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
					log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.channel_id()));
			}
		}

		Ok((msgs::CommitmentSigned {
			channel_id: self.channel_id,
			signature,
			htlc_signatures,
		}, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
	}

	/// Adds a pending outbound HTLC to this channel, and creates a signed commitment transaction
	/// to send to the remote peer in one go.
	/// Shorthand for calling send_htlc() followed by send_commitment(); see the docs on those for
	/// more info.
	pub fn send_htlc_and_commit<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, logger: &L) -> Result<Option<(msgs::UpdateAddHTLC, msgs::CommitmentSigned, ChannelMonitorUpdate)>, ChannelError> where L::Target: Logger {
		match self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, logger)? {
			Some(update_add_htlc) => {
				let (commitment_signed, monitor_update) = self.send_commitment_no_status_check(logger)?;
				Ok(Some((update_add_htlc, commitment_signed, monitor_update)))
			},
			None => Ok(None)
		}
	}

	/// Get forwarding information for the counterparty.
	pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
		self.counterparty_forwarding_info.clone()
	}

	pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<(), ChannelError> {
		if msg.contents.htlc_minimum_msat >= self.channel_value_satoshis * 1000 {
			return Err(ChannelError::Close("Minimum htlc value is greater than channel value".to_string()));
		}
		self.counterparty_forwarding_info = Some(CounterpartyForwardingInfo {
			fee_base_msat: msg.contents.fee_base_msat,
			fee_proportional_millionths: msg.contents.fee_proportional_millionths,
			cltv_expiry_delta: msg.contents.cltv_expiry_delta
		});

		Ok(())
	}

	/// Begins the shutdown process, getting a message for the remote peer and returning all
	/// holding cell HTLCs for payment failure.
	pub fn get_shutdown<K: Deref>(&mut self, keys_provider: &K, their_features: &InitFeatures, target_feerate_sats_per_kw: Option<u32>)
		-> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
		where K::Target: KeysInterface<Signer = Signer> {
		for htlc in self.pending_outbound_htlcs.iter() {
			if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
				return Err(APIError::APIMisuseError { err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned() });
			}
		}
		if self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
			if (self.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
				return Err(APIError::APIMisuseError { err: "Shutdown already in progress".to_owned() });
			}
			else if (self.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
				return Err(APIError::ChannelUnavailable { err: "Shutdown already in progress by remote".to_owned() });
			}
		}
		assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0);
		if self.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32) != 0 {
			return Err(APIError::ChannelUnavailable { err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned() });
		}

		let update_shutdown_script = match self.shutdown_scriptpubkey {
			Some(_) => false,
			None => {
				let shutdown_scriptpubkey = keys_provider.get_shutdown_scriptpubkey();
				if !shutdown_scriptpubkey.is_compatible(their_features) {
					return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
				}
				self.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
				true
			},
		};

		// From here on out, we may not fail!
		self.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
		if self.channel_state < ChannelState::FundingSent as u32 {
			self.channel_state = ChannelState::ShutdownComplete as u32;
		} else {
			self.channel_state |= ChannelState::LocalShutdownSent as u32;
		}
		self.update_time_counter += 1;

		let monitor_update = if update_shutdown_script {
			self.latest_monitor_update_id += 1;
			Some(ChannelMonitorUpdate {
				update_id: self.latest_monitor_update_id,
				updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
					scriptpubkey: self.get_closing_scriptpubkey(),
				}],
			})
		} else { None };
		let shutdown = msgs::Shutdown {
			channel_id: self.channel_id,
			scriptpubkey: self.get_closing_scriptpubkey(),
		};

		// Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
		// our shutdown until we've committed all of the pending changes.
		self.holding_cell_update_fee = None;
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
		self.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
					dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
					false
				},
				_ => true
			}
		});

		Ok((shutdown, monitor_update, dropped_outbound_htlcs))
	}

	/// Gets the latest commitment transaction and any dependent transactions for relay (forcing
	/// shutdown of this channel - no more calls into this Channel may be made afterwards except
	/// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
	/// Also returns the list of payment_hashes for channels which we can safely fail backwards
	/// immediately (others we will have to allow to time out).
	pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>) {
		// Note that we MUST only generate a monitor update that indicates force-closure - we're
		// called during initialization prior to the chain_monitor in the encompassing ChannelManager
		// being fully configured in some cases. Thus, it's likely any monitor events we generate will
		// be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
		assert!(self.channel_state != ChannelState::ShutdownComplete as u32);

		// We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
		// return them to fail the payment.
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
		let counterparty_node_id = self.get_counterparty_node_id();
		for htlc_update in self.holding_cell_htlc_updates.drain(..) {
			match htlc_update {
				HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
					dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
				},
				_ => {}
			}
		}
		let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
			// If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
			// returning a channel monitor update here would imply a channel monitor update before
			// we even registered the channel monitor to begin with, which is invalid.
			// Thus, if we aren't actually at a point where we could conceivably broadcast the
			// funding transaction, don't return a funding txo (which prevents providing the
			// monitor update to the user, even if we return one).
			// See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
			if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelFunded as u32 | ChannelState::ShutdownComplete as u32) != 0 {
				self.latest_monitor_update_id += 1;
				Some((funding_txo, ChannelMonitorUpdate {
					update_id: self.latest_monitor_update_id,
					updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
				}))
			} else { None }
		} else { None };

		self.channel_state = ChannelState::ShutdownComplete as u32;
		self.update_time_counter += 1;
		(monitor_update, dropped_outbound_htlcs)
	}
}

const SERIALIZATION_VERSION: u8 = 2;
const MIN_SERIALIZATION_VERSION: u8 = 2;

impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
	(0, FailRelay),
	(1, FailMalformed),
	(2, Fulfill),
);

impl Writeable for ChannelUpdateStatus {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		// We only care about writing out the current state as it was announced, ie only either
		// Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
		// channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
		match self {
			ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
			ChannelUpdateStatus::DisabledStaged => 0u8.write(writer)?,
			ChannelUpdateStatus::EnabledStaged => 1u8.write(writer)?,
			ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
		}
		Ok(())
	}
}

impl Readable for ChannelUpdateStatus {
	fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
		Ok(match <u8 as Readable>::read(reader)? {
			0 => ChannelUpdateStatus::Enabled,
			1 => ChannelUpdateStatus::Disabled,
			_ => return Err(DecodeError::InvalidValue),
		})
	}
}
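
// Illustrative round-trip implied by the two impls above: a channel serialized while
// DisabledStaged writes 0 and reads back as Enabled, and one serialized while
// EnabledStaged reads back as Disabled - i.e. a staged-but-unannounced status change
// is intentionally dropped across restart, matching what was last announced.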

impl Writeable for AnnouncementSigsState {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		// We only care about writing out the current state as if we had just disconnected, at
		// which point we always set anything but PeerReceived to NotSent.
		match self {
			AnnouncementSigsState::NotSent => 0u8.write(writer),
			AnnouncementSigsState::MessageSent => 0u8.write(writer),
			AnnouncementSigsState::Committed => 0u8.write(writer),
			AnnouncementSigsState::PeerReceived => 1u8.write(writer),
		}
	}
}

impl Readable for AnnouncementSigsState {
	fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
		Ok(match <u8 as Readable>::read(reader)? {
			0 => AnnouncementSigsState::NotSent,
			1 => AnnouncementSigsState::PeerReceived,
			_ => return Err(DecodeError::InvalidValue),
		})
	}
}
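
// Illustrative round-trip implied by the impls above: only PeerReceived survives
// serialization (as 1); MessageSent and Committed write 0 and read back as NotSent,
// so after a restart we conservatively re-send announcement_signatures unless the
// peer had definitively received them.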

impl<Signer: Sign> Writeable for Channel<Signer> {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		// Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
		// called.

		write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);

		self.user_id.write(writer)?;

		// Version 1 deserializers expected to read parts of the config object here. Version 2
		// deserializers (0.0.99) now read config through TLVs, and as we now require them for
		// `minimum_depth` we simply write dummy values here.
		writer.write_all(&[0; 8])?;

		self.channel_id.write(writer)?;
		(self.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
		self.channel_value_satoshis.write(writer)?;
		self.latest_monitor_update_id.write(writer)?;

		let mut key_data = VecWriter(Vec::new());
		self.holder_signer.write(&mut key_data)?;
		assert!(key_data.0.len() < core::usize::MAX);
		assert!(key_data.0.len() < core::u32::MAX as usize);
		(key_data.0.len() as u32).write(writer)?;
		writer.write_all(&key_data.0[..])?;

		// Write out the old serialization for shutdown_pubkey for backwards compatibility, if
		// deserialized from that format.
		match self.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
			Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
			None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
		}
		self.destination_script.write(writer)?;

		self.cur_holder_commitment_transaction_number.write(writer)?;
		self.cur_counterparty_commitment_transaction_number.write(writer)?;
		self.value_to_self_msat.write(writer)?;

		let mut dropped_inbound_htlcs = 0;
		for htlc in self.pending_inbound_htlcs.iter() {
			if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
				dropped_inbound_htlcs += 1;
			}
		}
		(self.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
		for htlc in self.pending_inbound_htlcs.iter() {
			if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
				continue; // Drop
			}
			htlc.htlc_id.write(writer)?;
			htlc.amount_msat.write(writer)?;
			htlc.cltv_expiry.write(writer)?;
			htlc.payment_hash.write(writer)?;
			match &htlc.state {
				&InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
				&InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
					1u8.write(writer)?;
					htlc_state.write(writer)?;
				},
				&InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
					2u8.write(writer)?;
					htlc_state.write(writer)?;
				},
				&InboundHTLCState::Committed => {
					3u8.write(writer)?;
				},
				&InboundHTLCState::LocalRemoved(ref removal_reason) => {
					4u8.write(writer)?;
					removal_reason.write(writer)?;
				},
			}
		}

		let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];

		(self.pending_outbound_htlcs.len() as u64).write(writer)?;
		for htlc in self.pending_outbound_htlcs.iter() {
			htlc.htlc_id.write(writer)?;
			htlc.amount_msat.write(writer)?;
			htlc.cltv_expiry.write(writer)?;
			htlc.payment_hash.write(writer)?;
			htlc.source.write(writer)?;
			match &htlc.state {
				&OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
					0u8.write(writer)?;
					onion_packet.write(writer)?;
				},
				&OutboundHTLCState::Committed => {
					1u8.write(writer)?;
				},
				&OutboundHTLCState::RemoteRemoved(_) => {
					// Treat this as a Committed because we haven't received the CS - they'll
					// re-send the claim/fail on reconnect, along with (hopefully) the missing CS.
					1u8.write(writer)?;
				},
				&OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
					3u8.write(writer)?;
					if let OutboundHTLCOutcome::Success(preimage) = outcome {
						preimages.push(preimage);
					}
					let reason: Option<&HTLCFailReason> = outcome.into();
					reason.write(writer)?;
				}
				&OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
					4u8.write(writer)?;
					if let OutboundHTLCOutcome::Success(preimage) = outcome {
						preimages.push(preimage);
					}
					let reason: Option<&HTLCFailReason> = outcome.into();
					reason.write(writer)?;
				}
			}
		}

		(self.holding_cell_htlc_updates.len() as u64).write(writer)?;
		for update in self.holding_cell_htlc_updates.iter() {
			match update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet } => {
					0u8.write(writer)?;
					amount_msat.write(writer)?;
					cltv_expiry.write(writer)?;
					payment_hash.write(writer)?;
					source.write(writer)?;
					onion_routing_packet.write(writer)?;
				},
				&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
					1u8.write(writer)?;
					payment_preimage.write(writer)?;
					htlc_id.write(writer)?;
				},
				&HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
					2u8.write(writer)?;
					htlc_id.write(writer)?;
					err_packet.write(writer)?;
				}
			}
		}

		match self.resend_order {
			RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
			RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
		}

		self.monitor_pending_channel_ready.write(writer)?;
		self.monitor_pending_revoke_and_ack.write(writer)?;
		self.monitor_pending_commitment_signed.write(writer)?;
		(self.monitor_pending_forwards.len() as u64).write(writer)?;
		for &(ref pending_forward, ref htlc_id) in self.monitor_pending_forwards.iter() {
			pending_forward.write(writer)?;
			htlc_id.write(writer)?;
		}
		(self.monitor_pending_failures.len() as u64).write(writer)?;
		for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.monitor_pending_failures.iter() {
			htlc_source.write(writer)?;
			payment_hash.write(writer)?;
			fail_reason.write(writer)?;
		}

		if self.is_outbound() {
			self.pending_update_fee.map(|(a, _)| a).write(writer)?;
		} else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.pending_update_fee {
			Some(feerate).write(writer)?;
		} else {
			// As for inbound HTLCs, if the update was only announced and never committed in a
			// commitment_signed, drop it.
			None::<u32>.write(writer)?;
		}
		self.holding_cell_update_fee.write(writer)?;

		self.next_holder_htlc_id.write(writer)?;
		(self.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
		self.update_time_counter.write(writer)?;
		self.feerate_per_kw.write(writer)?;

		// Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
		// however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
		// `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
		// consider the stale state on reload.
		0u8.write(writer)?;

		self.funding_tx_confirmed_in.write(writer)?;
		self.funding_tx_confirmation_height.write(writer)?;
		self.short_channel_id.write(writer)?;

		self.counterparty_dust_limit_satoshis.write(writer)?;
		self.holder_dust_limit_satoshis.write(writer)?;
		self.counterparty_max_htlc_value_in_flight_msat.write(writer)?;

		// Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
		self.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;

		self.counterparty_htlc_minimum_msat.write(writer)?;
		self.holder_htlc_minimum_msat.write(writer)?;
		self.counterparty_max_accepted_htlcs.write(writer)?;

		// Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
		self.minimum_depth.unwrap_or(0).write(writer)?;

		match &self.counterparty_forwarding_info {
			Some(info) => {
				1u8.write(writer)?;
				info.fee_base_msat.write(writer)?;
				info.fee_proportional_millionths.write(writer)?;
				info.cltv_expiry_delta.write(writer)?;
			},
			None => 0u8.write(writer)?
		}

		self.channel_transaction_parameters.write(writer)?;
		self.funding_transaction.write(writer)?;

		self.counterparty_cur_commitment_point.write(writer)?;
		self.counterparty_prev_commitment_point.write(writer)?;
		self.counterparty_node_id.write(writer)?;

		self.counterparty_shutdown_scriptpubkey.write(writer)?;

		self.commitment_secrets.write(writer)?;

		self.channel_update_status.write(writer)?;

		#[cfg(any(test, fuzzing))]
		(self.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
		#[cfg(any(test, fuzzing))]
		for htlc in self.historical_inbound_htlc_fulfills.iter() {
			htlc.write(writer)?;
		}

		// If the channel type is something other than only-static-remote-key, then we need to have
		// older clients fail to deserialize this channel at all. If the type is
		// only-static-remote-key, we simply consider it "default" and don't write the channel type
		// out at all.
		let chan_type = if self.channel_type != ChannelTypeFeatures::only_static_remote_key() {
			Some(&self.channel_type) } else { None };

		// The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
		// the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
		// a different percentage of the channel value than 10%, which older versions of LDK used
		// to set it to before the percentage was made configurable.
		let serialized_holder_selected_reserve =
			if self.holder_selected_channel_reserve_satoshis != Self::get_legacy_default_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis)
			{ Some(self.holder_selected_channel_reserve_satoshis) } else { None };

		let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
		old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
		let serialized_holder_htlc_max_in_flight =
			if self.holder_max_htlc_value_in_flight_msat != Self::get_holder_max_htlc_value_in_flight_msat(self.channel_value_satoshis, &old_max_in_flight_percent_config)
			{ Some(self.holder_max_htlc_value_in_flight_msat) } else { None };
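
		// Illustrative example of the legacy default (assumed numbers): for a
		// 1_000_000 sat channel, the pre-configurable max-in-flight default was 10% of
		// the channel value, i.e. 100_000_000 msat; only values differing from that
		// legacy default (or from the legacy reserve default) get written as TLVs below.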

		write_tlv_fields!(writer, {
			(0, self.announcement_sigs, option),
			// minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
			// default value instead of being Option<>al. Thus, to maintain compatibility we write
			// them twice, once with their original default values above, and once as an option
			// here. On the read side, old versions will simply ignore the odd-type entries here,
			// and new versions map the default values to None and allow the TLV entries here to
			// override that.
			(1, self.minimum_depth, option),
			(2, chan_type, option),
			(3, self.counterparty_selected_channel_reserve_satoshis, option),
			(4, serialized_holder_selected_reserve, option),
			(5, self.config, required),
			(6, serialized_holder_htlc_max_in_flight, option),
			(7, self.shutdown_scriptpubkey, option),
			(9, self.target_closing_feerate_sats_per_kw, option),
			(11, self.monitor_pending_finalized_fulfills, vec_type),
			(13, self.channel_creation_height, required),
			(15, preimages, vec_type),
			(17, self.announcement_sigs_state, required),
			(19, self.latest_inbound_scid_alias, option),
			(21, self.outbound_scid_alias, required),
		});

		Ok(())
}
}
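
// A note on the TLV types used in the `write_tlv_fields!` list above: like the
// wire protocol itself, our TLV streams follow the "it's OK to be odd" rule.
// `read_tlv_fields!` silently skips unknown odd-typed entries but refuses to
// deserialize on an unknown even-typed one, so odd types are chosen for fields
// old versions may safely ignore, and even types for fields a reader must
// understand to use the channel at all.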

const MAX_ALLOC_SIZE: usize = 64 * 1024;

impl<'a, Signer: Sign, K: Deref> ReadableArgs<(&'a K, u32)> for Channel<Signer>
		where K::Target: KeysInterface<Signer = Signer> {
	fn read<R: io::Read>(reader: &mut R, args: (&'a K, u32)) -> Result<Self, DecodeError> {
		let (keys_source, serialized_height) = args;
		let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);

		let user_id = Readable::read(reader)?;

		let mut config = Some(LegacyChannelConfig::default());
		if ver == 1 {
			// Read the old serialization of the ChannelConfig from version 0.0.98.
			config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
			config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
			config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
			config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
		} else {
			// Read the 8 bytes of backwards-compatibility ChannelConfig data.
			let mut _val: u64 = Readable::read(reader)?;
		}

		let channel_id = Readable::read(reader)?;
		let channel_state = Readable::read(reader)?;
		let channel_value_satoshis = Readable::read(reader)?;

		let latest_monitor_update_id = Readable::read(reader)?;

		let keys_len: u32 = Readable::read(reader)?;
		let mut keys_data = Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE));
		while keys_data.len() != keys_len as usize {
			// Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
			let mut data = [0; 1024];
			let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.len())];
			reader.read_exact(read_slice)?;
			keys_data.extend_from_slice(read_slice);
		}
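
		// (Aside: the loop above is the standard bounded-allocation read pattern:
		// we trust the declared length only as fast as the reader can actually
		// back it with bytes. A free-standing sketch of the same idea, using only
		// `std::io` and a hypothetical `max_alloc` cap, might look like:
		//
		//     fn read_bounded<R: io::Read>(reader: &mut R, len: usize, max_alloc: usize) -> Result<Vec<u8>, io::Error> {
		//         let mut data = Vec::with_capacity(cmp::min(len, max_alloc));
		//         let mut buf = [0u8; 1024];
		//         while data.len() != len {
		//             let read_slice = &mut buf[0..cmp::min(buf.len(), len - data.len())];
		//             reader.read_exact(read_slice)?;
		//             data.extend_from_slice(read_slice);
		//         }
		//         Ok(data)
		//     }
		//
		// `read_exact` fails with `UnexpectedEof` once the stream runs dry, so a
		// corrupt length field can never force a huge up-front allocation.)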

		let holder_signer = keys_source.read_chan_signer(&keys_data)?;

		// Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
		let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
			Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
			Err(_) => None,
		};
		let destination_script = Readable::read(reader)?;

		let cur_holder_commitment_transaction_number = Readable::read(reader)?;
		let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
		let value_to_self_msat = Readable::read(reader)?;

		let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
		let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, OUR_MAX_HTLCS as usize));
		for _ in 0..pending_inbound_htlc_count {
			pending_inbound_htlcs.push(InboundHTLCOutput {
				htlc_id: Readable::read(reader)?,
				amount_msat: Readable::read(reader)?,
				cltv_expiry: Readable::read(reader)?,
				payment_hash: Readable::read(reader)?,
				state: match <u8 as Readable>::read(reader)? {
					1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
					2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
					3 => InboundHTLCState::Committed,
					4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
					_ => return Err(DecodeError::InvalidValue),
				},
			});
		}

		let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
		let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, OUR_MAX_HTLCS as usize));
		for _ in 0..pending_outbound_htlc_count {
			pending_outbound_htlcs.push(OutboundHTLCOutput {
				htlc_id: Readable::read(reader)?,
				amount_msat: Readable::read(reader)?,
				cltv_expiry: Readable::read(reader)?,
				payment_hash: Readable::read(reader)?,
				source: Readable::read(reader)?,
				state: match <u8 as Readable>::read(reader)? {
					0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
					1 => OutboundHTLCState::Committed,
					2 => {
						let option: Option<HTLCFailReason> = Readable::read(reader)?;
						OutboundHTLCState::RemoteRemoved(option.into())
					},
					3 => {
						let option: Option<HTLCFailReason> = Readable::read(reader)?;
						OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
					},
					4 => {
						let option: Option<HTLCFailReason> = Readable::read(reader)?;
						OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
					},
					_ => return Err(DecodeError::InvalidValue),
				},
			});
		}
		let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
		let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, OUR_MAX_HTLCS as usize * 2));
		for _ in 0..holding_cell_htlc_update_count {
			holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
				0 => HTLCUpdateAwaitingACK::AddHTLC {
					amount_msat: Readable::read(reader)?,
					cltv_expiry: Readable::read(reader)?,
					payment_hash: Readable::read(reader)?,
					source: Readable::read(reader)?,
					onion_routing_packet: Readable::read(reader)?,
				},
				1 => HTLCUpdateAwaitingACK::ClaimHTLC {
					payment_preimage: Readable::read(reader)?,
					htlc_id: Readable::read(reader)?,
				},
				2 => HTLCUpdateAwaitingACK::FailHTLC {
					htlc_id: Readable::read(reader)?,
					err_packet: Readable::read(reader)?,
				},
				_ => return Err(DecodeError::InvalidValue),
			});
		}

		let resend_order = match <u8 as Readable>::read(reader)? {
			0 => RAACommitmentOrder::CommitmentFirst,
			1 => RAACommitmentOrder::RevokeAndACKFirst,
			_ => return Err(DecodeError::InvalidValue),
		};

		let monitor_pending_channel_ready = Readable::read(reader)?;
		let monitor_pending_revoke_and_ack = Readable::read(reader)?;
		let monitor_pending_commitment_signed = Readable::read(reader)?;

		let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
		let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, OUR_MAX_HTLCS as usize));
		for _ in 0..monitor_pending_forwards_count {
			monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
		}
		let monitor_pending_failures_count: u64 = Readable::read(reader)?;
		let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, OUR_MAX_HTLCS as usize));
		for _ in 0..monitor_pending_failures_count {
			monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
		}

		let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
		let holding_cell_update_fee = Readable::read(reader)?;

		let next_holder_htlc_id = Readable::read(reader)?;
		let next_counterparty_htlc_id = Readable::read(reader)?;
		let update_time_counter = Readable::read(reader)?;
		let feerate_per_kw = Readable::read(reader)?;

		// Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
		// however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
		// `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
		// consider the stale state on reload.
		match <u8 as Readable>::read(reader)? {
			0 => {},
			1 => {
				let _: u32 = Readable::read(reader)?;
				let _: u64 = Readable::read(reader)?;
				let _: Signature = Readable::read(reader)?;
			},
			_ => return Err(DecodeError::InvalidValue),
		}

		let funding_tx_confirmed_in = Readable::read(reader)?;
		let funding_tx_confirmation_height = Readable::read(reader)?;
		let short_channel_id = Readable::read(reader)?;

		let counterparty_dust_limit_satoshis = Readable::read(reader)?;
		let holder_dust_limit_satoshis = Readable::read(reader)?;
		let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;

		let mut counterparty_selected_channel_reserve_satoshis = None;
		if ver == 1 {
			// Read the old serialization from version 0.0.98.
			counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
		} else {
			// Read the 8 bytes of backwards-compatibility data.
			let _dummy: u64 = Readable::read(reader)?;
		}
		let counterparty_htlc_minimum_msat = Readable::read(reader)?;
		let holder_htlc_minimum_msat = Readable::read(reader)?;
		let counterparty_max_accepted_htlcs = Readable::read(reader)?;

		let mut minimum_depth = None;
		if ver == 1 {
			// Read the old serialization from version 0.0.98.
			minimum_depth = Some(Readable::read(reader)?);
		} else {
			// Read the 4 bytes of backwards-compatibility data.
			let _dummy: u32 = Readable::read(reader)?;
		}

		let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
			0 => None,
			1 => Some(CounterpartyForwardingInfo {
				fee_base_msat: Readable::read(reader)?,
				fee_proportional_millionths: Readable::read(reader)?,
				cltv_expiry_delta: Readable::read(reader)?,
			}),
			_ => return Err(DecodeError::InvalidValue),
		};

		let channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
		let funding_transaction = Readable::read(reader)?;

		let counterparty_cur_commitment_point = Readable::read(reader)?;
		let counterparty_prev_commitment_point = Readable::read(reader)?;
		let counterparty_node_id = Readable::read(reader)?;

		let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
		let commitment_secrets = Readable::read(reader)?;

		let channel_update_status = Readable::read(reader)?;

		#[cfg(any(test, fuzzing))]
		let mut historical_inbound_htlc_fulfills = HashSet::new();
		#[cfg(any(test, fuzzing))]
		{
			let htlc_fulfills_len: u64 = Readable::read(reader)?;
			for _ in 0..htlc_fulfills_len {
				assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
			}
		}
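
		// Only the channel funder sends `update_fee`, so a fee update that was
		// pending at serialization time must have been ours if this channel is
		// outbound from us (hence `Outbound`), and our counterparty's otherwise.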
		let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
			Some((feerate, if channel_parameters.is_outbound_from_holder {
				FeeUpdateState::Outbound
			} else {
				FeeUpdateState::AwaitingRemoteRevokeToAnnounce
			}))
		} else {
			None
		};

		let mut announcement_sigs = None;
		let mut target_closing_feerate_sats_per_kw = None;
		let mut monitor_pending_finalized_fulfills = Some(Vec::new());
		let mut holder_selected_channel_reserve_satoshis = Some(Self::get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
		let mut holder_max_htlc_value_in_flight_msat = Some(Self::get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));

		// Prior to supporting channel type negotiation, all of our channels were static_remotekey
		// only, so we default to that if none was written.
		let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
		let mut channel_creation_height = Some(serialized_height);
		let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;

		// If we read an old Channel, for simplicity we just treat it as "we never sent an
		// AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
		let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
		let mut latest_inbound_scid_alias = None;
		let mut outbound_scid_alias = None;

		read_tlv_fields!(reader, {
			(0, announcement_sigs, option),
			(1, minimum_depth, option),
			(2, channel_type, option),
			(3, counterparty_selected_channel_reserve_satoshis, option),
			(4, holder_selected_channel_reserve_satoshis, option),
			(5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
			(6, holder_max_htlc_value_in_flight_msat, option),
			(7, shutdown_scriptpubkey, option),
			(9, target_closing_feerate_sats_per_kw, option),
			(11, monitor_pending_finalized_fulfills, vec_type),
			(13, channel_creation_height, option),
			(15, preimages_opt, vec_type),
			(17, announcement_sigs_state, option),
			(19, latest_inbound_scid_alias, option),
			(21, outbound_scid_alias, option),
		});

		if let Some(preimages) = preimages_opt {
			let mut iter = preimages.into_iter();
			for htlc in pending_outbound_htlcs.iter_mut() {
				match &htlc.state {
					OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
						htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
					}
					OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
						htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
					}
					_ => {}
				}
			}
			// We expect all preimages to be consumed above.
			if iter.next().is_some() {
				return Err(DecodeError::InvalidValue);
			}
		}
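
		// (The re-association above is deterministic: the writer emitted one
		// preimage entry per pending outbound HTLC in a `Success` state, walking
		// `pending_outbound_htlcs` in order, so walking the same Vec in the same
		// order here pairs each preimage back up with the HTLC it was written for.)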

		let chan_features = channel_type.as_ref().unwrap();
		if chan_features.supports_unknown_bits() || chan_features.requires_unknown_bits() {
			// If the channel was written by a new version and negotiated with features we don't
			// understand yet, refuse to read it.
			return Err(DecodeError::UnknownRequiredFeature);
		}

		if channel_parameters.opt_anchors.is_some() {
			// Relax this check when ChannelTypeFeatures supports anchors.
			return Err(DecodeError::InvalidValue);
		}

		let mut secp_ctx = Secp256k1::new();
		secp_ctx.seeded_randomize(&keys_source.get_secure_random_bytes());

		Ok(Channel {
			user_id,

			config: config.unwrap(),

			prev_config: None,

			// Note that we don't care about serializing handshake limits as we only ever serialize
			// channel data after the handshake has completed.
			inbound_handshake_limits_override: None,

			channel_id,
			channel_state,
			announcement_sigs_state: announcement_sigs_state.unwrap(),
			secp_ctx,
			channel_value_satoshis,

			latest_monitor_update_id,

			holder_signer,
			shutdown_scriptpubkey,
			destination_script,

			cur_holder_commitment_transaction_number,
			cur_counterparty_commitment_transaction_number,
			value_to_self_msat,

			pending_inbound_htlcs,
			pending_outbound_htlcs,
			holding_cell_htlc_updates,

			resend_order,

			monitor_pending_channel_ready,
			monitor_pending_revoke_and_ack,
			monitor_pending_commitment_signed,
			monitor_pending_forwards,
			monitor_pending_failures,
			monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),

			pending_update_fee,
			holding_cell_update_fee,
			next_holder_htlc_id,
			next_counterparty_htlc_id,
			update_time_counter,
			feerate_per_kw,

			#[cfg(debug_assertions)]
			holder_max_commitment_tx_output: Mutex::new((0, 0)),
			#[cfg(debug_assertions)]
			counterparty_max_commitment_tx_output: Mutex::new((0, 0)),

			last_sent_closing_fee: None,
			pending_counterparty_closing_signed: None,
			closing_fee_limits: None,
			target_closing_feerate_sats_per_kw,

			inbound_awaiting_accept: false,

			funding_tx_confirmed_in,
			funding_tx_confirmation_height,
			short_channel_id,
			channel_creation_height: channel_creation_height.unwrap(),

			counterparty_dust_limit_satoshis,
			holder_dust_limit_satoshis,
			counterparty_max_htlc_value_in_flight_msat,
			holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
			counterparty_selected_channel_reserve_satoshis,
			holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
			counterparty_htlc_minimum_msat,
			holder_htlc_minimum_msat,
			counterparty_max_accepted_htlcs,
			minimum_depth,

			counterparty_forwarding_info,

			channel_transaction_parameters: channel_parameters,
			funding_transaction,

			counterparty_cur_commitment_point,
			counterparty_prev_commitment_point,
			counterparty_node_id,

			counterparty_shutdown_scriptpubkey,

			commitment_secrets,

			channel_update_status,
			closing_signed_in_flight: false,

			announcement_sigs,

			#[cfg(any(test, fuzzing))]
			next_local_commitment_tx_fee_info_cached: Mutex::new(None),
			#[cfg(any(test, fuzzing))]
			next_remote_commitment_tx_fee_info_cached: Mutex::new(None),

			workaround_lnd_bug_4006: None,
			latest_inbound_scid_alias,
			// Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if it's missing
			outbound_scid_alias: outbound_scid_alias.unwrap_or(0),

			#[cfg(any(test, fuzzing))]
			historical_inbound_htlc_fulfills,

			channel_type: channel_type.unwrap(),
		})
	}
}

#[cfg(test)]
mod tests {
	use std::cmp;
	use bitcoin::blockdata::script::{Script, Builder};
	use bitcoin::blockdata::transaction::{Transaction, TxOut};
	use bitcoin::blockdata::constants::genesis_block;
	use bitcoin::blockdata::opcodes;
	use bitcoin::network::constants::Network;
	use hex;
	use ln::PaymentHash;
	use ln::channelmanager::{HTLCSource, PaymentId};
	use ln::channel::{Channel, InboundHTLCOutput, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator};
	use ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
	use ln::features::{InitFeatures, ChannelTypeFeatures};
	use ln::msgs::{ChannelUpdate, DataLossProtect, DecodeError, OptionalField, UnsignedChannelUpdate, MAX_VALUE_MSAT};
	use ln::script::ShutdownScript;
	use ln::chan_utils;
	use ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight};
	use chain::BestBlock;
	use chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
	use chain::keysinterface::{InMemorySigner, Recipient, KeyMaterial, KeysInterface};
	use chain::transaction::OutPoint;
	use util::config::UserConfig;
	use util::enforcing_trait_impls::EnforcingSigner;
	use util::errors::APIError;
	use util::test_utils;
	use util::test_utils::OnGetShutdownScriptpubkey;
	use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature, Scalar};
	use bitcoin::secp256k1::ffi::Signature as FFISignature;
	use bitcoin::secp256k1::{SecretKey, PublicKey};
	use bitcoin::secp256k1::ecdh::SharedSecret;
	use bitcoin::secp256k1::ecdsa::RecoverableSignature;
	use bitcoin::hashes::sha256::Hash as Sha256;
	use bitcoin::hashes::Hash;
	use bitcoin::hash_types::WPubkeyHash;
	use bitcoin::bech32::u5;
	use bitcoin::PackedLockTime;
	use bitcoin::util::address::WitnessVersion;
	use prelude::*;

	struct TestFeeEstimator {
		fee_est: u32
	}
	impl FeeEstimator for TestFeeEstimator {
		fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
			self.fee_est
		}
	}

	#[test]
	fn test_max_funding_satoshis_no_wumbo() {
		assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
		assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
		        "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
	}

	#[test]
	fn test_no_fee_check_overflow() {
		// Previously, calling `check_remote_fee` with a fee of 0xffffffff would overflow in
		// arithmetic, causing a panic with debug assertions enabled.
		let fee_est = TestFeeEstimator { fee_est: 42 };
		let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
		assert!(Channel::<InMemorySigner>::check_remote_fee(&bounded_fee_estimator, u32::max_value()).is_err());
	}
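
	// A minimal, self-contained sketch of the overflow-avoidance idea guarded by
	// the test above (illustrative only; `check_remote_fee` itself is the real
	// implementation): checked arithmetic turns a would-be overflow into `None`
	// instead of a debug-build panic.
	#[test]
	fn test_checked_fee_arithmetic_sketch() {
		// Scaling a maximal feerate overflows u32; `checked_mul` reports that as
		// `None` rather than panicking.
		let feerate: u32 = u32::max_value();
		assert!(feerate.checked_mul(1000).is_none());
		// A sane feerate scales fine.
		assert_eq!(253u32.checked_mul(1000), Some(253_000));
	}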

	struct Keys {
		signer: InMemorySigner,
	}
	impl KeysInterface for Keys {
		type Signer = InMemorySigner;

		fn get_node_secret(&self, _recipient: Recipient) -> Result<SecretKey, ()> { panic!(); }
		fn ecdh(&self, _recipient: Recipient, _other_key: &PublicKey, _tweak: Option<&Scalar>) -> Result<SharedSecret, ()> { panic!(); }
		fn get_inbound_payment_key_material(&self) -> KeyMaterial { panic!(); }
		fn get_destination_script(&self) -> Script {
			let secp_ctx = Secp256k1::signing_only();
			let channel_monitor_claim_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
			let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
			Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&channel_monitor_claim_key_hash[..]).into_script()
		}

		fn get_shutdown_scriptpubkey(&self) -> ShutdownScript {
			let secp_ctx = Secp256k1::signing_only();
			let channel_close_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
			ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key))
		}

		fn get_channel_signer(&self, _inbound: bool, _channel_value_satoshis: u64) -> InMemorySigner {
			self.signer.clone()
		}
		fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
		fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::Signer, DecodeError> { panic!(); }
		fn sign_invoice(&self, _hrp_bytes: &[u8], _invoice_data: &[u5], _recipient: Recipient) -> Result<RecoverableSignature, ()> { panic!(); }
	}

	#[cfg(not(feature = "grind_signatures"))]
	fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
		PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode(hex).unwrap()[..]).unwrap())
	}

	#[test]
	fn upfront_shutdown_script_incompatibility() {
		let features = InitFeatures::known().clear_shutdown_anysegwit();
		let non_v0_segwit_shutdown_script =
			ShutdownScript::new_witness_program(WitnessVersion::V16, &[0, 40]).unwrap();

		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		keys_provider.expect(OnGetShutdownScriptpubkey {
			returns: non_v0_segwit_shutdown_script.clone(),
		});

		let secp_ctx = Secp256k1::new();
		let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		match Channel::<EnforcingSigner>::new_outbound(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42) {
			Err(APIError::IncompatibleShutdownScript { script }) => {
				assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
			},
			Err(e) => panic!("Unexpected error: {:?}", e),
			Ok(_) => panic!("Expected error"),
		}
	}

	// Check that, during channel creation, we use the same feerate in the open channel message
	// as we do in the Channel object creation itself.
	#[test]
	fn test_open_channel_msg_fee() {
		let original_fee = 253;
		let mut fee_est = TestFeeEstimator { fee_est: original_fee };
		let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let node_a_chan = Channel::<EnforcingSigner>::new_outbound(&bounded_fee_estimator, &&keys_provider, node_a_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config, 0, 42).unwrap();

		// Now change the fee so we can check that the fee in the open_channel message is the
		// same as the old fee.
		fee_est.fee_est = 500;
		let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
		assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
	}

	#[test]
	fn test_holder_vs_counterparty_dust_limit() {
		// Test that when calculating the local and remote commitment transaction fees, the correct
		// dust limits are used.
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15000 });
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let logger = test_utils::TestLogger::new();

		// Go through the flow of opening a channel between two nodes, making sure
		// they have different dust limits.

		// Create Node A's channel pointing to Node B's pubkey
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config, 0, 42).unwrap();

		// Create Node B's channel by receiving Node A's open_channel message
		// Make sure A's dust limit is as we expect.
		let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();

		// Node B --> Node A: accept channel, explicitly setting B's dust limit.
		let mut accept_channel_msg = node_b_chan.accept_inbound_channel(0);
		accept_channel_msg.dust_limit_satoshis = 546;
		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &InitFeatures::known()).unwrap();
		node_a_chan.holder_dust_limit_satoshis = 1560;

		// Put some inbound and outbound HTLCs in A's channel.
		let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
		node_a_chan.pending_inbound_htlcs.push(InboundHTLCOutput {
			htlc_id: 0,
			amount_msat: htlc_amount_msat,
			payment_hash: PaymentHash(Sha256::hash(&[42; 32]).into_inner()),
			cltv_expiry: 300000000,
			state: InboundHTLCState::Committed,
		});
		node_a_chan.pending_outbound_htlcs.push(OutboundHTLCOutput {
			htlc_id: 1,
			amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
			payment_hash: PaymentHash(Sha256::hash(&[43; 32]).into_inner()),
			cltv_expiry: 200000000,
			state: OutboundHTLCState::Committed,
			source: HTLCSource::OutboundRoute {
				path: Vec::new(),
				session_priv: SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
				first_hop_htlc_msat: 548,
				payment_id: PaymentId([42; 32]),
				payment_secret: None,
				payment_params: None,
			}
		});

		// Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
		// the dust limit check.
		let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
		let local_commit_tx_fee = node_a_chan.next_local_commit_tx_fee_msat(htlc_candidate, None);
		let local_commit_fee_0_htlcs = Channel::<EnforcingSigner>::commit_tx_fee_msat(node_a_chan.feerate_per_kw, 0, node_a_chan.opt_anchors());
		assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);

		// Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
		// of the HTLCs are seen to be above the dust limit.
		node_a_chan.channel_transaction_parameters.is_outbound_from_holder = false;
		let remote_commit_fee_3_htlcs = Channel::<EnforcingSigner>::commit_tx_fee_msat(node_a_chan.feerate_per_kw, 3, node_a_chan.opt_anchors());
		let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
		let remote_commit_tx_fee = node_a_chan.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
	}

	#[test]
	fn test_timeout_vs_success_htlc_dust_limit() {
		// Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
		// calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
		// *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
		// `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
		let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 });
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut chan = Channel::<EnforcingSigner>::new_outbound(&fee_est, &&keys_provider, node_id, &InitFeatures::known(), 10000000, 100000, 42, &config, 0, 42).unwrap();

		let commitment_tx_fee_0_htlcs = Channel::<EnforcingSigner>::commit_tx_fee_msat(chan.feerate_per_kw, 0, chan.opt_anchors());
		let commitment_tx_fee_1_htlc = Channel::<EnforcingSigner>::commit_tx_fee_msat(chan.feerate_per_kw, 1, chan.opt_anchors());

		// If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
		// counted as dust when it shouldn't be.
		let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.opt_anchors()) / 1000) + chan.holder_dust_limit_satoshis + 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
		let commitment_tx_fee = chan.next_local_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);

		// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
		let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.opt_anchors()) / 1000) + chan.holder_dust_limit_satoshis - 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
		let commitment_tx_fee = chan.next_local_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

		chan.channel_transaction_parameters.is_outbound_from_holder = false;

		// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
		let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.opt_anchors()) / 1000) + chan.counterparty_dust_limit_satoshis + 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
		let commitment_tx_fee = chan.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

		// If swapped: this HTLC would be counted as dust when it shouldn't be.
		let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.opt_anchors()) / 1000) + chan.counterparty_dust_limit_satoshis - 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
		let commitment_tx_fee = chan.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
	}
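
	// A worked example of the dust-threshold arithmetic exercised above
	// (illustrative only; the weights are the BOLT 3 non-anchor constants,
	// assumed here rather than taken from `chan_utils`). An offered HTLC is
	// trimmed to dust on the holder's commitment when its value is at or below
	// the dust limit plus the fee of the HTLC-timeout transaction.
	#[test]
	fn test_dust_threshold_arithmetic_sketch() {
		let feerate_per_kw: u64 = 253;
		let htlc_timeout_weight: u64 = 663; // BOLT 3 weight without anchors
		let htlc_success_weight: u64 = 703; // BOLT 3 weight without anchors
		// Fees round down (integer division), per the BOLT 3 fee computation.
		assert_eq!(feerate_per_kw * htlc_timeout_weight / 1000, 167);
		assert_eq!(feerate_per_kw * htlc_success_weight / 1000, 177);
		// So with a 546 sat dust limit, offered HTLCs at or below 546 + 167 = 713
		// sats (and received HTLCs at or below 546 + 177 = 723 sats) are trimmed.
		let dust_limit_sats: u64 = 546;
		assert_eq!(dust_limit_sats + 167, 713);
		assert_eq!(dust_limit_sats + 177, 723);
	}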
2017-12-25 01:05:27 -05:00
#[ test ]
2020-03-17 19:54:16 -04:00
fn channel_reestablish_no_updates ( ) {
2022-06-29 15:13:40 +02:00
let feeest = LowerBoundedFeeEstimator ::new ( & TestFeeEstimator { fee_est : 15000 } ) ;
2020-03-02 12:55:53 -05:00
let logger = test_utils ::TestLogger ::new ( ) ;
2020-03-17 19:54:16 -04:00
let secp_ctx = Secp256k1 ::new ( ) ;
2020-06-17 08:29:30 -07:00
let seed = [ 42 ; 32 ] ;
2020-03-17 19:54:16 -04:00
let network = Network ::Testnet ;
2021-04-06 11:34:17 -07:00
let best_block = BestBlock ::from_genesis ( network ) ;
let chain_hash = best_block . block_hash ( ) ;
2020-03-02 12:55:53 -05:00
let keys_provider = test_utils ::TestKeysInterface ::new ( & seed , network ) ;
2020-03-17 19:54:16 -04:00
// Go through the flow of opening a channel between two nodes.
2020-06-08 20:47:55 -04:00
// Create Node A's channel pointing to Node B's pubkey
let node_b_node_id = PublicKey ::from_secret_key ( & secp_ctx , & SecretKey ::from_slice ( & [ 42 ; 32 ] ) . unwrap ( ) ) ;
2020-03-17 19:54:16 -04:00
let config = UserConfig ::default ( ) ;
2022-06-29 15:13:40 +02:00
let mut node_a_chan = Channel ::< EnforcingSigner > ::new_outbound ( & feeest , & & keys_provider , node_b_node_id , & InitFeatures ::known ( ) , 10000000 , 100000 , 42 , & config , 0 , 42 ) . unwrap ( ) ;
2020-03-17 19:54:16 -04:00
// Create Node B's channel by receiving Node A's open_channel message
2021-03-04 18:22:37 -08:00
let open_channel_msg = node_a_chan . get_open_channel ( chain_hash ) ;
2020-03-17 19:54:16 -04:00
let node_b_node_id = PublicKey ::from_secret_key ( & secp_ctx , & SecretKey ::from_slice ( & [ 7 ; 32 ] ) . unwrap ( ) ) ;
2022-06-29 15:13:40 +02:00
let mut node_b_chan = Channel ::< EnforcingSigner > ::new_from_req ( & feeest , & & keys_provider , node_b_node_id , & InitFeatures ::known ( ) , & open_channel_msg , 7 , & config , 0 , & & logger , 42 ) . unwrap ( ) ;
// Node B --> Node A: accept channel
let accept_channel_msg = node_b_chan . accept_inbound_channel ( 0 ) ;
node_a_chan . accept_channel ( & accept_channel_msg , & config . channel_handshake_limits , & InitFeatures ::known ( ) ) . unwrap ( ) ;
// Node A --> Node B: funding created
let output_script = node_a_chan . get_funding_redeemscript ( ) ;
let tx = Transaction { version : 1 , lock_time : PackedLockTime ::ZERO , input : Vec ::new ( ) , output : vec ! [ TxOut {
value : 10000000 , script_pubkey : output_script . clone ( ) ,
} ] } ;
let funding_outpoint = OutPoint { txid : tx . txid ( ) , index : 0 } ;
let funding_created_msg = node_a_chan . get_outbound_funding_created ( tx . clone ( ) , funding_outpoint , & & logger ) . unwrap ( ) ;
let ( funding_signed_msg , _ , _ ) = node_b_chan . funding_created ( & funding_created_msg , best_block , & & logger ) . unwrap ( ) ;
// Node B --> Node A: funding signed
let _ = node_a_chan . funding_signed ( & funding_signed_msg , best_block , & & logger ) ;
// Now disconnect the two nodes and check that the commitment point in
// Node B's channel_reestablish message is sane.
node_b_chan . remove_uncommitted_htlcs_and_mark_paused ( & & logger ) ;
let msg = node_b_chan . get_channel_reestablish ( & & logger ) ;
assert_eq! ( msg . next_local_commitment_number , 1 ) ; // now called next_commitment_number
assert_eq! ( msg . next_remote_commitment_number , 0 ) ; // now called next_revocation_number
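// Node B next expects to receive commitment number 1 (the initial commitment, number 0, was exchanged at funding) and has not yet received any revocation, hence 0.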
match msg . data_loss_protect {
OptionalField ::Present ( DataLossProtect { your_last_per_commitment_secret , .. } ) = > {
assert_eq! ( your_last_per_commitment_secret , [ 0 ; 32 ] ) ;
} ,
_ = > panic! ( )
}
// Check that the commitment point in Node A's channel_reestablish message
// is sane.
node_a_chan . remove_uncommitted_htlcs_and_mark_paused ( & & logger ) ;
let msg = node_a_chan . get_channel_reestablish ( & & logger ) ;
assert_eq! ( msg . next_local_commitment_number , 1 ) ; // now called next_commitment_number
assert_eq! ( msg . next_remote_commitment_number , 0 ) ; // now called next_revocation_number
match msg . data_loss_protect {
OptionalField ::Present ( DataLossProtect { your_last_per_commitment_secret , .. } ) = > {
assert_eq! ( your_last_per_commitment_secret , [ 0 ; 32 ] ) ;
} ,
_ = > panic! ( )
}
}
#[ test ]
fn test_configured_holder_max_htlc_value_in_flight ( ) {
let feeest = LowerBoundedFeeEstimator ::new ( & TestFeeEstimator { fee_est : 15000 } ) ;
let logger = test_utils ::TestLogger ::new ( ) ;
let secp_ctx = Secp256k1 ::new ( ) ;
let seed = [ 42 ; 32 ] ;
let network = Network ::Testnet ;
let keys_provider = test_utils ::TestKeysInterface ::new ( & seed , network ) ;
let outbound_node_id = PublicKey ::from_secret_key ( & secp_ctx , & SecretKey ::from_slice ( & [ 42 ; 32 ] ) . unwrap ( ) ) ;
let inbound_node_id = PublicKey ::from_secret_key ( & secp_ctx , & SecretKey ::from_slice ( & [ 7 ; 32 ] ) . unwrap ( ) ) ;
let mut config_2_percent = UserConfig ::default ( ) ;
config_2_percent . channel_handshake_config . max_inbound_htlc_value_in_flight_percent_of_channel = 2 ;
let mut config_99_percent = UserConfig ::default ( ) ;
config_99_percent . channel_handshake_config . max_inbound_htlc_value_in_flight_percent_of_channel = 99 ;
let mut config_0_percent = UserConfig ::default ( ) ;
config_0_percent . channel_handshake_config . max_inbound_htlc_value_in_flight_percent_of_channel = 0 ;
let mut config_101_percent = UserConfig ::default ( ) ;
config_101_percent . channel_handshake_config . max_inbound_htlc_value_in_flight_percent_of_channel = 101 ;
// Test that `new_outbound` creates a channel with the correct value for
// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
// which is set to the lower bound + 1 (2%) of the `channel_value`.
let chan_1 = Channel ::< EnforcingSigner > ::new_outbound ( & feeest , & & keys_provider , outbound_node_id , & InitFeatures ::known ( ) , 10000000 , 100000 , 42 , & config_2_percent , 0 , 42 ) . unwrap ( ) ;
let chan_1_value_msat = chan_1 . channel_value_satoshis * 1000 ;
assert_eq! ( chan_1 . holder_max_htlc_value_in_flight_msat , ( chan_1_value_msat as f64 * 0.02 ) as u64 ) ;
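// Sanity-check the arithmetic: a 10_000_000 sat channel is 10_000_000_000 msat, so a 2% cap is 200_000_000 msat.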
// Test with the upper bound - 1 of valid values (99%).
let chan_2 = Channel ::< EnforcingSigner > ::new_outbound ( & feeest , & & keys_provider , outbound_node_id , & InitFeatures ::known ( ) , 10000000 , 100000 , 42 , & config_99_percent , 0 , 42 ) . unwrap ( ) ;
let chan_2_value_msat = chan_2 . channel_value_satoshis * 1000 ;
assert_eq! ( chan_2 . holder_max_htlc_value_in_flight_msat , ( chan_2_value_msat as f64 * 0.99 ) as u64 ) ;
let chan_1_open_channel_msg = chan_1 . get_open_channel ( genesis_block ( network ) . header . block_hash ( ) ) ;
// Test that `new_from_req` creates a channel with the correct value for
// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
// which is set to the lower bound + 1 (2%) of the `channel_value`.
let chan_3 = Channel ::< EnforcingSigner > ::new_from_req ( & feeest , & & keys_provider , inbound_node_id , & InitFeatures ::known ( ) , & chan_1_open_channel_msg , 7 , & config_2_percent , 0 , & & logger , 42 ) . unwrap ( ) ;
let chan_3_value_msat = chan_3 . channel_value_satoshis * 1000 ;
assert_eq! ( chan_3 . holder_max_htlc_value_in_flight_msat , ( chan_3_value_msat as f64 * 0.02 ) as u64 ) ;
// Test with the upper bound - 1 of valid values (99%).
let chan_4 = Channel ::< EnforcingSigner > ::new_from_req ( & feeest , & & keys_provider , inbound_node_id , & InitFeatures ::known ( ) , & chan_1_open_channel_msg , 7 , & config_99_percent , 0 , & & logger , 42 ) . unwrap ( ) ;
let chan_4_value_msat = chan_4 . channel_value_satoshis * 1000 ;
assert_eq! ( chan_4 . holder_max_htlc_value_in_flight_msat , ( chan_4_value_msat as f64 * 0.99 ) as u64 ) ;
// Test that `new_outbound` uses the lower bound of the configurable percentage values (1%)
// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
let chan_5 = Channel ::< EnforcingSigner > ::new_outbound ( & feeest , & & keys_provider , outbound_node_id , & InitFeatures ::known ( ) , 10000000 , 100000 , 42 , & config_0_percent , 0 , 42 ) . unwrap ( ) ;
let chan_5_value_msat = chan_5 . channel_value_satoshis * 1000 ;
assert_eq! ( chan_5 . holder_max_htlc_value_in_flight_msat , ( chan_5_value_msat as f64 * 0.01 ) as u64 ) ;
// Test that `new_outbound` uses the upper bound of the configurable percentage values
// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
// than 100.
let chan_6 = Channel ::< EnforcingSigner > ::new_outbound ( & feeest , & & keys_provider , outbound_node_id , & InitFeatures ::known ( ) , 10000000 , 100000 , 42 , & config_101_percent , 0 , 42 ) . unwrap ( ) ;
let chan_6_value_msat = chan_6 . channel_value_satoshis * 1000 ;
assert_eq! ( chan_6 . holder_max_htlc_value_in_flight_msat , chan_6_value_msat ) ;
// Test that `new_from_req` uses the lower bound of the configurable percentage values (1%)
// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
let chan_7 = Channel ::< EnforcingSigner > ::new_from_req ( & feeest , & & keys_provider , inbound_node_id , & InitFeatures ::known ( ) , & chan_1_open_channel_msg , 7 , & config_0_percent , 0 , & & logger , 42 ) . unwrap ( ) ;
let chan_7_value_msat = chan_7 . channel_value_satoshis * 1000 ;
assert_eq! ( chan_7 . holder_max_htlc_value_in_flight_msat , ( chan_7_value_msat as f64 * 0.01 ) as u64 ) ;
// Test that `new_from_req` uses the upper bound of the configurable percentage values
// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
// than 100.
let chan_8 = Channel ::< EnforcingSigner > ::new_from_req ( & feeest , & & keys_provider , inbound_node_id , & InitFeatures ::known ( ) , & chan_1_open_channel_msg , 7 , & config_101_percent , 0 , & & logger , 42 ) . unwrap ( ) ;
let chan_8_value_msat = chan_8 . channel_value_satoshis * 1000 ;
assert_eq! ( chan_8 . holder_max_htlc_value_in_flight_msat , chan_8_value_msat ) ;
}
#[ test ]
fn test_configured_holder_selected_channel_reserve_satoshis ( ) {
// Test that `new_outbound` and `new_from_req` create a channel with the correct
// channel reserves when `their_channel_reserve_proportional_millionths` is configured.
test_self_and_counterparty_channel_reserve ( 10_000_000 , 0.02 , 0.02 ) ;
// Test with valid but unreasonably high channel reserves
// Requesting and accepting parties have requested 49%-49% and 60%-30% channel reserves
test_self_and_counterparty_channel_reserve ( 10_000_000 , 0.49 , 0.49 ) ;
test_self_and_counterparty_channel_reserve ( 10_000_000 , 0.60 , 0.30 ) ;
// Test with a calculated channel reserve below the lower bound,
// i.e. `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
test_self_and_counterparty_channel_reserve ( 100_000 , 0.00002 , 0.30 ) ;
// Test with invalid channel reserves, where the sum of the two reserves is greater than or
// equal to the channel value
test_self_and_counterparty_channel_reserve ( 10_000_000 , 0.50 , 0.50 ) ;
test_self_and_counterparty_channel_reserve ( 10_000_000 , 0.60 , 0.50 ) ;
}
fn test_self_and_counterparty_channel_reserve ( channel_value_satoshis : u64 , outbound_selected_channel_reserve_perc : f64 , inbound_selected_channel_reserve_perc : f64 ) {
let fee_est = LowerBoundedFeeEstimator ::new ( & TestFeeEstimator { fee_est : 15_000 } ) ;
let logger = test_utils ::TestLogger ::new ( ) ;
let secp_ctx = Secp256k1 ::new ( ) ;
let seed = [ 42 ; 32 ] ;
let network = Network ::Testnet ;
let keys_provider = test_utils ::TestKeysInterface ::new ( & seed , network ) ;
let outbound_node_id = PublicKey ::from_secret_key ( & secp_ctx , & SecretKey ::from_slice ( & [ 42 ; 32 ] ) . unwrap ( ) ) ;
let inbound_node_id = PublicKey ::from_secret_key ( & secp_ctx , & SecretKey ::from_slice ( & [ 7 ; 32 ] ) . unwrap ( ) ) ;
let mut outbound_node_config = UserConfig ::default ( ) ;
outbound_node_config . channel_handshake_config . their_channel_reserve_proportional_millionths = ( outbound_selected_channel_reserve_perc * 1_000_000.0 ) as u32 ;
let chan = Channel ::< EnforcingSigner > ::new_outbound ( & & fee_est , & & keys_provider , outbound_node_id , & InitFeatures ::known ( ) , channel_value_satoshis , 100_000 , 42 , & outbound_node_config , 0 , 42 ) . unwrap ( ) ;
let expected_outbound_selected_chan_reserve = cmp ::max ( MIN_THEIR_CHAN_RESERVE_SATOSHIS , ( chan . channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc ) as u64 ) ;
assert_eq! ( chan . holder_selected_channel_reserve_satoshis , expected_outbound_selected_chan_reserve ) ;
let chan_open_channel_msg = chan . get_open_channel ( genesis_block ( network ) . header . block_hash ( ) ) ;
let mut inbound_node_config = UserConfig ::default ( ) ;
inbound_node_config . channel_handshake_config . their_channel_reserve_proportional_millionths = ( inbound_selected_channel_reserve_perc * 1_000_000.0 ) as u32 ;
if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
let chan_inbound_node = Channel ::< EnforcingSigner > ::new_from_req ( & & fee_est , & & keys_provider , inbound_node_id , & InitFeatures ::known ( ) , & chan_open_channel_msg , 7 , & inbound_node_config , 0 , & & logger , 42 ) . unwrap ( ) ;
let expected_inbound_selected_chan_reserve = cmp ::max ( MIN_THEIR_CHAN_RESERVE_SATOSHIS , ( chan . channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc ) as u64 ) ;
assert_eq! ( chan_inbound_node . holder_selected_channel_reserve_satoshis , expected_inbound_selected_chan_reserve ) ;
assert_eq! ( chan_inbound_node . counterparty_selected_channel_reserve_satoshis . unwrap ( ) , expected_outbound_selected_chan_reserve ) ;
} else {
// Channel negotiation failed
let result = Channel ::< EnforcingSigner > ::new_from_req ( & & fee_est , & & keys_provider , inbound_node_id , & InitFeatures ::known ( ) , & chan_open_channel_msg , 7 , & inbound_node_config , 0 , & & logger , 42 ) ;
assert! ( result . is_err ( ) ) ;
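// The inbound side must reject the open_channel here, as the two reserves together would consume the entire channel value.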
}
}
#[ test ]
fn channel_update ( ) {
let feeest = LowerBoundedFeeEstimator ::new ( & TestFeeEstimator { fee_est : 15000 } ) ;
let secp_ctx = Secp256k1 ::new ( ) ;
let seed = [ 42 ; 32 ] ;
let network = Network ::Testnet ;
let chain_hash = genesis_block ( network ) . header . block_hash ( ) ;
let keys_provider = test_utils ::TestKeysInterface ::new ( & seed , network ) ;
// Create a channel.
let node_b_node_id = PublicKey ::from_secret_key ( & secp_ctx , & SecretKey ::from_slice ( & [ 42 ; 32 ] ) . unwrap ( ) ) ;
let config = UserConfig ::default ( ) ;
let mut node_a_chan = Channel ::< EnforcingSigner > ::new_outbound ( & feeest , & & keys_provider , node_b_node_id , & InitFeatures ::known ( ) , 10000000 , 100000 , 42 , & config , 0 , 42 ) . unwrap ( ) ;
assert! ( node_a_chan . counterparty_forwarding_info . is_none ( ) ) ;
assert_eq! ( node_a_chan . holder_htlc_minimum_msat , 1 ) ; // the default
assert! ( node_a_chan . counterparty_forwarding_info ( ) . is_none ( ) ) ;
// Make sure that receiving a channel update will update the Channel as expected.
let update = ChannelUpdate {
contents : UnsignedChannelUpdate {
chain_hash ,
short_channel_id : 0 ,
timestamp : 0 ,
flags : 0 ,
cltv_expiry_delta : 100 ,
htlc_minimum_msat : 5 ,
htlc_maximum_msat : MAX_VALUE_MSAT ,
fee_base_msat : 110 ,
fee_proportional_millionths : 11 ,
excess_data : Vec ::new ( ) ,
} ,
signature : Signature ::from ( unsafe { FFISignature ::new ( ) } )
} ;
node_a_chan . channel_update ( & update ) . unwrap ( ) ;
// The counterparty can send an update with a higher minimum HTLC, but that shouldn't
// change our official htlc_minimum_msat.
assert_eq! ( node_a_chan . holder_htlc_minimum_msat , 1 ) ;
match node_a_chan . counterparty_forwarding_info ( ) {
Some ( info ) = > {
assert_eq! ( info . cltv_expiry_delta , 100 ) ;
assert_eq! ( info . fee_base_msat , 110 ) ;
assert_eq! ( info . fee_proportional_millionths , 11 ) ;
} ,
None = > panic! ( " expected counterparty forwarding info to be Some " )
}
}
#[ cfg(not(feature = " grind_signatures " )) ]
#[ test ]
fn outbound_commitment_test ( ) {
use bitcoin ::util ::sighash ;
use bitcoin ::consensus ::encode ::serialize ;
use bitcoin ::blockdata ::transaction ::EcdsaSighashType ;
use bitcoin ::hashes ::hex ::FromHex ;
use bitcoin ::hash_types ::Txid ;
use bitcoin ::secp256k1 ::Message ;
use chain ::keysinterface ::BaseSign ;
use ln ::PaymentPreimage ;
use ln ::channel ::{ HTLCOutputInCommitment , TxCreationKeys } ;
use ln ::chan_utils ::{ ChannelPublicKeys , HolderCommitmentTransaction , CounterpartyChannelTransactionParameters } ;
use util ::logger ::Logger ;
use sync ::Arc ;
// Test vectors from BOLT 3 Appendices C and F (anchors):
let feeest = TestFeeEstimator { fee_est : 15000 } ;
let logger : Arc < Logger > = Arc ::new ( test_utils ::TestLogger ::new ( ) ) ;
let secp_ctx = Secp256k1 ::new ( ) ;
let mut signer = InMemorySigner ::new (
& secp_ctx ,
SecretKey ::from_slice ( & hex ::decode ( " 4242424242424242424242424242424242424242424242424242424242424242 " ) . unwrap ( ) [ .. ] ) . unwrap ( ) ,
SecretKey ::from_slice ( & hex ::decode ( " 30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749 " ) . unwrap ( ) [ .. ] ) . unwrap ( ) ,
SecretKey ::from_slice ( & hex ::decode ( " 0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff " ) . unwrap ( ) [ .. ] ) . unwrap ( ) ,
SecretKey ::from_slice ( & hex ::decode ( " 1111111111111111111111111111111111111111111111111111111111111111 " ) . unwrap ( ) [ .. ] ) . unwrap ( ) ,
SecretKey ::from_slice ( & hex ::decode ( " 3333333333333333333333333333333333333333333333333333333333333333 " ) . unwrap ( ) [ .. ] ) . unwrap ( ) ,
SecretKey ::from_slice ( & hex ::decode ( " 1111111111111111111111111111111111111111111111111111111111111111 " ) . unwrap ( ) [ .. ] ) . unwrap ( ) ,
// These aren't set in the test vectors:
[ 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff ] ,
10_000_000 ,
[ 0 ; 32 ]
) ;
assert_eq! ( signer . pubkeys ( ) . funding_pubkey . serialize ( ) [ .. ] ,
hex ::decode ( " 023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb " ) . unwrap ( ) [ .. ] ) ;
let keys_provider = Keys { signer : signer . clone ( ) } ;
let counterparty_node_id = PublicKey ::from_secret_key ( & secp_ctx , & SecretKey ::from_slice ( & [ 42 ; 32 ] ) . unwrap ( ) ) ;
let mut config = UserConfig ::default ( ) ;
config . channel_handshake_config . announced_channel = false ;
let mut chan = Channel ::< InMemorySigner > ::new_outbound ( & LowerBoundedFeeEstimator ::new ( & feeest ) , & & keys_provider , counterparty_node_id , & InitFeatures ::known ( ) , 10_000_000 , 100000 , 42 , & config , 0 , 42 ) . unwrap ( ) ; // Nothing uses their network key in this test
chan . holder_dust_limit_satoshis = 546 ;
chan . counterparty_selected_channel_reserve_satoshis = Some ( 0 ) ; // Filled in in accept_channel
let funding_info = OutPoint { txid : Txid ::from_hex ( " 8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be " ) . unwrap ( ) , index : 0 } ;
let counterparty_pubkeys = ChannelPublicKeys {
funding_pubkey : public_from_secret_hex ( & secp_ctx , " 1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13 " ) ,
revocation_basepoint : PublicKey ::from_slice ( & hex ::decode ( " 02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27 " ) . unwrap ( ) [ .. ] ) . unwrap ( ) ,
payment_point : public_from_secret_hex ( & secp_ctx , " 4444444444444444444444444444444444444444444444444444444444444444 " ) ,
delayed_payment_basepoint : public_from_secret_hex ( & secp_ctx , " 1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13 " ) ,
htlc_basepoint : public_from_secret_hex ( & secp_ctx , " 4444444444444444444444444444444444444444444444444444444444444444 " )
} ;
chan . channel_transaction_parameters . counterparty_parameters = Some (
CounterpartyChannelTransactionParameters {
pubkeys : counterparty_pubkeys . clone ( ) ,
selected_contest_delay : 144
} ) ;
chan . channel_transaction_parameters . funding_outpoint = Some ( funding_info ) ;
signer . ready_channel ( & chan . channel_transaction_parameters ) ;
assert_eq! ( counterparty_pubkeys . payment_point . serialize ( ) [ .. ] ,
hex ::decode ( " 032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991 " ) . unwrap ( ) [ .. ] ) ;
assert_eq! ( counterparty_pubkeys . funding_pubkey . serialize ( ) [ .. ] ,
hex ::decode ( " 030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1 " ) . unwrap ( ) [ .. ] ) ;
assert_eq! ( counterparty_pubkeys . htlc_basepoint . serialize ( ) [ .. ] ,
hex ::decode ( " 032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991 " ) . unwrap ( ) [ .. ] ) ;
// We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
// derived from a commitment_seed, so instead we copy it here and call
// build_commitment_transaction.
let delayed_payment_base = & chan . holder_signer . pubkeys ( ) . delayed_payment_basepoint ;
let per_commitment_secret = SecretKey ::from_slice ( & hex ::decode ( " 1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100 " ) . unwrap ( ) [ .. ] ) . unwrap ( ) ;
let per_commitment_point = PublicKey ::from_secret_key ( & secp_ctx , & per_commitment_secret ) ;
let htlc_basepoint = & chan . holder_signer . pubkeys ( ) . htlc_basepoint ;
let keys = TxCreationKeys ::derive_new ( & secp_ctx , & per_commitment_point , delayed_payment_base , htlc_basepoint , & counterparty_pubkeys . revocation_basepoint , & counterparty_pubkeys . htlc_basepoint ) . unwrap ( ) ;
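// derive_new tweaks both parties' basepoints by the per-commitment point, yielding the per-commitment key set (revocation, HTLC and delayed-payment keys) used below.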
macro_rules ! test_commitment {
( $counterparty_sig_hex : expr , $sig_hex : expr , $tx_hex : expr , $( $remain :tt ) * ) = > {
chan . channel_transaction_parameters . opt_anchors = None ;
test_commitment_common! ( $counterparty_sig_hex , $sig_hex , $tx_hex , false , $( $remain ) * ) ;
} ;
}
macro_rules ! test_commitment_with_anchors {
( $counterparty_sig_hex : expr , $sig_hex : expr , $tx_hex : expr , $( $remain :tt ) * ) = > {
chan . channel_transaction_parameters . opt_anchors = Some ( ( ) ) ;
test_commitment_common! ( $counterparty_sig_hex , $sig_hex , $tx_hex , true , $( $remain ) * ) ;
} ;
}
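// test_commitment_common builds the commitment transaction, verifies the counterparty's signature over it, checks the holder's signature and the serialized transaction against the BOLT 3 vectors, and then repeats the signature and serialization checks for every HTLC transaction.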
macro_rules ! test_commitment_common {
( $counterparty_sig_hex : expr , $sig_hex : expr , $tx_hex : expr , $opt_anchors : expr , {
$( { $htlc_idx : expr , $counterparty_htlc_sig_hex : expr , $htlc_sig_hex : expr , $htlc_tx_hex : expr } ) , *
} ) = > { {
let ( commitment_tx , htlcs ) : ( _ , Vec < HTLCOutputInCommitment > ) = {
let mut commitment_stats = chan . build_commitment_transaction ( 0xffffffffffff - 42 , & keys , true , false , & logger ) ;
let htlcs = commitment_stats . htlcs_included . drain ( .. )
. filter_map ( | ( htlc , _ ) | if htlc . transaction_output_index . is_some ( ) { Some ( htlc ) } else { None } )
. collect ( ) ;
( commitment_stats . tx , htlcs )
} ;
let trusted_tx = commitment_tx . trust ( ) ;
let unsigned_tx = trusted_tx . built_transaction ( ) ;
let redeemscript = chan . get_funding_redeemscript ( ) ;
let counterparty_signature = Signature ::from_der ( & hex ::decode ( $counterparty_sig_hex ) . unwrap ( ) [ .. ] ) . unwrap ( ) ;
let sighash = unsigned_tx . get_sighash_all ( & redeemscript , chan . channel_value_satoshis ) ;
log_trace! ( logger , " unsigned_tx = {} " , hex ::encode ( serialize ( & unsigned_tx . transaction ) ) ) ;
assert! ( secp_ctx . verify_ecdsa ( & sighash , & counterparty_signature , chan . counterparty_funding_pubkey ( ) ) . is_ok ( ) , " verify counterparty commitment sig " ) ;
let mut per_htlc : Vec < ( HTLCOutputInCommitment , Option < Signature > ) > = Vec ::new ( ) ;
per_htlc . clear ( ) ; // Don't warn about excess mut for no-HTLC calls
let mut counterparty_htlc_sigs = Vec ::new ( ) ;
counterparty_htlc_sigs . clear ( ) ; // Don't warn about excess mut for no-HTLC calls
$( {
let remote_signature = Signature ::from_der ( & hex ::decode ( $counterparty_htlc_sig_hex ) . unwrap ( ) [ .. ] ) . unwrap ( ) ;
per_htlc . push ( ( htlcs [ $htlc_idx ] . clone ( ) , Some ( remote_signature ) ) ) ;
counterparty_htlc_sigs . push ( remote_signature ) ;
} ) *
assert_eq! ( htlcs . len ( ) , per_htlc . len ( ) ) ;
let holder_commitment_tx = HolderCommitmentTransaction ::new (
commitment_tx . clone ( ) ,
counterparty_signature ,
counterparty_htlc_sigs ,
& chan . holder_signer . pubkeys ( ) . funding_pubkey ,
chan . counterparty_funding_pubkey ( )
) ;
let ( holder_sig , htlc_sigs ) = signer . sign_holder_commitment_and_htlcs ( & holder_commitment_tx , & secp_ctx ) . unwrap ( ) ;
assert_eq! ( Signature ::from_der ( & hex ::decode ( $sig_hex ) . unwrap ( ) [ .. ] ) . unwrap ( ) , holder_sig , " holder_sig " ) ;
let funding_redeemscript = chan . get_funding_redeemscript ( ) ;
let tx = holder_commitment_tx . add_holder_sig ( & funding_redeemscript , holder_sig ) ;
assert_eq! ( serialize ( & tx ) [ .. ] , hex ::decode ( $tx_hex ) . unwrap ( ) [ .. ] , " tx " ) ;
// ((htlc, counterparty_sig), (index, holder_sig))
let mut htlc_sig_iter = holder_commitment_tx . htlcs ( ) . iter ( ) . zip ( & holder_commitment_tx . counterparty_htlc_sigs ) . zip ( htlc_sigs . iter ( ) . enumerate ( ) ) ;
$( {
log_trace! ( logger , " verifying htlc {} " , $htlc_idx ) ;
let remote_signature = Signature ::from_der ( & hex ::decode ( $counterparty_htlc_sig_hex ) . unwrap ( ) [ .. ] ) . unwrap ( ) ;
let ref htlc = htlcs [ $htlc_idx ] ;
let htlc_tx = chan_utils ::build_htlc_transaction ( & unsigned_tx . txid , chan . feerate_per_kw ,
chan . get_counterparty_selected_contest_delay ( ) . unwrap ( ) ,
& htlc , $opt_anchors , & keys . broadcaster_delayed_payment_key , & keys . revocation_key ) ;
let htlc_redeemscript = chan_utils ::get_htlc_redeemscript ( & htlc , $opt_anchors , & keys ) ;
let htlc_sighashtype = if $opt_anchors { EcdsaSighashType ::SinglePlusAnyoneCanPay } else { EcdsaSighashType ::All } ;
let htlc_sighash = Message ::from_slice ( & sighash ::SighashCache ::new ( & htlc_tx ) . segwit_signature_hash ( 0 , & htlc_redeemscript , htlc . amount_msat / 1000 , htlc_sighashtype ) . unwrap ( ) [ .. ] ) . unwrap ( ) ;
assert! ( secp_ctx . verify_ecdsa ( & htlc_sighash , & remote_signature , & keys . countersignatory_htlc_key ) . is_ok ( ) , " verify counterparty htlc sig " ) ;
let mut preimage : Option < PaymentPreimage > = None ;
if ! htlc . offered {
for i in 0 .. 5 {
let out = PaymentHash ( Sha256 ::hash ( & [ i ; 32 ] ) . into_inner ( ) ) ;
if out = = htlc . payment_hash {
preimage = Some ( PaymentPreimage ( [ i ; 32 ] ) ) ;
}
}
assert! ( preimage . is_some ( ) ) ;
}
let htlc_sig = htlc_sig_iter . next ( ) . unwrap ( ) ;
let num_anchors = if $opt_anchors { 2 } else { 0 } ;
assert_eq! ( ( htlc_sig . 0 ) . 0. transaction_output_index , Some ( $htlc_idx + num_anchors ) , " output index " ) ;
let signature = Signature ::from_der ( & hex ::decode ( $htlc_sig_hex ) . unwrap ( ) [ .. ] ) . unwrap ( ) ;
assert_eq! ( signature , * ( htlc_sig . 1 ) . 1 , " htlc sig " ) ;
let index = ( htlc_sig . 1 ) . 0 ;
let channel_parameters = chan . channel_transaction_parameters . as_holder_broadcastable ( ) ;
let trusted_tx = holder_commitment_tx . trust ( ) ;
log_trace! ( logger , " htlc_tx = {} " , hex ::encode ( serialize ( & trusted_tx . get_signed_htlc_tx ( & channel_parameters , index , & ( htlc_sig . 0 ) . 1 , ( htlc_sig . 1 ) . 1 , & preimage ) ) ) ) ;
assert_eq! ( serialize ( & trusted_tx . get_signed_htlc_tx ( & channel_parameters , index , & ( htlc_sig . 0 ) . 1 , ( htlc_sig . 1 ) . 1 , & preimage ) ) [ .. ] ,
hex ::decode ( $htlc_tx_hex ) . unwrap ( ) [ .. ] , " htlc tx " ) ;
} ) *
assert! ( htlc_sig_iter . next ( ) . is_none ( ) ) ;
} }
}
// simple commitment tx with no HTLCs
chan . value_to_self_msat = 7000000000 ;
test_commitment! ( " 3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0 " ,
" 30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142 " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , { } ) ;
// anchors: simple commitment tx with no HTLCs
test_commitment_with_anchors! ( " 3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3 " ,
" 30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7 " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , { } ) ;
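// Attach the five HTLCs used by the BOLT 3 vectors: three inbound (ids 0, 1 and 4) and two outbound (ids 2 and 3), whose payment hashes commit to the repeated-byte preimages 0x00 through 0x04.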
chan . pending_inbound_htlcs . push ( {
let mut out = InboundHTLCOutput {
htlc_id : 0 ,
amount_msat : 1000000 ,
cltv_expiry : 500 ,
payment_hash : PaymentHash ( [ 0 ; 32 ] ) ,
state : InboundHTLCState ::Committed ,
} ;
out . payment_hash . 0 = Sha256 ::hash ( & hex ::decode ( " 0000000000000000000000000000000000000000000000000000000000000000 " ) . unwrap ( ) ) . into_inner ( ) ;
out
} ) ;
chan . pending_inbound_htlcs . push ( {
let mut out = InboundHTLCOutput {
htlc_id : 1 ,
amount_msat : 2000000 ,
cltv_expiry : 501 ,
payment_hash : PaymentHash ( [ 0 ; 32 ] ) ,
state : InboundHTLCState ::Committed ,
} ;
out . payment_hash . 0 = Sha256 ::hash ( & hex ::decode ( " 0101010101010101010101010101010101010101010101010101010101010101 " ) . unwrap ( ) ) . into_inner ( ) ;
out
} ) ;
chan . pending_outbound_htlcs . push ( {
let mut out = OutboundHTLCOutput {
htlc_id : 2 ,
amount_msat : 2000000 ,
cltv_expiry : 502 ,
payment_hash : PaymentHash ( [ 0 ; 32 ] ) ,
state : OutboundHTLCState ::Committed ,
source : HTLCSource ::dummy ( ) ,
} ;
out . payment_hash . 0 = Sha256 ::hash ( & hex ::decode ( " 0202020202020202020202020202020202020202020202020202020202020202 " ) . unwrap ( ) ) . into_inner ( ) ;
out
} ) ;
chan . pending_outbound_htlcs . push ( {
let mut out = OutboundHTLCOutput {
htlc_id : 3 ,
amount_msat : 3000000 ,
cltv_expiry : 503 ,
payment_hash : PaymentHash ( [ 0 ; 32 ] ) ,
state : OutboundHTLCState ::Committed ,
source : HTLCSource ::dummy ( ) ,
} ;
out . payment_hash . 0 = Sha256 ::hash ( & hex ::decode ( " 0303030303030303030303030303030303030303030303030303030303030303 " ) . unwrap ( ) ) . into_inner ( ) ;
out
} ) ;
chan . pending_inbound_htlcs . push ( {
let mut out = InboundHTLCOutput {
htlc_id : 4 ,
amount_msat : 4000000 ,
cltv_expiry : 504 ,
payment_hash : PaymentHash ( [ 0 ; 32 ] ) ,
state : InboundHTLCState ::Committed ,
} ;
out . payment_hash . 0 = Sha256 ::hash ( & hex ::decode ( " 0404040404040404040404040404040404040404040404040404040404040404 " ) . unwrap ( ) ) . into_inner ( ) ;
out
} ) ;
// commitment tx with all five HTLCs untrimmed (minimum feerate)
chan . value_to_self_msat = 6993000000 ; // 7000000000 - 7000000
chan . feerate_per_kw = 0 ;
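// At the minimum feerate of 0 nothing is trimmed, so all five HTLCs appear as outputs.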
test_commitment! ( " 3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5 " ,
" 304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , {
{ 0 ,
" 3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b " ,
" 30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce " ,
" 02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000 " } ,
{ 1 ,
" 30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004 " ,
" 3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f " ,
" 02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000 " } ,
{ 2 ,
" 30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352 " ,
" 3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa " ,
" 02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000 " } ,
{ 3 ,
" 304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363 " ,
" 304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487 " ,
" 02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000 " } ,
{ 4 ,
" 3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87 " ,
" 3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95 " ,
" 02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000 " }
} ) ;
// commitment tx with seven outputs untrimmed (maximum feerate)
chan . value_to_self_msat = 6993000000 ; // 7000000000 - 7000000
chan . feerate_per_kw = 647 ;
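// 647 sat/kW is the highest feerate at which all seven outputs (five HTLCs plus both balance outputs) remain untrimmed.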
test_commitment! ( " 3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee " ,
" 30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7 " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , {
{ 0 ,
" 30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f " ,
" 30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b " ,
" 020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000 " } ,
{ 1 ,
" 304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c " ,
" 30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f " ,
" 020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000 " } ,
{ 2 ,
" 30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8 " ,
" 3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673 " ,
" 020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000 " } ,
{ 3 ,
" 304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64 " ,
" 3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee " ,
" 020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000 " } ,
{ 4 ,
" 30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca " ,
" 3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9 " ,
" 020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000 " }
} ) ;
// anchors: commitment tx with seven outputs untrimmed (maximum feerate)
chan . value_to_self_msat = 6993000000 ; // 7000000000 - 7000000
chan . feerate_per_kw = 644 ;
test_commitment_with_anchors! ( " 3045022100e0106830467a558c07544a3de7715610c1147062e7d091deeebe8b5c661cda9402202ad049c1a6d04834317a78483f723c205c9f638d17222aafc620800cc1b6ae35 " ,
" 3045022100ef82a405364bfc4007e63a7cc82925a513d79065bdbc216d60b6a4223a323f8a02200716730b8561f3c6d362eaf47f202e99fb30d0557b61b92b5f9134f8e2de3681 " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80094a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994e80300000000000022002010f88bf09e56f14fb4543fd26e47b0db50ea5de9cf3fc46434792471082621aed0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a4f996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ef82a405364bfc4007e63a7cc82925a513d79065bdbc216d60b6a4223a323f8a02200716730b8561f3c6d362eaf47f202e99fb30d0557b61b92b5f9134f8e2de368101483045022100e0106830467a558c07544a3de7715610c1147062e7d091deeebe8b5c661cda9402202ad049c1a6d04834317a78483f723c205c9f638d17222aafc620800cc1b6ae3501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , {
{ 0 ,
" 304402205912d91c58016f593d9e46fefcdb6f4125055c41a17b03101eaaa034b9028ab60220520d4d239c85c66e4c75c5b413620b62736e227659d7821b308e2b8ced3e728e " ,
" 30440220473166a5adcca68550bab80403f410a726b5bd855030527e3fefa8c1e4b4fd7b02203b1dc91d8d69039473036cb5c34398b99e8eb90ae500c22130a557b62294b188 " ,
" 02000000000101b8cefef62ea66f5178b9361b2371be0759cbc8c689bcfa7a8e6746d497ec221a0200000000010000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205912d91c58016f593d9e46fefcdb6f4125055c41a17b03101eaaa034b9028ab60220520d4d239c85c66e4c75c5b413620b62736e227659d7821b308e2b8ced3e728e834730440220473166a5adcca68550bab80403f410a726b5bd855030527e3fefa8c1e4b4fd7b02203b1dc91d8d69039473036cb5c34398b99e8eb90ae500c22130a557b62294b188012000000000000000000000000000000000000000000000000000000000000000008d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac6851b2756800000000 " } ,
{ 1 ,
" 3045022100c6b4113678039ee1e43a6cba5e3224ed2355ffc05e365a393afe8843dc9a76860220566d01fd52d65a89ba8595023884f9e8f2e9a310a6b9b85281c0bce06863430c " ,
" 3045022100d0d86307ea55d5daa80f453ad6d64b78fe8a6504aac25407c73e8502c0702c1602206a0809a02aa00c8dc4a53d976bb05d4605d8bb0b7b26b973a5c4e2734d8afbb4 " ,
" 02000000000101b8cefef62ea66f5178b9361b2371be0759cbc8c689bcfa7a8e6746d497ec221a0300000000010000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c6b4113678039ee1e43a6cba5e3224ed2355ffc05e365a393afe8843dc9a76860220566d01fd52d65a89ba8595023884f9e8f2e9a310a6b9b85281c0bce06863430c83483045022100d0d86307ea55d5daa80f453ad6d64b78fe8a6504aac25407c73e8502c0702c1602206a0809a02aa00c8dc4a53d976bb05d4605d8bb0b7b26b973a5c4e2734d8afbb401008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000 " } ,
{ 2 ,
" 304402203c3a699fb80a38112aafd73d6e3a9b7d40bc2c3ed8b7fbc182a20f43b215172202204e71821b984d1af52c4b8e2cd4c572578c12a965866130c2345f61e4c2d3fef4 " ,
" 304402205bcfa92f83c69289a412b0b6dd4f2a0fe0b0fc2d45bd74706e963257a09ea24902203783e47883e60b86240e877fcbf33d50b1742f65bc93b3162d1be26583b367ee " ,
" 02000000000101b8cefef62ea66f5178b9361b2371be0759cbc8c689bcfa7a8e6746d497ec221a040000000001000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402203c3a699fb80a38112aafd73d6e3a9b7d40bc2c3ed8b7fbc182a20f43b215172202204e71821b984d1af52c4b8e2cd4c572578c12a965866130c2345f61e4c2d3fef48347304402205bcfa92f83c69289a412b0b6dd4f2a0fe0b0fc2d45bd74706e963257a09ea24902203783e47883e60b86240e877fcbf33d50b1742f65bc93b3162d1be26583b367ee012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000 " } ,
{ 3 ,
" 304402200f089bcd20f25475216307d32aa5b6c857419624bfba1da07335f51f6ba4645b02206ce0f7153edfba23b0d4b2afc26bb3157d404368cb8ea0ca7cf78590dcdd28cf " ,
" 3045022100e4516da08f72c7a4f7b2f37aa84a0feb54ae2cc5b73f0da378e81ae0ca8119bf02207751b2628d8e2f62b4b9abccda4866246c1bfcc82e3d416ad562fd212102c28f " ,
" 02000000000101b8cefef62ea66f5178b9361b2371be0759cbc8c689bcfa7a8e6746d497ec221a050000000001000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402200f089bcd20f25475216307d32aa5b6c857419624bfba1da07335f51f6ba4645b02206ce0f7153edfba23b0d4b2afc26bb3157d404368cb8ea0ca7cf78590dcdd28cf83483045022100e4516da08f72c7a4f7b2f37aa84a0feb54ae2cc5b73f0da378e81ae0ca8119bf02207751b2628d8e2f62b4b9abccda4866246c1bfcc82e3d416ad562fd212102c28f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000 " } ,
{ 4 ,
" 3045022100aa72cfaf0965020c73a12c77276c6411ca68c4de36ac1998adf86c917a899a43022060da0a159fecfe0bed37c3962d767f12f90e30fed8a8f34b1301775c21a2bd3a " ,
" 304402203cd12065c2a42963c762e6b1a981e17695616ecb6f9fb33d8b0717cdd7ca0ee4022065500005c491c1dcf2fe9c4024f74b1c90785d572527055a491278f901143904 " ,
" 02000000000101b8cefef62ea66f5178b9361b2371be0759cbc8c689bcfa7a8e6746d497ec221a06000000000100000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100aa72cfaf0965020c73a12c77276c6411ca68c4de36ac1998adf86c917a899a43022060da0a159fecfe0bed37c3962d767f12f90e30fed8a8f34b1301775c21a2bd3a8347304402203cd12065c2a42963c762e6b1a981e17695616ecb6f9fb33d8b0717cdd7ca0ee4022065500005c491c1dcf2fe9c4024f74b1c90785d572527055a491278f901143904012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000 " }
} ) ;
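// The jump from seven to six outputs in the vectors below is BOLT 3's HTLC
// trimming rule: an HTLC output is dropped from the commitment tx once its
// value no longer clears the dust limit after paying the fee of its
// second-stage transaction. A minimal illustrative sketch (hypothetical
// helper, not LDK's API; assumes the BOLT 3 Appendix C parameters: 546 sat
// dust limit, pre-anchors weights, floor division as in the spec):
{
    fn untrimmed(amount_sat: u64, second_stage_weight: u64, feerate_per_kw: u64, dust_limit_sat: u64) -> bool {
        // Kept iff the HTLC still clears the dust limit after deducting the
        // HTLC-success (703 WU, received) or HTLC-timeout (663 WU, offered)
        // transaction fee at the given feerate.
        amount_sat >= dust_limit_sat + second_stage_weight * feerate_per_kw / 1000
    }
    // The 1000 sat received HTLC from these vectors:
    assert!(untrimmed(1000, 703, 647, 546));  // 546 + 454 = 1000: kept -> seven outputs
    assert!(!untrimmed(1000, 703, 648, 546)); // 546 + 455 > 1000: trimmed -> six outputs
}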
// commitment tx with six outputs untrimmed (minimum feerate)
chan . value_to_self_msat = 6993000000 ; // 7000000000 - 7000000
chan . feerate_per_kw = 648 ;
test_commitment! ( " 304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5 " ,
" 3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9 " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , {
{ 0 ,
" 3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0 " ,
" 304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6 " ,
" 020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000 " } ,
{ 1 ,
" 304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124 " ,
" 3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae " ,
" 020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000 " } ,
{ 2 ,
" 304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989 " ,
" 3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796 " ,
" 020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000 " } ,
{ 3 ,
" 3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be " ,
" 3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6 " ,
" 020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000 " }
} ) ;
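// With anchors, the second-stage weights grow to 706 WU (HTLC-success) and
// 666 WU (HTLC-timeout), so the same 1000 sat HTLC is trimmed one feerate
// step earlier: 706 * 644 / 1000 = 454 and 546 + 454 = 1000 keeps it at
// feerate 644 (the seven-output case above), while 706 * 645 / 1000 = 455
// trims it at 645 (the six-output vectors below).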
// anchors: commitment tx with six outputs untrimmed (minimum feerate)
chan . value_to_self_msat = 6993000000 ; // 7000000000 - 7000000
chan . feerate_per_kw = 645 ;
test_commitment_with_anchors! ( " 3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312 " ,
" 3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051 " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , {
{ 0 ,
" 30440220446f9e5c375db6a61d6eeee8b59219a30a4a37372afc2670a1a2889c78e9b943022061895f6088fb48b490ab2140a4842c277b64bf25ff591625dd0356e0c96ab7a8 " ,
" 3045022100c1621ba26a99c263fd885feff5fda5ca2cc73df080b3a49ecf15164ee244d2a5022037f4cc7fd4441af39a83a0e44c3b1db7d64a4c8080e8697f9e952f85421a34d8 " ,
" 02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b28534856132000200000000010000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220446f9e5c375db6a61d6eeee8b59219a30a4a37372afc2670a1a2889c78e9b943022061895f6088fb48b490ab2140a4842c277b64bf25ff591625dd0356e0c96ab7a883483045022100c1621ba26a99c263fd885feff5fda5ca2cc73df080b3a49ecf15164ee244d2a5022037f4cc7fd4441af39a83a0e44c3b1db7d64a4c8080e8697f9e952f85421a34d801008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000 " } ,
{ 1 ,
" 3044022027a3ffcb8a007e3349d75382efbd4b3fb99fcbd479a18555e58697bd1278d5c402205c8303d46211c3ae8975fe84a0df08b4623119fecd03bc93b49d7f7a0c64c710 " ,
" 3045022100b697aca55c6fb15e5348bb7387b584815fd15e8dd306afe0c477cb550d0c2d40022050b0f7e370f7604d2fec781fefe86715dbe95dff4dab88d628f509d62f854de1 " ,
" 02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b28534856132000300000000010000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022027a3ffcb8a007e3349d75382efbd4b3fb99fcbd479a18555e58697bd1278d5c402205c8303d46211c3ae8975fe84a0df08b4623119fecd03bc93b49d7f7a0c64c71083483045022100b697aca55c6fb15e5348bb7387b584815fd15e8dd306afe0c477cb550d0c2d40022050b0f7e370f7604d2fec781fefe86715dbe95dff4dab88d628f509d62f854de1012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000 " } ,
{ 2 ,
" 30440220013975ae356e6daf22a86a29f21c4f35aca82ed8f731a1103c60c74f5ed1c5aa02200350d4e5455cdbcacb7ccf174db5bed8286019e509a113f6b4c5e606ee12c9d7 " ,
" 3045022100e69a29f78779577830e73f327073c93168896f1b89432124b7846f5def9cd9cb02204433db3697e6ed7ac89574ca066a749640e0c9e114ac2e0ee4545741fcf7b7e9 " ,
" 02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b2853485613200040000000001000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220013975ae356e6daf22a86a29f21c4f35aca82ed8f731a1103c60c74f5ed1c5aa02200350d4e5455cdbcacb7ccf174db5bed8286019e509a113f6b4c5e606ee12c9d783483045022100e69a29f78779577830e73f327073c93168896f1b89432124b7846f5def9cd9cb02204433db3697e6ed7ac89574ca066a749640e0c9e114ac2e0ee4545741fcf7b7e901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000 " } ,
{ 3 ,
" 304402205257017423644c7e831f30bc0c334eecfe66e9a6d2e92d157c5bece576b2be4f022047b21cf8e955e22b7471940563922d1a5852fb95459ca32905c7d46a19141664 " ,
" 304402204f5de65a624e3f757adffb678bd887eb4e656538c5ea7044922f6ee3eed8a06202206ff6f7bfe73b565343cae76131ac658f1a9c60d3ca2343358cda60b9e35f94c8 " ,
" 02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205257017423644c7e831f30bc0c334eecfe66e9a6d2e92d157c5bece576b2be4f022047b21cf8e955e22b7471940563922d1a5852fb95459ca32905c7d46a191416648347304402204f5de65a624e3f757adffb678bd887eb4e656538c5ea7044922f6ee3eed8a06202206ff6f7bfe73b565343cae76131ac658f1a9c60d3ca2343358cda60b9e35f94c8012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000 " }
} ) ;
// commitment tx with six outputs untrimmed (maximum feerate)
chan . value_to_self_msat = 6993000000 ; // 7000000000 - 7000000
chan . feerate_per_kw = 2069 ;
test_commitment! ( " 304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc " ,
" 3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33 " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , {
{ 0 ,
" 3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699 " ,
" 3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d " ,
" 02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000 " } ,
{ 1 ,
" 3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df " ,
" 3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61 " ,
" 02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000 " } ,
{ 2 ,
" 3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0 " ,
" 3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18 " ,
" 02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000 " } ,
{ 3 ,
" 304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df " ,
" 3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33 " ,
" 02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000 " }
} ) ;
// anchors: commitment tx with six outputs untrimmed (maximum feerate)
chan . value_to_self_msat = 6993000000 ; // 7000000000 - 7000000
chan . feerate_per_kw = 2060 ;
test_commitment_with_anchors! ( " 304402206208aeb34e404bd052ce3f298dfa832891c9d42caec99fe2a0d2832e9690b94302201b034bfcc6fa9faec667a9b7cbfe0b8d85e954aa239b66277887b5088aff08c3 " ,
" 304402201ce37a44b95213358c20f44404d6db7a6083bea6f58de6c46547ae41a47c9f8202206db1d45be41373e92f90d346381febbea8c78671b28c153e30ad1db3441a9497 " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ab88f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201ce37a44b95213358c20f44404d6db7a6083bea6f58de6c46547ae41a47c9f8202206db1d45be41373e92f90d346381febbea8c78671b28c153e30ad1db3441a94970147304402206208aeb34e404bd052ce3f298dfa832891c9d42caec99fe2a0d2832e9690b94302201b034bfcc6fa9faec667a9b7cbfe0b8d85e954aa239b66277887b5088aff08c301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , {
{ 0 ,
" 30440220011f999016570bbab9f3125377d0f35096b4dbe155f97c20f71829ead2817d1602201f23f7e17f6928734601c5d8613431eed5c90aa41c3106e8c1cb02ce32aacb5d " ,
" 3044022017da96dfb0eb4061fa0162dc6fa6b2e07ecc5040ab5e6cb07be59838460b3e58022079371ffc95002cc1dc2891ec38198c9c25aca8164304fe114f1b55e2ffd1ddd5 " ,
" 02000000000101e7f364cf3a554b670767e723ef14b2af7a3eac70bd79dbde9256f384369c062d0200000000010000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220011f999016570bbab9f3125377d0f35096b4dbe155f97c20f71829ead2817d1602201f23f7e17f6928734601c5d8613431eed5c90aa41c3106e8c1cb02ce32aacb5d83473044022017da96dfb0eb4061fa0162dc6fa6b2e07ecc5040ab5e6cb07be59838460b3e58022079371ffc95002cc1dc2891ec38198c9c25aca8164304fe114f1b55e2ffd1ddd501008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000 " } ,
{ 1 ,
" 304402202d2d9681409b0a0987bd4a268ffeb112df85c4c988ac2a3a2475cb00a61912c302206aa4f4d1388b7d3282bc847871af3cca30766cc8f1064e3a41ec7e82221e10f7 " ,
" 304402206426d67911aa6ff9b1cb147b093f3f65a37831a86d7c741d999afc0666e1773d022000bb71821650c70ea58d9bcdd03af736c41a5a8159d436c3ee0408a07394dcce " ,
" 02000000000101e7f364cf3a554b670767e723ef14b2af7a3eac70bd79dbde9256f384369c062d0300000000010000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202d2d9681409b0a0987bd4a268ffeb112df85c4c988ac2a3a2475cb00a61912c302206aa4f4d1388b7d3282bc847871af3cca30766cc8f1064e3a41ec7e82221e10f78347304402206426d67911aa6ff9b1cb147b093f3f65a37831a86d7c741d999afc0666e1773d022000bb71821650c70ea58d9bcdd03af736c41a5a8159d436c3ee0408a07394dcce012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000 " } ,
{ 2 ,
" 3045022100f51cdaa525b7d4304548c642bb7945215eb5ae7d32874517cde67ca23ab0a12202206286d59e4b19926c6ac844be6f3ab8149a1ddb9c70f5026b7e83e40a6c08e6e1 " ,
" 304502210091b16b1ac63b867e7a5ca0344f7b2aa1cdd49d4b72eac86a31e7ec6f069e20640220402bfb571ba3a9c49e3b0061c89303453803d0241059d899222aaac4799b5076 " ,
" 02000000000101e7f364cf3a554b670767e723ef14b2af7a3eac70bd79dbde9256f384369c062d040000000001000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f51cdaa525b7d4304548c642bb7945215eb5ae7d32874517cde67ca23ab0a12202206286d59e4b19926c6ac844be6f3ab8149a1ddb9c70f5026b7e83e40a6c08e6e18348304502210091b16b1ac63b867e7a5ca0344f7b2aa1cdd49d4b72eac86a31e7ec6f069e20640220402bfb571ba3a9c49e3b0061c89303453803d0241059d899222aaac4799b507601008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000 " } ,
{ 3 ,
" 304402202f058d99cb5a54f90773d43ba4e7a0089efd9f8269ef2da1b85d48a3e230555402205acc4bd6561830867d45cd7b84bba9fa35ad2b345016471c1737142bc99782c4 " ,
" 304402202913f9cacea54efd2316cffa91219def9e0e111977216c1e76e9da80befab14f022000a9a69e8f37ebe4a39107ab50fab0dde537334588f8f412bbaca57b179b87a6 " ,
" 02000000000101e7f364cf3a554b670767e723ef14b2af7a3eac70bd79dbde9256f384369c062d05000000000100000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202f058d99cb5a54f90773d43ba4e7a0089efd9f8269ef2da1b85d48a3e230555402205acc4bd6561830867d45cd7b84bba9fa35ad2b345016471c1737142bc99782c48347304402202913f9cacea54efd2316cffa91219def9e0e111977216c1e76e9da80befab14f022000a9a69e8f37ebe4a39107ab50fab0dde537334588f8f412bbaca57b179b87a6012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000 " }
} ) ;
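// The next casualty of the trimming rule sketched above is the 2000 sat
// received HTLC: 703 * 2069 / 1000 = 1454 and 546 + 1454 = 2000 keeps it,
// while 703 * 2070 / 1000 = 1455 trims it, leaving five outputs below.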
// commitment tx with five outputs untrimmed (minimum feerate)
chan . value_to_self_msat = 6993000000 ; // 7000000000 - 7000000
chan . feerate_per_kw = 2070 ;
test_commitment! ( " 304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c " ,
" 3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1 " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , {
{ 0 ,
" 304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b " ,
" 30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5 " ,
" 02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000 " } ,
{ 1 ,
" 3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546 " ,
" 30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6 " ,
" 02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000 " } ,
{ 2 ,
" 3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504 " ,
" 30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502 " ,
" 02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000 " }
} ) ;
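// Anchors variant of the same boundary: 706 * 2060 / 1000 = 1454 keeps the
// 2000 sat received HTLC, 706 * 2061 / 1000 = 1455 trims it.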
// anchors: commitment tx with five outputs untrimmed (minimum feerate)
chan . value_to_self_msat = 6993000000 ; // 7000000000 - 7000000
chan . feerate_per_kw = 2061 ;
test_commitment_with_anchors! ( " 3045022100a2faf2ad7e323b2a82e07dc40b6847207ca6ad7b089f2c21dea9a4d37e52d59d02204c9480ce0358eb51d92a4342355a97e272e3cc45f86c612a76a3fe32fc3c4cb4 " ,
" 304402204ab07c659412dd2cd6043b1ad811ab215e901b6b5653e08cb3d2fe63d3e3dc57022031c7b3d130f9380ef09581f4f5a15cb6f359a2e0a597146b96c3533a26d6f4cd " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837eab80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a18916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402204ab07c659412dd2cd6043b1ad811ab215e901b6b5653e08cb3d2fe63d3e3dc57022031c7b3d130f9380ef09581f4f5a15cb6f359a2e0a597146b96c3533a26d6f4cd01483045022100a2faf2ad7e323b2a82e07dc40b6847207ca6ad7b089f2c21dea9a4d37e52d59d02204c9480ce0358eb51d92a4342355a97e272e3cc45f86c612a76a3fe32fc3c4cb401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , {
{ 0 ,
" 3045022100e10744f572a2cd1d787c969e894b792afaed21217ee0480df0112d2fa3ef96ea02202af4f66eb6beebc36d8e98719ed6b4be1b181659fcb561fc491d8cfebff3aa85 " ,
" 3045022100c3dc3ea50a0ca20e350f97b50c52c5514717cfa36cb9600918caac5cb556842b022049af018d676dde0c8e28ecf325f3ff5c1594261c4f7511d501f9d62d0594d2a2 " ,
" 02000000000101cf32732fe2d1387ed4e2335f69ddd3c0f337dabc03269e742531f89d35e161d10200000000010000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e10744f572a2cd1d787c969e894b792afaed21217ee0480df0112d2fa3ef96ea02202af4f66eb6beebc36d8e98719ed6b4be1b181659fcb561fc491d8cfebff3aa8583483045022100c3dc3ea50a0ca20e350f97b50c52c5514717cfa36cb9600918caac5cb556842b022049af018d676dde0c8e28ecf325f3ff5c1594261c4f7511d501f9d62d0594d2a201008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000 " } ,
{ 1 ,
" 3045022100e1f51fb72fec604b029b348a3bb6363454e1869f5b1e24fd736f860c8039f8070220030a2c90186437d8c9b47d4897798c024521b1274991c4cdc125970b346094b1 " ,
" 3045022100ec7ade6037e531629f24390ca9713782a04d648065d17fbe6b015981cdb296c202202d61049a6ecba2fb5314f3edcda2361cad187a89bea6e5d15185354d80c0c085 " ,
" 02000000000101cf32732fe2d1387ed4e2335f69ddd3c0f337dabc03269e742531f89d35e161d1030000000001000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e1f51fb72fec604b029b348a3bb6363454e1869f5b1e24fd736f860c8039f8070220030a2c90186437d8c9b47d4897798c024521b1274991c4cdc125970b346094b183483045022100ec7ade6037e531629f24390ca9713782a04d648065d17fbe6b015981cdb296c202202d61049a6ecba2fb5314f3edcda2361cad187a89bea6e5d15185354d80c0c08501008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000 " } ,
{ 2 ,
" 304402203479f81a1d83c516957679dc98bf91d35deada967739a8e3869e3e8db08246130220053c8e154b97e3019048dcec3d51bfaf396f36861fbda6d33f0e2a57155c8b9f " ,
" 3045022100a558eb5caa04e35a4417c1f0123ac12eec5f6badee28f5764dc6b69486e594f802201589b12784e242f205832d2d032149bd4e79433ec304c05394241fc7dcba5a71 " ,
" 02000000000101cf32732fe2d1387ed4e2335f69ddd3c0f337dabc03269e742531f89d35e161d104000000000100000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402203479f81a1d83c516957679dc98bf91d35deada967739a8e3869e3e8db08246130220053c8e154b97e3019048dcec3d51bfaf396f36861fbda6d33f0e2a57155c8b9f83483045022100a558eb5caa04e35a4417c1f0123ac12eec5f6badee28f5764dc6b69486e594f802201589b12784e242f205832d2d032149bd4e79433ec304c05394241fc7dcba5a71012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000 " }
} ) ;
// commitment tx with five outputs untrimmed (maximum feerate)
chan . value_to_self_msat = 6993000000 ; // 7000000000 - 7000000
chan . feerate_per_kw = 2194 ;
test_commitment! ( " 304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3 " ,
" 3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398 " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , {
{ 0 ,
" 3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450 " ,
" 304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301 " ,
" 02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000 " } ,
{ 1 ,
" 3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0 " ,
" 3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77 " ,
" 02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000 " } ,
{ 2 ,
" 3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b " ,
" 30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3 " ,
" 02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000 " }
} ) ;
// anchors: commitment tx with five outputs untrimmed (maximum feerate)
chan . value_to_self_msat = 6993000000 ; // 7000000000 - 7000000
chan . feerate_per_kw = 2184 ;
test_commitment_with_anchors! ( " 3044022013d326f80ff7607cf366c823fcbbcb7a2b10322484825f151e6c4c756af24b8f02201ba05b9d8beb7cea2947f9f4d9e03f90435e93db2dd48b32eb9ca3f3dd042c79 " ,
" 30440220555c05261f72c5b4702d5c83a608630822b473048724b08640d6e75e345094250220448950b74a96a56963928ba5db8b457661a742c855e69d239b3b6ab73de307a3 " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837eab80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a4f906a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220555c05261f72c5b4702d5c83a608630822b473048724b08640d6e75e345094250220448950b74a96a56963928ba5db8b457661a742c855e69d239b3b6ab73de307a301473044022013d326f80ff7607cf366c823fcbbcb7a2b10322484825f151e6c4c756af24b8f02201ba05b9d8beb7cea2947f9f4d9e03f90435e93db2dd48b32eb9ca3f3dd042c7901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , {
{ 0 ,
" 304402202e03ba1390998b3487e9a7fefcb66814c09abea0ef1bcc915dbaefbcf310569a02206bd10493a105ac69048e9bcedcb8e3301ef81b55018d911a4afd297297f98d30 " ,
" 304402200c3952ca04be0c60dcc0b7873a0829f560607524943554ae4a27d8d967166199022021a68657b88e22f9bf9ac6065be412685aff643d17049f04f2e99e86197dabb1 " ,
" 020000000001015b03043e20eb467029305a22af4c3b915e793743f192c5d225cf1d3c6e8c03010200000000010000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202e03ba1390998b3487e9a7fefcb66814c09abea0ef1bcc915dbaefbcf310569a02206bd10493a105ac69048e9bcedcb8e3301ef81b55018d911a4afd297297f98d308347304402200c3952ca04be0c60dcc0b7873a0829f560607524943554ae4a27d8d967166199022021a68657b88e22f9bf9ac6065be412685aff643d17049f04f2e99e86197dabb101008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000 " } ,
{ 1 ,
" 304402201f8a6adda2403bc400c919ea69d72d315337291e00d02cde085ea32953dbc50002202d65230da98df7af8ebefd2b60b457d0945232988ee2d7460a94a77d414a9acc " ,
" 3045022100ea69c9273b8914ac62b5b7082d6ac1da2b7b065ebf2ef3cd6403f5305ce3f26802203d98736ea97638895a898dfcc5ee0d0c55eb496b3964df0bb25d223688ea8b87 " ,
" 020000000001015b03043e20eb467029305a22af4c3b915e793743f192c5d225cf1d3c6e8c0301030000000001000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402201f8a6adda2403bc400c919ea69d72d315337291e00d02cde085ea32953dbc50002202d65230da98df7af8ebefd2b60b457d0945232988ee2d7460a94a77d414a9acc83483045022100ea69c9273b8914ac62b5b7082d6ac1da2b7b065ebf2ef3cd6403f5305ce3f26802203d98736ea97638895a898dfcc5ee0d0c55eb496b3964df0bb25d223688ea8b8701008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000 " } ,
{ 2 ,
" 3045022100ea6e4c9b8f56dd9cf5799492a201cdd65b8bc9bc089c3cff34107896ae313f90022034760f7760975cc68e8917a7f62894e25583da7be11af557c4fc402661d0cbf8 " ,
" 30440220717012f2f7ef6cac590aaf66c2109132c93ffba245959ac62d82e394ba80191302203f00fd9cb37c92c6b0ad4b33e62c3e55b04e5c2cfa0adcca5a9bc49774eeca8a " ,
" 020000000001015b03043e20eb467029305a22af4c3b915e793743f192c5d225cf1d3c6e8c0301040000000001000000019b090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ea6e4c9b8f56dd9cf5799492a201cdd65b8bc9bc089c3cff34107896ae313f90022034760f7760975cc68e8917a7f62894e25583da7be11af557c4fc402661d0cbf8834730440220717012f2f7ef6cac590aaf66c2109132c93ffba245959ac62d82e394ba80191302203f00fd9cb37c92c6b0ad4b33e62c3e55b04e5c2cfa0adcca5a9bc49774eeca8a012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000 " }
} ) ;
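// The 2000 sat offered HTLC goes next, priced against the HTLC-timeout tx
// instead: 663 * 2194 / 1000 = 1454 keeps it, 663 * 2195 / 1000 = 1455
// trims it, leaving the four outputs below.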
// commitment tx with four outputs untrimmed (minimum feerate)
chan . value_to_self_msat = 6993000000 ; // 7000000000 - 7000000
chan . feerate_per_kw = 2195 ;
test_commitment! ( " 304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403 " ,
" 3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767 " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , {
{ 0 ,
" 3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e " ,
" 3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76 " ,
" 020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000 " } ,
{ 1 ,
" 3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a " ,
" 3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82 " ,
" 020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000 " }
} ) ;
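// Anchors: 666 * 2184 / 1000 = 1454 keeps the 2000 sat offered HTLC,
// 666 * 2185 / 1000 = 1455 trims it.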
// anchors: commitment tx with four outputs untrimmed (minimum feerate)
chan . value_to_self_msat = 6993000000 ; // 7000000000 - 7000000
chan . feerate_per_kw = 2185 ;
test_commitment_with_anchors! ( " 3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0 " ,
" 3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5 " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , {
{ 0 ,
" 304502210094480e38afb41d10fae299224872f19c53abe23c7033a1c0642c48713e7863a10220726dd9456407682667dc4bd9c66975acb3744961770b5002f7eb9c0df9ef2f3e " ,
" 304402203148dac61513dc0361738cba30cb341a1e580f8acd5ab0149bf65bd670688cf002207e5d9a0fcbbea2c263bc714fa9e9c44d7f582ea447f366119fc614a23de32f1f " ,
" 02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc0200000000010000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050048304502210094480e38afb41d10fae299224872f19c53abe23c7033a1c0642c48713e7863a10220726dd9456407682667dc4bd9c66975acb3744961770b5002f7eb9c0df9ef2f3e8347304402203148dac61513dc0361738cba30cb341a1e580f8acd5ab0149bf65bd670688cf002207e5d9a0fcbbea2c263bc714fa9e9c44d7f582ea447f366119fc614a23de32f1f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000 " } ,
{ 1 ,
" 304402200dbde868dbc20c6a2433fe8979ba5e3f966b1c2d1aeb615f1c42e9c938b3495402202eec5f663c8b601c2061c1453d35de22597c137d1907a2feaf714d551035cb6e " ,
" 3045022100b896bded41d7feac7af25c19e35c53037c53b50e73cfd01eb4ba139c7fdf231602203a3be049d3d89396c4dc766d82ce31e237da8bc3a93e2c7d35992d1932d9cfeb " ,
" 02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc030000000001000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402200dbde868dbc20c6a2433fe8979ba5e3f966b1c2d1aeb615f1c42e9c938b3495402202eec5f663c8b601c2061c1453d35de22597c137d1907a2feaf714d551035cb6e83483045022100b896bded41d7feac7af25c19e35c53037c53b50e73cfd01eb4ba139c7fdf231602203a3be049d3d89396c4dc766d82ce31e237da8bc3a93e2c7d35992d1932d9cfeb012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000 " }
} ) ;
// commitment tx with four outputs untrimmed (maximum feerate)
chan . value_to_self_msat = 6993000000 ; // 7000000000 - 7000000
chan . feerate_per_kw = 3702 ;
test_commitment! ( " 304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169 " ,
" 3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75 " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , {
{ 0 ,
" 304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89 " ,
" 304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f " ,
" 020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000 " } ,
{ 1 ,
" 3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817 " ,
" 304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc " ,
" 020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000 " }
} ) ;
// anchors: commitment tx with four outputs untrimmed (maximum feerate)
chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.feerate_per_kw = 3686;
test_commitment_with_anchors! ( " 30440220784485cf7a0ad7979daf2c858ffdaf5298d0020cea7aea466843e7948223bd9902206031b81d25e02a178c64e62f843577fdcdfc7a1decbbfb54cd895de692df85ca " ,
" 3045022100c268496aad5c3f97f25cf41c1ba5483a12982de29b222051b6de3daa2229413b02207f3c82d77a2c14f0096ed9bb4c34649483bb20fa71f819f71af44de6593e8bb2 " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a29896a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c268496aad5c3f97f25cf41c1ba5483a12982de29b222051b6de3daa2229413b02207f3c82d77a2c14f0096ed9bb4c34649483bb20fa71f819f71af44de6593e8bb2014730440220784485cf7a0ad7979daf2c858ffdaf5298d0020cea7aea466843e7948223bd9902206031b81d25e02a178c64e62f843577fdcdfc7a1decbbfb54cd895de692df85ca01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , {
{ 0 ,
" 304402202cfe6618926ca9f1574f8c4659b425e9790b4677ba2248d77901290806130ffe02204ab37bb0287abcdb8b750b018d41a09effe37cb65ff801fa70d3f1a416599841 " ,
" 3044022030b318139715e3b34f19be852cc01c1c0e1599e8b926a73df2bfb70dd186ddee022062a2b7398aed9f563b4014da04a1a99debd0ff663ceece68a547df5982dc2d72 " ,
" 020000000001012c32e55722e4b96324d8e5b398d583a20780b25202816adc32dc3157dee731c90200000000010000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202cfe6618926ca9f1574f8c4659b425e9790b4677ba2248d77901290806130ffe02204ab37bb0287abcdb8b750b018d41a09effe37cb65ff801fa70d3f1a41659984183473044022030b318139715e3b34f19be852cc01c1c0e1599e8b926a73df2bfb70dd186ddee022062a2b7398aed9f563b4014da04a1a99debd0ff663ceece68a547df5982dc2d7201008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000 " } ,
{ 1 ,
" 30440220687af8544d335376620a6f4b5412bfd0da48de047c1785674f26e669d4a3ff82022058591c1e3a6c50017427d38a8f756eb685bdab88ec73838eed3530048861f9d5 " ,
" 30440220109f1a62b5a13d28d5b7634dd7693b1d5994eb404c4bb4a9a80aa540d3984d170220307251107ff8499a23e99abce7dda4f1c707c98abddb9405a83de0081cde8ace " ,
" 020000000001012c32e55722e4b96324d8e5b398d583a20780b25202816adc32dc3157dee731c90300000000010000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220687af8544d335376620a6f4b5412bfd0da48de047c1785674f26e669d4a3ff82022058591c1e3a6c50017427d38a8f756eb685bdab88ec73838eed3530048861f9d5834730440220109f1a62b5a13d28d5b7634dd7693b1d5994eb404c4bb4a9a80aa540d3984d170220307251107ff8499a23e99abce7dda4f1c707c98abddb9405a83de0081cde8ace012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000 " }
} ) ;
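// Raising the feerate from 3702 to 3703 below pushes one HTLC across the
// trim threshold, shrinking the commitment tx from four outputs to three.
// A minimal sketch of the BOLT 3 rule for offered HTLCs, assuming the
// pre-anchors HTLC-timeout weight of 663 (received HTLCs use the
// HTLC-success weight of 703 instead); `dust_limit_sat` stands in for the
// channel's negotiated `dust_limit_satoshis`:
fn offered_htlc_is_trimmed_sketch(amount_msat: u64, feerate_per_kw: u32, dust_limit_sat: u64) -> bool {
	// Fee the second-stage HTLC-timeout tx would pay at this feerate.
	let htlc_timeout_fee_sat = feerate_per_kw as u64 * 663 / 1000;
	// Trimmed (omitted from the commitment tx) if what remains after that
	// fee would fall below the dust limit.
	amount_msat / 1000 < dust_limit_sat + htlc_timeout_fee_sat
}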
// commitment tx with three outputs untrimmed (minimum feerate)
chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.feerate_per_kw = 3703;
test_commitment! ( " 3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e " ,
" 304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1 " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , {
{ 0 ,
" 3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a " ,
" 3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05 " ,
" 0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000 " }
} ) ;
// anchors: commitment tx with three outputs untrimmed (minimum feerate)
chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.feerate_per_kw = 3687;
test_commitment_with_anchors! ( " 3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377 " ,
" 3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , {
{ 0 ,
" 3045022100b287bb8e079a62dcb3aaa8b6c67c0f434a87ebf64ab0bcfb2fc14b55576b859f02206d37c2eb5fd04cfc9eb0534c76a28a98da251b84a931377cce307af39dfaed74 " ,
" 3045022100a497c64faea286ec4221f48628086dc6403fd7b60a23c4176e8ebbca15ae70dc0220754e20e968e96cf6421fd2a672c8c26d3bc6e19218cfc8fc2aa51fce026c14b1 " ,
" 02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a600200000000010000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b287bb8e079a62dcb3aaa8b6c67c0f434a87ebf64ab0bcfb2fc14b55576b859f02206d37c2eb5fd04cfc9eb0534c76a28a98da251b84a931377cce307af39dfaed7483483045022100a497c64faea286ec4221f48628086dc6403fd7b60a23c4176e8ebbca15ae70dc0220754e20e968e96cf6421fd2a672c8c26d3bc6e19218cfc8fc2aa51fce026c14b1012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000 " }
} ) ;
// commitment tx with three outputs untrimmed (maximum feerate)
chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.feerate_per_kw = 4914;
test_commitment! ( " 3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244 " ,
" 3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19 " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , {
{ 0 ,
" 3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374 " ,
" 30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10 " ,
" 02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000 " }
} ) ;
// anchors: commitment tx with three outputs untrimmed (maximum feerate)
chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.feerate_per_kw = 4893;
test_commitment_with_anchors! ( " 3045022100a8771147109e4d3f44a5976c3c3de98732bbb77308d21444dbe0d76faf06480e02200b4e916e850c3d1f918de87bbbbb07843ffea1d4658dfe060b6f9ccd96d34be8 " ,
" 30440220086288faceab47461eb2d808e9e9b0cb3ffc24a03c2f18db7198247d38f10e58022031d1c2782a58c8c6ce187d0019eb47a83babdf3040e2caff299ab48f7e12b1fa " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a87856a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220086288faceab47461eb2d808e9e9b0cb3ffc24a03c2f18db7198247d38f10e58022031d1c2782a58c8c6ce187d0019eb47a83babdf3040e2caff299ab48f7e12b1fa01483045022100a8771147109e4d3f44a5976c3c3de98732bbb77308d21444dbe0d76faf06480e02200b4e916e850c3d1f918de87bbbbb07843ffea1d4658dfe060b6f9ccd96d34be801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , {
{ 0 ,
" 30450221008db80f8531104820b3e894492b4463f074f965b542e1b5c153ddfb108a5ea642022030b203d857a2b3581c2087a7bf17c95d04fadc1c6cdae88c620477f2dccb1ee4 " ,
" 3045022100e5fbae857c47dbfc050a05924bd449fc9804798bd6442002c578437dc34450810220296589bc387645512345299e307116aaac4ce9fc752abcd1936b802d03526312 " ,
" 02000000000101d515a15e9175fd315bb8d4e768f28684801a9e5a9acdfeba34f7b3b3b3a9ba1d0200000000010000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008db80f8531104820b3e894492b4463f074f965b542e1b5c153ddfb108a5ea642022030b203d857a2b3581c2087a7bf17c95d04fadc1c6cdae88c620477f2dccb1ee483483045022100e5fbae857c47dbfc050a05924bd449fc9804798bd6442002c578437dc34450810220296589bc387645512345299e307116aaac4ce9fc752abcd1936b802d03526312012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000 " }
} ) ;
// commitment tx with two outputs untrimmed (minimum feerate)
chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.feerate_per_kw = 4915;
test_commitment! ( " 304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720 " ,
" 30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5 " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , { } ) ;
// anchors: commitment tx with two outputs untrimmed (minimum feerate)
chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.feerate_per_kw = 4894;
test_commitment_with_anchors! ( " 3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95 " ,
" 30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , { } ) ;
// commitment tx with two outputs untrimmed (maximum feerate)
chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.feerate_per_kw = 9651180;
test_commitment! ( " 304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3 " ,
" 3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , { } ) ;
// commitment tx with one output untrimmed (minimum feerate)
chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.feerate_per_kw = 9651181;
test_commitment! ( " 304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2 " ,
" 304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379 " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , { } ) ;
// anchors: commitment tx with one output untrimmed (minimum feerate)
chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.feerate_per_kw = 6216010;
test_commitment_with_anchors! ( " 30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf " ,
" 30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1 " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , { } ) ;
// commitment tx with fee greater than funder amount
chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.feerate_per_kw = 9651936;
test_commitment! ( " 304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2 " ,
" 304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379 " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , { } ) ;
// commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
chan.value_to_self_msat = 7_000_000_000 - 2_000_000;
chan.feerate_per_kw = 253;
chan.pending_inbound_htlcs.clear();
chan.pending_inbound_htlcs.push({
	let mut out = InboundHTLCOutput {
		htlc_id: 1,
		amount_msat: 2000000,
		cltv_expiry: 501,
		payment_hash: PaymentHash([0; 32]),
		state: InboundHTLCState::Committed,
	};
	out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner();
	out
});
chan.pending_outbound_htlcs.clear();
chan.pending_outbound_htlcs.push({
	let mut out = OutboundHTLCOutput {
		htlc_id: 6,
		amount_msat: 5000000,
		cltv_expiry: 506,
		payment_hash: PaymentHash([0; 32]),
		state: OutboundHTLCState::Committed,
		source: HTLCSource::dummy(),
	};
	out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
	out
});
chan.pending_outbound_htlcs.push({
	let mut out = OutboundHTLCOutput {
		htlc_id: 5,
		amount_msat: 5000000,
		cltv_expiry: 505,
		payment_hash: PaymentHash([0; 32]),
		state: OutboundHTLCState::Committed,
		source: HTLCSource::dummy(),
	};
	out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
	out
});
test_commitment! ( " 30440220048705bec5288d28b3f29344b8d124853b1af423a568664d2c6f02c8ea886525022060f998a461052a2476b912db426ea2a06700953a241135c7957f2e79bc222df9 " ,
" 3045022100c4f1d60b6fca9febc8b39de1a31e84c5f7c4b41c97239ef05f4350aa484c6b5e02200c5134ac8b20eb7a29d0dd4a501f6aa8fefb8489171f4cb408bd2a32324ab03f " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a79f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c4f1d60b6fca9febc8b39de1a31e84c5f7c4b41c97239ef05f4350aa484c6b5e02200c5134ac8b20eb7a29d0dd4a501f6aa8fefb8489171f4cb408bd2a32324ab03f014730440220048705bec5288d28b3f29344b8d124853b1af423a568664d2c6f02c8ea886525022060f998a461052a2476b912db426ea2a06700953a241135c7957f2e79bc222df901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , {
{ 0 ,
" 304502210081cbb94121761d34c189cd4e6a281feea6f585060ad0ba2632e8d6b3c6bb8a6c02201007981bbd16539d63df2805b5568f1f5688cd2a885d04706f50db9b77ba13c6 " ,
" 304502210090ed76aeb21b53236a598968abc66e2024691d07b62f53ddbeca8f93144af9c602205f873af5a0c10e62690e9aba09740550f194a9dc455ba4c1c23f6cde7704674c " ,
" 0200000000010189a326e23addc28323dbadcb4e71c2c17088b6e8fa184103e552f44075dddc34000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050048304502210081cbb94121761d34c189cd4e6a281feea6f585060ad0ba2632e8d6b3c6bb8a6c02201007981bbd16539d63df2805b5568f1f5688cd2a885d04706f50db9b77ba13c60148304502210090ed76aeb21b53236a598968abc66e2024691d07b62f53ddbeca8f93144af9c602205f873af5a0c10e62690e9aba09740550f194a9dc455ba4c1c23f6cde7704674c012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000 " } ,
{ 1 ,
" 304402201d0f09d2bf7bc245a4f17980e1e9164290df16c70c6a2ff1592f5030d6108581022061e744a7dc151b36bf0aff7a4f1812ba90b8b03633bb979a270d19858fd960c5 " ,
" 30450221009aef000d2e843a4202c1b1a2bf554abc9a7902bf49b2cb0759bc507456b7ebad02204e7c3d193ede2fd2b4cd6b39f51a920e581e35575e357e44d7b699c40ce61d39 " ,
" 0200000000010189a326e23addc28323dbadcb4e71c2c17088b6e8fa184103e552f44075dddc3401000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402201d0f09d2bf7bc245a4f17980e1e9164290df16c70c6a2ff1592f5030d6108581022061e744a7dc151b36bf0aff7a4f1812ba90b8b03633bb979a270d19858fd960c5014830450221009aef000d2e843a4202c1b1a2bf554abc9a7902bf49b2cb0759bc507456b7ebad02204e7c3d193ede2fd2b4cd6b39f51a920e581e35575e357e44d7b699c40ce61d3901008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000 " } ,
{ 2 ,
" 30440220010bf035d5823596e50dce2076a4d9f942d8d28031c9c428b901a02b6b8140de02203250e8e4a08bc5b4ecdca4d0eedf98223e02e3ac1c0206b3a7ffdb374aa21e5f " ,
" 30440220073de0067b88e425b3018b30366bfeda0ccb703118ccd3d02ead08c0f53511d002203fac50ac0e4cf8a3af0b4b1b12e801650591f748f8ddf1e089c160f10b69e511 " ,
" 0200000000010189a326e23addc28323dbadcb4e71c2c17088b6e8fa184103e552f44075dddc3402000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220010bf035d5823596e50dce2076a4d9f942d8d28031c9c428b901a02b6b8140de02203250e8e4a08bc5b4ecdca4d0eedf98223e02e3ac1c0206b3a7ffdb374aa21e5f014730440220073de0067b88e425b3018b30366bfeda0ccb703118ccd3d02ead08c0f53511d002203fac50ac0e4cf8a3af0b4b1b12e801650591f748f8ddf1e089c160f10b69e51101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000 " }
} ) ;
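// The payment hashes in the HTLCs above are just SHA256 of constant-byte
// preimages; an illustrative helper (not used by the test) showing that
// relationship with this file's types:
fn payment_hash_from_preimage_sketch(preimage: &PaymentPreimage) -> PaymentHash {
	PaymentHash(Sha256::hash(&preimage.0).into_inner())
}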
test_commitment_with_anchors! ( " 3045022100c592f6b80d35b4f5d1e3bc9788f51141a0065be6013bad53a1977f7c444651660220278ac06ead9016bfb8dc476f186eabace2b02793b2f308442f5b0d5f24a68948 " ,
" 3045022100c37ac4fc8538677631230c4b286f36b6f54c51fb4b34ef0bd0ba219ba47452630220278e09a745454ea380f3694392ed113762c68dd209b48360f547541088be9e45 " ,
" 02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aae9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c37ac4fc8538677631230c4b286f36b6f54c51fb4b34ef0bd0ba219ba47452630220278e09a745454ea380f3694392ed113762c68dd209b48360f547541088be9e4501483045022100c592f6b80d35b4f5d1e3bc9788f51141a0065be6013bad53a1977f7c444651660220278ac06ead9016bfb8dc476f186eabace2b02793b2f308442f5b0d5f24a6894801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220 " , {
{ 0 ,
" 304402202060a5acb12105e92f27d7b86e6caf1e003d9d82068338e5a8a9a0d14cba11260220030ca4dba8fad24a2e395906220c991eccd5369bc4b0f216d217b5f86d1fc61d " ,
" 3044022044f5425fe630fa614f349f55642e4a0b76e2583054b21543821660d9e8f3735702207f70424835b541874ca8bf0443cca4028afa2f6c03a17b0688df85d5c44eeefc " ,
" 02000000000101aa443fb63abc1e8c754f98a7b96c27cb02b21d891d1242a16b630dc32c2afe29020000000001000000011e070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202060a5acb12105e92f27d7b86e6caf1e003d9d82068338e5a8a9a0d14cba11260220030ca4dba8fad24a2e395906220c991eccd5369bc4b0f216d217b5f86d1fc61d83473044022044f5425fe630fa614f349f55642e4a0b76e2583054b21543821660d9e8f3735702207f70424835b541874ca8bf0443cca4028afa2f6c03a17b0688df85d5c44eeefc012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000 " } ,
{ 1 ,
" 304402206fde7eb6d7a47fdc63705d3db2169054e229f10342dea66f150b163381f48a0802201be28509c2de9be4b7ab72c569c6fd51c0ce0904fea459142f31d442cd043eb8 " ,
" 3045022100ad0236a78dbd029d3a8f583f7f82ee62892273d45303d00ef5a03fecf8903a36022004b2db33f8ff2f4a08ca6127c9cbfd9144c691a2feb9287e36ae6bc7c83c5a5f " ,
" 02000000000101aa443fb63abc1e8c754f98a7b96c27cb02b21d891d1242a16b630dc32c2afe2903000000000100000001e0120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fde7eb6d7a47fdc63705d3db2169054e229f10342dea66f150b163381f48a0802201be28509c2de9be4b7ab72c569c6fd51c0ce0904fea459142f31d442cd043eb883483045022100ad0236a78dbd029d3a8f583f7f82ee62892273d45303d00ef5a03fecf8903a36022004b2db33f8ff2f4a08ca6127c9cbfd9144c691a2feb9287e36ae6bc7c83c5a5f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000 " } ,
{ 2 ,
" 304402205eebc78d8ae6a36c27ef80172359eb757fb18e99fa75b28c37ffe3444b967bc7022060a01c33398d4d8244c42c762fb699e9f61c1f034ff976df2c94350c5a6032a7 " ,
" 3045022100ad3fd523594e1b876316401774a30ee6c48bb7fa0efd768bf9a2d022201311ff02207bed627ed8e01041137f03dbaf03c836970be27a4d50f69d90cf1282ff2815e3 " ,
" 02000000000101aa443fb63abc1e8c754f98a7b96c27cb02b21d891d1242a16b630dc32c2afe2904000000000100000001e0120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205eebc78d8ae6a36c27ef80172359eb757fb18e99fa75b28c37ffe3444b967bc7022060a01c33398d4d8244c42c762fb699e9f61c1f034ff976df2c94350c5a6032a783483045022100ad3fd523594e1b876316401774a30ee6c48bb7fa0efd768bf9a2d022201311ff02207bed627ed8e01041137f03dbaf03c836970be27a4d50f69d90cf1282ff2815e301008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000 " }
} ) ;
}
#[test]
fn test_per_commitment_secret_gen() {
	// Test vectors from BOLT 3 Appendix D:
	let mut seed = [0; 32];
	seed[0..32].clone_from_slice(&hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
	assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
	           hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);

	seed[0..32].clone_from_slice(&hex::decode("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
	assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
	           hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);

	assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
	           hex::decode("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);

	assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
	           hex::decode("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);

	seed[0..32].clone_from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
	assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
	           hex::decode("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
}
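// For reference, the Appendix D algorithm the vectors above exercise,
// sketched as chan_utils::build_commitment_secret implements it: for each
// set bit of the 48-bit index (highest first), flip that bit in the running
// 32-byte value and SHA256 the result.
fn build_commitment_secret_sketch(commitment_seed: &[u8; 32], idx: u64) -> [u8; 32] {
	let mut res: [u8; 32] = commitment_seed.clone();
	for i in 0..48 {
		let bitpos = 47 - i;
		if idx & (1 << bitpos) == (1 << bitpos) {
			// Flip the bit, then hash; unset bits leave the value untouched.
			res[bitpos / 8] ^= 1 << (bitpos & 7);
			res = Sha256::hash(&res).into_inner();
		}
	}
	res
}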
#[test]
fn test_key_derivation() {
	// Test vectors from BOLT 3 Appendix E:
	let secp_ctx = Secp256k1::new();
	let base_secret = SecretKey::from_slice(&hex::decode("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
	let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();

	let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
	assert_eq!(base_point.serialize()[..], hex::decode("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);

	let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
	assert_eq!(per_commitment_point.serialize()[..], hex::decode("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);

	assert_eq!(chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &base_point).unwrap().serialize()[..],
	           hex::decode("0235f2dbfaa89b57ec7b055afe29849ef7ddfeb1cefdb9ebdc43f5494984db29e5").unwrap()[..]);

	assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret).unwrap(),
	           SecretKey::from_slice(&hex::decode("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());

	assert_eq!(chan_utils::derive_public_revocation_key(&secp_ctx, &per_commitment_point, &base_point).unwrap().serialize()[..],
	           hex::decode("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);

	assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret).unwrap(),
	           SecretKey::from_slice(&hex::decode("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
}
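// For reference, the Appendix E derivation checked above: the derived key is
// base_point + SHA256(per_commitment_point || base_point) * G. A sketch of
// what chan_utils::derive_public_key computes:
fn derive_public_key_sketch<T: secp256k1::Verification>(secp_ctx: &Secp256k1<T>,
	per_commitment_point: &PublicKey, base_point: &PublicKey) -> PublicKey {
	use bitcoin::hashes::HashEngine;
	let mut sha = Sha256::engine();
	sha.input(&per_commitment_point.serialize());
	sha.input(&base_point.serialize());
	let res = Sha256::from_engine(sha).into_inner();
	// Interpret the hash as a scalar and add its point to the base point.
	let hashkey = PublicKey::from_secret_key(&secp_ctx,
		&SecretKey::from_slice(&res).expect("SHA256 output is a valid scalar with overwhelming probability"));
	base_point.combine(&hashkey).expect("addition fails only for the point at infinity")
}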
#[test]
fn test_zero_conf_channel_type_support() {
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15000 });
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
	let logger = test_utils::TestLogger::new();

	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let config = UserConfig::default();
	let node_a_chan = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider,
		node_b_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config, 0, 42).unwrap();

	let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
	channel_type_features.set_zero_conf_required();

	let mut open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
	open_channel_msg.channel_type = Some(channel_type_features);
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
	let res = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider,
		node_b_node_id, &InitFeatures::known(), &open_channel_msg, 7, &config, 0, &&logger, 42);
	assert!(res.is_ok());
}
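// Note that accepting a zero-conf channel_type is separate from trusting the
// funder: forgoing confirmations is a local policy decision. A hedged sketch
// of such a gate (`trusts_funder` is a hypothetical policy input, not a
// library field):
fn accept_zero_conf_sketch(channel_type: &ChannelTypeFeatures, trusts_funder: bool) -> bool {
	!channel_type.requires_zero_conf() || trusts_funder
}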
}